code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from tools.factories import generator_factory
import ctypes
# Test cases for the (non-standard) "%U" conversion specifier: each entry
# pairs a printf-style format byte string with the ctypes argument to pass.
basic_cases = [
    # plain "%U" combined with each individual flag, width, precision and
    # length modifier in turn
    [b'%U\n', ctypes.c_long(0)],
    [b'% U\n', ctypes.c_long(0)],
    [b'%+U\n', ctypes.c_long(0)],
    [b'%-U\n', ctypes.c_long(0)],
    [b'%0U\n', ctypes.c_long(0)],
    [b'%#U\n', ctypes.c_long(0)],
    [b'%10U\n', ctypes.c_long(0)],
    [b'%.6U\n', ctypes.c_long(0)],
    [b'%hhU\n', ctypes.c_long(0)],
    [b'%llU\n', ctypes.c_long(0)],
    [b'%hU\n', ctypes.c_long(0)],
    [b'%lU\n', ctypes.c_long(0)],
    [b'%jU\n', ctypes.c_long(0)],
    [b'%zU\n', ctypes.c_long(0)],
]
# cases combining several flags/modifiers in one specifier
mixed_cases = [
    [b'%-02U\n', ctypes.c_short(0)],
    [b'% 0+-#10.5llU\n', ctypes.c_long(42)],
]
# Named test sets consumed by generator_factory.
test_sets = [
    {
        'name': 'U tests - basics.',
        'cases': basic_cases
    },
    {
        # Fixed copy-paste error: this set previously reused the name
        # "U tests - basics." although it holds the mixed-modifier cases,
        # making the two sets indistinguishable in test output.
        'name': 'U tests - mixed.',
        'cases': mixed_cases
    }
]
cases_generator = generator_factory(test_sets)
| vmonteco/YAPT | test_files/uu_cases_regular.py | Python | gpl-3.0 | 907 |
import numpy as np
### 1: IH
### 2: EH
### 3: AE
### 4: AO
### 6: AH
### 7: UH
###
### 11: iyC beat
### 12: iyF be
### 21: eyC bait
### 22: eyF bay
### 41: ayV buy
### 47: ayO bite
### 61: oy boy
### 42: aw bough
### 62: owC boat
### 63: owF bow
### 72: uwC boot
### 73: uwF too
### 82: iw suit
### 43: ah father
### 53: oh bought
# Mean acoustic feature vector per vowel-class code (see the code table in the
# comments above).  Each value is a 4-vector; the first two columns are
# presumably F1/F2 formant frequencies in Hz and the last two log-scaled
# measures -- TODO confirm the exact column semantics against the caller.
MEANS = {'1': [525.2877, 1941.229, 4.429892, 4.79822],
         '2': [663.079, 1847.689, 4.542639, 4.867548],
         '3': [728.7752, 1893.15, 4.615404, 4.989836],
         '5': [810.9155, 1336.642, 4.839556, 4.933221],
         '6': [714.8795, 1448.528, 4.671836, 4.831577],
         '7': [543.7372, 1288.546, 4.424041, 4.69879],
         '11': [411.3944, 2275.093, 4.39259, 4.796765],
         '12': [440.3333, 2197.091, 4.476098, 4.667141],
         '14': [441.1307, 2233.011, 4.403062, 4.860253],
         '21': [599.2163, 1978.542, 4.40423, 4.894037],
         '22': [592.5507, 1959.899, 4.087865, 4.802181],
         '24': [569.8613, 1991.994, 4.516866, 4.941955],
         '33': [620.1378, 2059.224, 4.347911, 5.027536],
         '39': [766.087, 1829.261, 4.693657, 5.013284],
         '41': [808.0645, 1449.711, 4.8443, 4.95776],
         '42': [782.8331, 1672.451, 4.788051, 5.007045],
         '43': [780.8631, 1326.295, 4.705, 4.908504],
         '44': [718.2819, 1273.59, 4.702502, 4.840136],
         '47': [777.8737, 1478.317, 4.718795, 4.919554],
         '53': [740.2186, 1167.617, 4.744859, 4.794929],
         '54': [557.1122, 975.9273, 4.660808, 4.762645],
         '61': [539.7747, 982.9505, 4.592872, 4.76811],
         '62': [628.9169, 1342.568, 4.514038, 4.772455],
         '63': [620.0192, 1332.923, 4.057321, 4.700364],
         '64': [528.9181, 953.4962, 4.608001, 4.762555],
         '72': [452.4824, 1282.609, 4.364288, 4.775122],
         '73': [445.9345, 1819.35, 4.312133, 4.828277],
         '74': [467.2353, 1204.176, 4.356453, 4.634414],
         '82': [416.0622, 1873.91, 4.364174, 4.858582],
         '94': [553.9443, 1486.107, 4.564759, 4.965292]}
# Per-vowel-class 4x4 covariance matrices over the same feature vector as
# MEANS (keys match MEANS keys).  Each matrix is symmetric, as expected for
# a covariance; used together with MEANS for Mahalanobis-distance
# computations (per the module path) -- verify against the caller.
COVS = {'1': np.matrix([[8156.961,4075.974,13.05440,6.823964],
                        [4075.974,85957.07,23.81249,31.39354],
                        [13.05440,23.81249,0.425308,0.02637788],
                        [6.823964,31.39354,0.02637788,0.2685742]]),
        '2': np.matrix([[12610.98, 4598.212, 15.72022, 10.93343],
                        [4598.212, 76695.1, 35.53953, 32.24821],
                        [15.72022, 35.53953, 0.3856857, 0.04138077],
                        [10.93343, 32.24821, 0.04138077, 0.2402458]]),
        '3': np.matrix([[20287.03, -945.841, 23.69788, 19.12778],
                        [-945.841, 85500.7, 32.35261, 42.61164],
                        [23.69788, 32.35261, 0.408185, 0.05798509],
                        [19.12778, 42.61164, 0.05798509, 0.2402007]]),
        '5': np.matrix([[14899.38, 14764.41, 31.29953, 25.64715],
                        [14764.41, 33089.55, 30.80144, 35.65717],
                        [31.29953, 30.80144, 0.3399745, 0.1391051],
                        [25.64715, 35.65717, 0.1391051, 0.2939521]]),
        '6': np.matrix([[10963.32, 11881.45, 24.02174, 14.15601],
                        [11881.45, 50941.8, 30.80307, 29.26477],
                        [24.02174, 30.80307, 0.356582, 0.08377454],
                        [14.15601, 29.26477, 0.08377454, 0.2798376]]),
        '7': np.matrix([[7374.7, 6907.065, 14.77475, 6.575189],
                        [6907.065, 103775.4, -6.194884, 33.8729],
                        [14.77475, -6.194884, 0.3619565, 0.08537324],
                        [6.575189, 33.8729, 0.08537324, 0.3309069]]),
        '11': np.matrix([[7398.308, 111.3878, 14.47063, 5.261133],
                         [111.3878, 112484.4, 4.204222, 27.97763],
                         [14.47063, 4.204222, 0.439087, 0.01820014],
                         [5.261133, 27.97763, 0.01820014, 0.2864814]]),
        '12': np.matrix([[8980.604, 16.4375, 12.43177, 6.508381],
                         [16.4375, 68185.02, -41.39826, 43.07926],
                         [12.43177, -41.39826, 0.4922286, 0.04943888],
                         [6.508381, 43.07926, 0.04943888, 0.2746969]]),
        '14': np.matrix([[5766.88, 1678.53, 13.6561, 6.172833],
                         [1678.53, 97981.07, -18.30658, 5.520951],
                         [13.6561, -18.30658, 0.391424, 0.02907505],
                         [6.172833, 5.520951, 0.02907505, 0.2467823]]),
        '21': np.matrix([[11345.63, 902.1107, 15.79774, 8.412416],
                         [902.1107, 94016.08, 21.16553, 52.47692],
                         [15.79774, 21.16553, 0.3749903, 0.04202547],
                         [8.412416, 52.47692, 0.04202547, 0.2549386]]),
        '22': np.matrix([[7981.016, 7101.174, 15.52651, 7.784475],
                         [7101.174, 67936.53, 30.4288, 81.06186],
                         [15.52651, 30.4288, 0.4057237, 0.07124884],
                         [7.784475, 81.06186, 0.07124884, 0.4493804]]),
        '24': np.matrix([[7187.811, 4778.768, 11.81843, 8.616023],
                         [4778.768, 97292.62, 24.02699, 46.71447],
                         [11.81843, 24.02699, 0.3862976, 0.05487306],
                         [8.616023, 46.71447, 0.05487306, 0.2361443]]),
        '33': np.matrix([[13020.63, -808.1123, 24.56315, 8.443287],
                         [-808.1123, 86325.97, 40.21192, 34.7022],
                         [24.56315, 40.21192, 0.4743995, 0.04472998],
                         [8.443287, 34.7022, 0.04472998, 0.2473551]]),
        '39': np.matrix([[9703.72, 5470.067, 24.62053, 27.96038],
                         [5470.067, 24951.84, 1.931964, 29.95240],
                         [24.62053, 1.931964, 0.2513445, 0.06440874],
                         [27.96038, 29.95240, 0.06440874, 0.1886862]]),
        '41': np.matrix([[15762.87, 13486.23, 34.61164, 22.15451],
                         [13486.23, 36003.67, 33.8431, 30.52712],
                         [34.61164, 33.8431, 0.4143354, 0.1125765],
                         [22.15451, 30.52712, 0.1125765, 0.2592451]]),
        '42': np.matrix([[17034.35, 8582.368, 28.08871, 21.32564],
                         [8582.368, 83324.55, 22.75919, 38.33975],
                         [28.08871, 22.75919, 0.3619946, 0.06974927],
                         [21.32564, 38.33975, 0.06974927, 0.2425371]]),
        '43': np.matrix([[12651.21, 14322.93, 32.66122, 27.76152],
                         [14322.93, 31322.54, 35.98834, 42.55531],
                         [32.66122, 35.98834, 0.3651260, 0.1821268],
                         [27.76152, 42.55531, 0.1821268, 0.3104338]]),
        '44': np.matrix([[11222.69, 12217.39, 25.91937, 20.97844],
                         [12217.39, 42712.38, 31.49909, 51.63623],
                         [25.91937, 31.49909, 0.3007976, 0.1284959],
                         [20.97844, 51.63623, 0.1284959, 0.3128419]]),
        '47': np.matrix([[14093.57, 9982.23, 34.45142, 19.68046],
                         [9982.23, 45110.74, 35.51612, 32.38417],
                         [34.45142, 35.51612, 0.3875129, 0.1126590],
                         [19.68046, 32.38417, 0.1126590, 0.2684052]]),
        '53': np.matrix([[13901.81, 14774.98, 29.65039, 23.37561],
                         [14774.98, 28293.08, 26.55524, 28.10525],
                         [29.65039, 26.55524, 0.3192664, 0.1368551],
                         [23.37561, 28.10525, 0.1368551, 0.3102375]]),
        '54': np.matrix([[9024.312, 11004.40, 14.01676, 6.774474],
                         [11004.40, 31347.50, 0.5099728, 1.338353],
                         [14.01676, 0.5099728, 0.3226124, 0.1001887],
                         [6.774474, 1.338353, 0.1001887, 0.3517336]]),
        '61': np.matrix([[8717.966, 8360.663, 9.581423, -3.629271],
                         [8360.663, 32997.70, -18.37126, -13.78926],
                         [9.581423, -18.37126, 0.31812, 0.09862598],
                         [-3.629271, -13.78926, 0.09862598, 0.3626406]]),
        '62': np.matrix([[11036.78, 18957.63, 21.16886, 10.91295],
                         [18957.63, 86701.64, 15.58485, 35.06782],
                         [21.16886, 15.58485, 0.3620286, 0.08347947],
                         [10.91295, 35.06782, 0.08347947, 0.2859568]]),
        '63': np.matrix([[11190.96, 16442.24, 34.42818, 9.032116],
                         [16442.24, 53108.15, 44.34654, 47.59889],
                         [34.42818, 44.34654, 0.2837371, -0.000626268],
                         [9.032116, 47.59889, -0.000626268, 0.4513407]]),
        '64': np.matrix([[7020.379, 9304.635, 11.09179, 2.643800],
                         [9304.635, 34884.03, -2.304886, -0.4383724],
                         [11.09179, -2.304886, 0.3025123, 0.09179999],
                         [2.643800, -0.4383724, 0.09179999, 0.3638192]]),
        '72': np.matrix([[5302.16, 8112.09, 11.229, -1.767770],
                         [8112.09, 142019.8, -1.869954, 25.76638],
                         [11.229, -1.869954, 0.4222974, 0.03546093],
                         [-1.767770, 25.76638, 0.03546093, 0.3773977]]),
        '73': np.matrix([[5441.397, 6032.27, 6.348957, 0.7710968],
                         [6032.27, 89482.47, -10.52576, 19.44117],
                         [6.348957, -10.52576, 0.418909, 0.01018179],
                         [0.7710968, 19.44117, 0.01018179, 0.2577171]]),
        '74': np.matrix([[3658.316, -3584.357, 6.224247, 9.464968],
                         [-3584.357, 51303.03, -78.23124, 26.34888],
                         [6.224247, -78.23124, 0.3590685, 0.04111837],
                         [9.464968, 26.34888, 0.04111837, 0.2657895]]),
        '82': np.matrix([[5067.216, 3725.284, 8.112584, -2.087986],
                         [3725.284, 95441.09, 4.191305, 8.484181],
                         [8.112584, 4.191305, 0.4392269, 0.02049446],
                         [-2.087986, 8.484181, 0.02049446, 0.284428]]),
        '94': np.matrix([[7035.538, 4075.101, 14.86012, 4.748889],
                         [4075.101, 41818.21, 26.42395, 26.1902],
                         [14.86012, 26.42395, 0.3585293, 0.03962729],
                         [4.748889, 26.1902, 0.03962729, 0.2598092]])}
| mmcauliffe/linguistic-helper-functions | linghelper/phonetics/vowels/mahalanobis.py | Python | gpl-3.0 | 11,375 |
import os
import sys
import logging
import inspect
from inspect import getmembers, isfunction
from commands import command
import handlers
logger = logging.getLogger(__name__)
class tracker:
    """Records commands, handlers and shared objects registered by plugins.

    One plugin is tracked at a time between start() and end(); finished
    records accumulate in self.list.
    """

    def __init__(self):
        self.bot = None
        self.list = []
        self.reset()

    def set_bot(self, bot):
        """Attach the bot instance used during plugin registration."""
        self.bot = bot

    def reset(self):
        """Discard per-plugin state and start from an empty record."""
        self._current = {
            "commands": {"admin": [], "user": [], "all": None},
            "handlers": [],
            "shared": [],
            "metadata": None,
        }

    def start(self, metadata):
        """Begin tracking a new plugin described by *metadata*."""
        self.reset()
        self._current["metadata"] = metadata

    def current(self):
        """Return the in-progress record, refreshing the "all" command list."""
        combined = set(self._current["commands"]["admin"])
        combined.update(self._current["commands"]["user"])
        self._current["commands"]["all"] = list(combined)
        return self._current

    def end(self):
        """Finalise the current plugin record and archive it."""
        self.list.append(self.current())

    def register_command(self, type, command_names):
        """call during plugin init to register commands"""
        merged = self._current["commands"][type] + command_names
        # de-duplicate while keeping a plain list
        self._current["commands"][type] = list(set(merged))

    def register_handler(self, function, type, priority):
        """Record an external handler registration."""
        self._current["handlers"].append((function, type, priority))

    def register_shared(self, id, objectref, forgiving):
        """Record a shared-object registration."""
        self._current["shared"].append((id, objectref, forgiving))
# module-level singleton shared by all plugins during registration
tracking = tracker()
"""helpers"""
def register_user_command(command_names):
    """user command registration"""
    # accept a single name or a list of names
    names = command_names if isinstance(command_names, list) else [command_names]
    tracking.register_command("user", names)
def register_admin_command(command_names):
    """admin command registration, overrides user command registration"""
    # accept a single name or a list of names
    names = command_names if isinstance(command_names, list) else [command_names]
    tracking.register_command("admin", names)
def register_handler(function, type="message", priority=50):
    """register external handler"""
    # delegate straight to the bot's handler registry
    tracking.bot._handlers.register_handler(function, type, priority)
def register_shared(id, objectref, forgiving=True):
    """register shared object"""
    # delegate straight to the bot's shared-object registry
    tracking.bot.register_shared(id, objectref, forgiving=forgiving)
"""plugin loader"""
def retrieve_all_plugins(plugin_path=None, must_start_with=False):
    """recursively loads all plugins from the standard plugins path
    * a plugin file or folder must not begin with . or _
    * a subfolder containing a plugin must have an __init__.py file
    * sub-plugin files (additional plugins inside a subfolder) must be prefixed with the
        plugin/folder name for it to be automatically loaded
    """
    if not plugin_path:
        plugin_path = os.path.dirname(os.path.realpath(sys.argv[0])) + os.sep + "plugins"

    found = []
    for entry in os.listdir(plugin_path):
        # hidden/private entries are never plugins
        if entry.startswith(("_", ".")):
            continue
        # sub-plugins must carry their parent folder's name as a prefix
        if must_start_with and not entry.startswith(must_start_with):
            continue

        entry_path = os.path.join(plugin_path, entry)
        stem = os.path.splitext(entry)[0]  # entry without .py extension
        modules = [stem]

        if os.path.isfile(entry_path):
            if not entry.endswith(".py"):
                continue
        else:
            # a plugin folder must be a package
            if not os.path.isfile(os.path.join(entry_path, "__init__.py")):
                continue
            modules.extend(stem + "." + sub
                           for sub in retrieve_all_plugins(entry_path, must_start_with=entry))

        found.extend(modules)

    logger.debug("retrieved {}: {}.{}".format(len(found), must_start_with or "plugins", found))
    return found
def get_configured_plugins(bot):
    """Return the list of plugin modules to load.

    With the "plugins" config key unset/null, every discovered plugin is
    used.  Otherwise each configured name is suffix-matched against the
    discovered dotted module paths; ambiguous or unknown names are logged
    and skipped.
    """
    all_plugins = retrieve_all_plugins()
    config_plugins = bot.get_config_option('plugins')
    if config_plugins is None: # must be unset in config or null
        logger.info("plugins is not defined, using ALL")
        plugin_list = all_plugins
    else:
        """perform fuzzy matching with actual retrieved plugins, e.g. "abc" matches "xyz.abc"
        if more than one match found, don't load plugin
        """
        plugins_included = []
        # NOTE: alias of all_plugins; shrinks as configured names claim matches,
        # so each plugin can only be claimed once
        plugins_excluded = all_plugins
        plugin_name_ambiguous = []
        plugin_name_not_found = []
        for configured in config_plugins:
            # leading dot forces a whole-component suffix match
            dotconfigured = "." + configured
            matches = []
            for found in plugins_excluded:
                fullfound = "plugins." + found
                if fullfound.endswith(dotconfigured):
                    matches.append(found)
            num_matches = len(matches)
            if num_matches <= 0:
                logger.debug("{} no match".format(configured))
                plugin_name_not_found.append(configured)
            elif num_matches == 1:
                logger.debug("{} matched to {}".format(configured, matches[0]))
                plugins_included.append(matches[0])
                plugins_excluded.remove(matches[0])
            else:
                logger.debug("{} ambiguous, matches {}".format(configured, matches))
                plugin_name_ambiguous.append(configured)
        if plugins_excluded:
            logger.info("excluded {}: {}".format(len(plugins_excluded), plugins_excluded))
        if plugin_name_ambiguous:
            logger.warning("ambiguous plugin names: {}".format(plugin_name_ambiguous))
        if plugin_name_not_found:
            logger.warning("plugin not found: {}".format(plugin_name_not_found))
        plugin_list = plugins_included
    logger.info("included {}: {}".format(len(plugin_list), plugin_list))
    return plugin_list
def load(bot, command_dispatcher):
    """load plugins and perform any initialisation required to set them up"""
    tracking.set_bot(bot)
    command_dispatcher.set_tracking(tracking)
    plugin_list = get_configured_plugins(bot)
    for module in plugin_list:
        module_path = "plugins.{}".format(module)
        tracking.start({ "module": module, "module.path": module_path })
        try:
            # dynamic import of the plugin module by dotted path
            exec("import {}".format(module_path))
        except Exception as e:
            logger.exception("EXCEPTION during plugin import: {}".format(module_path))
            continue
        public_functions = [o for o in getmembers(sys.modules[module_path], isfunction)]
        candidate_commands = []
        """pass 1: run optional callable: _initialise, _initialize
        * performs house-keeping tasks (e.g. migration, tear-up, pre-init, etc)
        * registers user and/or admin commands
        """
        available_commands = False # default: ALL
        try:
            for function_name, the_function in public_functions:
                if function_name == "_initialise" or function_name == "_initialize":
                    """accepted function signatures:
                    CURRENT
                    version >= 2.4 | function()
                    version >= 2.4 | function(bot) - parameter must be named "bot"
                    LEGACY
                    version <= 2.4 | function(handlers, bot)
                    ancient | function(handlers)
                    """
                    # inspect the init function's parameters to pick the
                    # right calling convention
                    _expected = list(inspect.signature(the_function).parameters)
                    if len(_expected) == 0:
                        the_function()
                        _return = []
                    elif len(_expected) == 1 and _expected[0] == "bot":
                        the_function(bot)
                        _return = []
                    else:
                        try:
                            # legacy support, pre-2.4
                            _return = the_function(bot._handlers, bot)
                        except TypeError as e:
                            # legacy support, ancient plugins
                            _return = the_function(bot._handlers)
                    if type(_return) is list:
                        available_commands = _return
                elif function_name.startswith("_"):
                    # other underscore-prefixed functions are private
                    pass
                else:
                    candidate_commands.append((function_name, the_function))
            if available_commands is False:
                # implicit init, legacy support: assume all candidate_commands are user-available
                register_user_command([function_name for function_name, function in candidate_commands])
            # NOTE(review): `is []` compares identity against a fresh list and is
            # therefore ALWAYS False; an explicit empty list returned by
            # _initialise falls through to the else-branch below, where
            # register_user_command([]) happens to be a no-op, so behaviour is
            # accidentally correct -- confirm intent before changing.
            elif available_commands is []:
                # explicit init, no user-available commands
                pass
            else:
                # explicit init, legacy support: _initialise() returned user-available commands
                register_user_command(available_commands)
        except Exception as e:
            logger.exception("EXCEPTION during plugin init: {}".format(module_path))
            continue # skip this, attempt next plugin
        """
        pass 2: register filtered functions
        tracking.current() and the CommandDispatcher registers might be out of sync if a
        combination of decorators and register_user_command/register_admin_command is used since
        decorators execute immediately upon import
        """
        plugin_tracking = tracking.current()
        explicit_admin_commands = plugin_tracking["commands"]["admin"]
        all_commands = plugin_tracking["commands"]["all"]
        registered_commands = []
        for function_name, the_function in candidate_commands:
            if function_name in all_commands:
                is_admin = False
                text_function_name = function_name
                if function_name in explicit_admin_commands:
                    is_admin = True
                    # admin commands are displayed with a leading asterisk
                    text_function_name = "*" + text_function_name
                command_dispatcher.register(the_function, admin=is_admin)
                registered_commands.append(text_function_name)
        if registered_commands:
            logger.info("{} - {}".format(module, ", ".join(registered_commands)))
        else:
            logger.info("{} - no commands".format(module))
        tracking.end()
@command.register(admin=True)
def plugininfo(bot, event, *args):
    """dumps plugin information"""
    output = []
    for plugin in tracking.list:
        # no argument lists everything; otherwise filter by module substring
        if args and args[0] not in plugin["metadata"]["module"]:
            continue

        output.append("<b>{}</b>".format(plugin["metadata"]["module.path"]))

        # admin commands
        admin_commands = plugin["commands"]["admin"]
        if admin_commands:
            output.append("<i>admin commands:</i> {}".format(", ".join(admin_commands)))

        # user-only commands
        user_only_commands = list(set(plugin["commands"]["user"]) - set(admin_commands))
        if user_only_commands:
            output.append("<i>user commands:</i> {}".format(", ".join(user_only_commands)))

        # handlers
        if plugin["handlers"]:
            output.append("<i>handlers:</i>" + ", ".join(
                "{} ({}, p={})".format(h[0].__name__, h[1], str(h[2])) for h in plugin["handlers"]))

        # shared objects
        if plugin["shared"]:
            output.append("<i>shared:</i>" + ", ".join(s[1].__name__ for s in plugin["shared"]))

        output.append("")

    bot.send_html_to_conversation(event.conv_id, "<br />".join(output))
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-09 20:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable "logo" field to the elearning Setting model.

    dependencies = [
        ('elearning', '0010_auto_20160209_2042'),
    ]
    operations = [
        migrations.AddField(
            model_name='setting',
            name='logo',
            # null=True so existing Setting rows need no default value
            field=models.CharField(max_length=256, null=True),
        ),
    ]
| tkupek/tkupek-elearning | tkupek_elearning/elearning/migrations/0011_setting_logo.py | Python | gpl-3.0 | 461 |
def is_prime(n):
    """Return True if n is prime, False otherwise.

    Fixed: the original returned True for n < 2 and for small even n
    (e.g. 4), because its evenness test lived inside a loop body that is
    never entered for those values.  Trial division by odd candidates
    only, O(sqrt(n)).  `range` replaces `xrange` so this runs unchanged
    on Python 2 and Python 3.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    for j in range(3, int(n**.5) + 1, 2):
        if n % j == 0:
            return False
    return True
def lpf(n):
    """Return the largest prime factor of n (n >= 2).

    Fixed: the original scanned only odd trial divisors up to sqrt(n),
    so it mishandled even n, prime n, and numbers whose largest prime
    factor exceeds sqrt(n); in those cases `s` could be unbound or the
    result wrong.  This version divides factors out as it finds them, so
    the cofactor remaining at the end is itself the largest prime factor.
    """
    largest = None
    # strip out factors of two first so only odd candidates remain
    while n % 2 == 0:
        largest = 2
        n //= 2
    f = 3
    while f * f <= n:
        while n % f == 0:
            largest = f
            n //= f
        f += 2
    # whatever is left (> 1) is a prime factor larger than sqrt of the rest
    if n > 1:
        largest = n
    return largest
if __name__ == "__main__":
    # Project Euler #3: largest prime factor of 600851475143.
    from time import time
    start = time()
    # print() with a single argument is valid on both Python 2 and Python 3
    print(lpf(600851475143))
    print("LPF time: {}".format(time() - start))
| MikeRixWolfe/projecteuler | e003.py | Python | gpl-3.0 | 424 |
#
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 -2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ctypes import c_ubyte, string_at
import rtlsdr
import serial
class DeviceGPS(object):
    """Configuration for a GPS source (serial NMEA, GPSd or NMEA server)."""

    # source types and their display names (indices line up)
    NMEA_SERIAL, GPSD, GPSD_OLD, NMEA_TCP = range(4)
    TYPE = ['NMEA (Serial)', 'GPSd', 'GPSd (Legacy)', 'NMEA (Server)']
    # serial-port parameter choices offered to the user
    BYTES = [serial.FIVEBITS, serial.SIXBITS, serial.SEVENBITS,
             serial.EIGHTBITS]
    PARITIES = [serial.PARITY_NONE, serial.PARITY_EVEN, serial.PARITY_ODD,
                serial.PARITY_MARK, serial.PARITY_SPACE]
    STOPS = [serial.STOPBITS_ONE, serial.STOPBITS_ONE_POINT_FIVE,
             serial.STOPBITS_TWO]

    def __init__(self):
        self.name = 'GPS'
        self.type = self.GPSD
        self.resource = 'localhost:2947'
        self.baud = 115200
        self.bytes = serial.EIGHTBITS
        self.parity = serial.PARITY_NONE
        self.stops = serial.STOPBITS_ONE
        self.soft = False

    def get_bauds(self):
        """Supported baud rates, or None when no serial port is involved."""
        if self.type != DeviceGPS.NMEA_SERIAL:
            return None
        return serial.Serial.BAUDRATES

    def get_serial_desc(self):
        """Compact 'port baud-framing' summary of the serial settings."""
        device = self.resource.split('/')[0]
        return '{} {}-{}{}{:g}'.format(device, self.baud, self.bytes,
                                       self.parity, self.stops)

    def get_desc(self):
        """Short human-readable description of the configured source."""
        if self.type == DeviceGPS.NMEA_SERIAL:
            return self.get_serial_desc()
        return self.resource
class DeviceRTL(object):
    """Configuration for an RTL-SDR dongle or a remote rtl_tcp server."""

    def __init__(self):
        self.isDevice = True       # False for remote (server) entries
        self.indexRtl = None       # librtlsdr device index
        self.name = None
        self.serial = ''
        self.server = 'localhost'
        self.port = 1234
        self.gains = []            # supported gain values in dB
        self.gain = 0
        self.calibration = 0
        self.lo = 0
        self.offset = 250e3
        self.tuner = 0
        self.levelOff = 0

    def set(self, device):
        """Copy user-tunable settings over from another DeviceRTL."""
        for attr in ('gain', 'calibration', 'lo', 'offset', 'tuner', 'levelOff'):
            setattr(self, attr, getattr(device, attr))

    def get_gains_str(self):
        """Supported gains as a list of strings."""
        return [str(gain) for gain in self.gains]

    def get_closest_gain_str(self, desired):
        """String form of the supported gain nearest to *desired*."""
        return str(min(self.gains, key=lambda g: abs(g - desired)))
def get_devices_rtl(currentDevices=None, statusBar=None):
    """Query librtlsdr for attached devices and merge in saved settings.

    currentDevices -- previously configured DeviceRTL objects: settings of
                      matching physical devices (same name and serial) are
                      carried over, and non-physical (server) entries are
                      appended unchanged
    statusBar -- optional status bar used to report progress
    Returns a list of DeviceRTL objects.
    """
    if statusBar is not None:
        statusBar.set_general("Refreshing device list...")
    if currentDevices is None:
        currentDevices = []
    devices = []
    count = rtlsdr.librtlsdr.rtlsdr_get_device_count()
    for dev in range(0, count):
        device = DeviceRTL()
        device.indexRtl = dev
        device.name = format_device_rtl_name(rtlsdr.librtlsdr.rtlsdr_get_device_name(dev))
        buffer1 = (c_ubyte * 256)()
        buffer2 = (c_ubyte * 256)()
        # renamed from `serial`: that local shadowed the imported pyserial
        # `serial` module within this function
        serial_buf = (c_ubyte * 256)()
        rtlsdr.librtlsdr.rtlsdr_get_device_usb_strings(dev, buffer1, buffer2,
                                                       serial_buf)
        device.serial = string_at(serial_buf)
        try:
            sdr = rtlsdr.RtlSdr(dev)
        except IOError:
            # device busy or unreadable - skip it
            continue
        device.gains = sdr.valid_gains_db
        device.calibration = 0.0
        device.lo = 0.0
        # carry over saved settings for the same physical device
        for conf in currentDevices:
            if conf.isDevice and device.name == conf.name and device.serial == conf.serial:
                device.set(conf)
        devices.append(device)
    # keep configured remote (non-physical) devices as-is
    for conf in currentDevices:
        if not conf.isDevice:
            devices.append(conf)
    if statusBar is not None:
        statusBar.set_general("")
    return devices
def format_device_rtl_name(name):
    """Replace path-separator characters in *name* with spaces."""
    for separator in ("/", "\\"):
        name = name.replace(separator, " ")
    return name
if __name__ == '__main__':
    # This module is a library; the scanner is launched via rtlsdr_scan.py.
    # print() with a single argument works on both Python 2 and Python 3.
    print('Please run rtlsdr_scan.py')
    exit(1)
| EarToEarOak/RTLSDR-Scanner | rtlsdr_scanner/devices.py | Python | gpl-3.0 | 4,603 |
import os
from functools import reduce, lru_cache
import logging
import re
import subprocess
from randrctl import DISPLAY, XAUTHORITY
from randrctl.exception import XrandrException, ParseException
from randrctl.model import Profile, Viewport, XrandrConnection, Display
logger = logging.getLogger(__name__)
class Xrandr:
    """
    Interface for xrandr application. Provides methods for calling xrandr operating with python objects such as
    randrctl.profile.Profile
    """
    EXECUTABLE = "/usr/bin/xrandr"
    # xrandr CLI option names
    OUTPUT_KEY = "--output"
    MODE_KEY = "--mode"
    POS_KEY = "--pos"
    ROTATE_KEY = "--rotate"
    PANNING_KEY = "--panning"
    RATE_KEY = "--rate"
    SCALE_KEY = "--scale"
    PRIMARY_KEY = "--primary"
    CRTC_KEY = "--crtc"
    QUERY_KEY = "-q"
    VERBOSE_KEY = "--verbose"
    OFF_KEY = "--off"
    # parses the state portion of a "<name> connected ..." line:
    # optional "primary", geometry, optional rotation word, optional panning
    OUTPUT_DETAILS_REGEX = re.compile(
        '(?P<primary>primary )?(?P<geometry>[\dx\+]+) (?:(?P<rotate>\w+) )?.*?(?:panning (?P<panning>[\dx\+]+))?$')
    # splits "WxH+X+Y" into ("WxH", "X+Y")
    MODE_REGEX = re.compile("(\d+x\d+)\+(\d+\+\d+)")
    # splits a mode line into (mode, rate, trailing markers)
    CURRENT_MODE_REGEX = re.compile("\s*(\S+)\s+([0-9\.]+)(.*$)")
    def __init__(self, display: str, xauthority: str):
        # copy the environment so DISPLAY/XAUTHORITY overrides stay local
        env = dict(os.environ)
        if display:
            env[DISPLAY] = display
        if xauthority:
            env[XAUTHORITY] = xauthority
        self.env = env
    def apply(self, profile: Profile):
        """
        Apply given profile by calling xrandr
        """
        logger.debug("Applying profile %s", profile.name)
        args = self._compose_mode_args(profile, self.get_all_outputs())
        self._xrandr(*args)
    # NOTE(review): lru_cache on an instance method keeps the instance alive
    # and, more importantly, caches xrandr output per argument tuple for the
    # lifetime of this object -- repeated queries will not observe display
    # changes.  Confirm this is intentional before relying on fresh state.
    @lru_cache()
    def _xrandr(self, *args):
        """
        Perform call to xrandr executable with passed arguments.
        Returns subprocess.Popen object
        """
        args = list(args)
        logger.debug("Calling xrandr with args %s", args)
        args.insert(0, self.EXECUTABLE)
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, env=self.env)
        err = p.stderr.readlines()
        if err:
            # close descriptors
            p.stderr.close()
            p.stdout.close()
            err_str = ''.join(map(lambda x: x.decode(), err)).strip()
            raise XrandrException(err_str, args)
        # NOTE(review): on the success path the pipes are never closed and the
        # process is never wait()ed -- potential descriptor/zombie leak.
        out = list(map(lambda x: x.decode(), p.stdout.readlines()))
        if out:
            out.pop(0) # remove first line. It describes Screen
        return out
    def _compose_mode_args(self, profile: Profile, xrandr_connections: list):
        """
        Composes list of arguments to xrandr to apply profile settings and disable the other outputs
        """
        args = []
        active_names = []
        for name, o in profile.outputs.items():
            active_names.append(name)
            args.append(self.OUTPUT_KEY)
            args.append(name)
            args.append(self.MODE_KEY)
            args.append(o.mode)
            args.append(self.POS_KEY)
            args.append(o.pos)
            args.append(self.ROTATE_KEY)
            args.append(o.rotate)
            args.append(self.PANNING_KEY)
            args.append(o.panning)
            args.append(self.SCALE_KEY)
            args.append(o.scale)
            if o.rate:
                args.append(self.RATE_KEY)
                args.append(str(o.rate))
            if name == profile.primary:
                args.append(self.PRIMARY_KEY)
            if o.crtc is not None:
                args.append(self.CRTC_KEY)
                args.append(str(o.crtc))
        # turn off the others
        for c in xrandr_connections:
            if active_names.count(c.name) == 0:
                args.append(self.OUTPUT_KEY)
                args.append(c.name)
                args.append(self.OFF_KEY)
        return args
    def get_all_outputs(self):
        """
        Query xrandr for all supported outputs.
        Performs call to xrandr with -q key and parses output.
        Returns list of outputs with some properties missing (only name and status are guaranteed)
        """
        outputs = []
        items = self._xrandr(self.QUERY_KEY)
        items = self._group_query_result(items)
        logger.debug("Detected total %d outputs", len(items))
        # CRTC assignments only appear in the verbose listing
        crtcs = self._get_verbose_fields('CRTC')
        for i in items:
            o = self._parse_xrandr_connection(i)
            o.crtc = int(crtcs[o.name]) if o.name in crtcs and len(crtcs[o.name]) else None
            outputs.append(o)
        return outputs
    def get_connected_outputs(self):
        """
        Query xrandr and return list of connected outputs.
        Performs call to xrandr with -q and --verbose keys.
        Returns list of connected outputs with all properties set
        """
        outputs = list(filter(lambda o: o.display is not None, self.get_all_outputs()))
        edids = self._get_verbose_fields('EDID')
        for o in outputs:
            o.display.edid = edids[o.name]
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Connected outputs: %s", list(map(lambda o: o.name, outputs)))
        return outputs
    def _get_verbose_fields(self, field):
        """
        Get particular field of all connected displays.
        Return dictionary of {"connection_name": field_value}
        """
        ret = dict()
        items = self._xrandr(self.QUERY_KEY, self.VERBOSE_KEY)
        items = self._group_query_result(items)
        # only " connected" outputs carry the verbose fields we need
        items = filter(lambda x: x[0].find(' connected') > 0, items)
        for i in items:
            name_idx = i[0].find(' ')
            name = i[0][:name_idx]
            ret[name] = self._field_from_query_item(i, field)
        return ret
    def _field_from_query_item(self, item_lines: list, field: str):
        """
        Extracts display field from xrandr --verbose output
        """
        val = ''
        indent = ''
        in_field = False
        lines_collected = 0
        for i, line in enumerate(item_lines):
            m = re.match(r'(\s+)(.*):\s*(.*)$', line)
            if m and m.group(2).lower() == field.lower():
                # found the "<field>:" header; remember its indentation so we
                # can detect where the (possibly multi-line) value ends
                indent = m.group(1)
                in_field = True
                val = m.group(3).strip()
            elif in_field and m and (len(indent) >= len(m.group(1)) or m.group(1) == indent):
                # another field at the same or shallower indent ends the value
                return val
            elif in_field and not line.startswith(indent):
                return val
            elif in_field:
                # continuation line: append to the accumulated value
                val += line.strip()
                lines_collected += 1
                # EDID blocks are 8 hex lines; stop early once collected
                if field == 'EDID' and lines_collected >= 8:
                    return val
        return val
    def _parse_xrandr_connection(self, item_lines: list):
        """
        Creates XrandrConnection from lines returned by xrandr --query.
        Example:
        LVDS1 connected primary 1366x768+0+312 (normal left inverted right x axis y axis) 277mm x 156mm
        1366x768 60.02*+
        1024x768 60.00
        """
        connection_info = item_lines[0]
        name, status, state = connection_info.split(' ', 2)
        if status != 'connected':
            # We are not connected, do not parse the rest.
            return XrandrConnection(name)
        # We are connected parse connected display.
        display = self._parse_display(item_lines[1:])
        if not display.is_on():
            # inactive output
            return XrandrConnection(name, display)
        parsed = self.OUTPUT_DETAILS_REGEX.match(state)
        if parsed is None:
            raise ParseException(name, status, state)
        primary = parsed.group('primary') is not None
        rotate = parsed.group('rotate')
        panning = parsed.group('panning')
        geometry = parsed.group('geometry')
        size, pos = self._parse_geometry(geometry)
        is_rotated = rotate in ['left', 'right']
        if is_rotated:
            # viewport WxH is swapped relative to the display's native mode
            size = 'x'.join(size.split('x')[::-1])
        scale = '1x1'
        if size != display.mode:
            # viewport differs from the mode, so a scale factor is in effect;
            # recover it from the size ratio
            dw, dh = map(lambda s: int(s), display.mode.split('x'))
            vw, vh = map(lambda s: int(s), size.split('x'))
            sw, sh = vw / dw, vh / dh
            if is_rotated:
                sw, sh = sh, sw
            scale = "{}x{}".format(sw, sh)
        viewport = Viewport(size, pos, rotate, panning, scale)
        return XrandrConnection(name, display, viewport, primary)
    def _parse_display(self, lines: list):
        # Parse the indented mode lines that follow a connection line:
        # '*' marks the current mode, '+' the preferred one.
        supported_modes = []
        preferred_mode = None
        current_mode = None
        current_rate = None
        for mode_line in lines:
            mode_line = mode_line.strip()
            (mode, rate, extra) = self.CURRENT_MODE_REGEX.match(mode_line).groups()
            current = (extra.find("*") >= 0)
            preferred = (extra.find("+") >= 0)
            supported_modes.append(mode)
            if current:
                current_mode = mode
                current_rate = rate
            if preferred:
                preferred_mode = mode
        return Display(supported_modes, preferred_mode, current_mode, current_rate)
    def _group_query_result(self, query_result: list):
        """
        Group input list of lines such that every line starting with a non-whitespace character is a start of a
        group, and every subsequent line starting with whitespace is a member of that group.
        :param query_result: list of lines
        :return: list of lists of lines
        """
        def group_fn(result, line):
            # We append
            if type(result) is str:
                # first reduction step: result is still the very first line
                if line.startswith(' ') or line.startswith('\t'):
                    return [[result, line]]
                else:
                    return [[result], [line]]
            else:
                if line.startswith(' ') or line.startswith('\t'):
                    # indented line belongs to the most recent group
                    last = result[len(result) - 1]
                    last.append(line)
                    return result
                else:
                    result.append([line])
                    return result
        # TODO rewrite in imperative code
        grouped = reduce(lambda result, line: group_fn(result, line), query_result)
        return grouped
    def _parse_geometry(self, s: str):
        """
        Parses geometry string (i.e. 1111x2222+333+444) into tuple (widthxheight, leftxtop)
        """
        match = self.MODE_REGEX.match(s)
        mode = match.group(1)
        pos = match.group(2).replace('+', 'x')
        return mode, pos
| edio/randrctl | randrctl/xrandr.py | Python | gpl-3.0 | 10,440 |
from __future__ import (absolute_import, division, print_function)
from isis_powder.hrpd_routines.hrpd_enums import HRPD_TOF_WINDOWS
# Cylinder geometry (cm, per Mantid convention -- confirm units) and material
# of the vanadium sample used for the absorption correction.
absorption_correction_params = {
    "cylinder_sample_height": 2.0,
    "cylinder_sample_radius": 0.3,
    "cylinder_position": [0., 0., 0.],
    "chemical_formula": "V"
}
# Default cropping values are 5% off each end
# Each window dict gives the vanadium TOF cropping range plus per-bank
# (min, max) TOF cropping for the focused data.
window_10_110_params = {
    "vanadium_tof_cropping": (1e4, 1.2e5),
    "focused_cropping_values": [
        (1.5e4, 1.08e5),  # Bank 1
        (1.5e4, 1.12e5),  # Bank 2
        (1.5e4, 1e5)  # Bank 3
    ]
}
window_30_130_params = {
    "vanadium_tof_cropping": (3e4, 1.4e5),
    "focused_cropping_values": [
        (3.5e4, 1.3e5),  # Bank 1
        (3.4e4, 1.4e5),  # Bank 2
        (3.3e4, 1.3e5)  # Bank 3
    ]
}
window_100_200_params = {
    "vanadium_tof_cropping": (1e5, 2.15e5),
    "focused_cropping_values": [
        (1e5, 2e5),  # Bank 1
        (8.7e4, 2.1e5),  # Bank 2
        (9.9e4, 2.1e5)  # Bank 3
    ]
}
file_names = {
    "grouping_file_name": "hrpd_new_072_01_corr.cal"
}
general_params = {
    "spline_coefficient": 70,
    # per-bank focused output binning (negative = logarithmic binning)
    "focused_bin_widths": [
        -0.0005,  # Bank 1
        -0.0005,  # Bank 2
        -0.001  # Bank 3
    ],
    "mode": "coupled"
}
def get_all_adv_variables(tof_window=HRPD_TOF_WINDOWS.window_10_110):
    """Assemble the advanced-config dict for the requested TOF window."""
    combined = dict(file_names)
    combined.update(general_params)
    combined.update(get_tof_window_dict(tof_window=tof_window))
    return combined
def get_tof_window_dict(tof_window):
    """
    Return the cropping-parameter dict for the given HRPD time-of-flight
    window; raises RuntimeError for an unrecognized window.
    """
    window_params = {
        HRPD_TOF_WINDOWS.window_10_110: window_10_110_params,
        HRPD_TOF_WINDOWS.window_30_130: window_30_130_params,
        HRPD_TOF_WINDOWS.window_100_200: window_100_200_params,
    }
    if tof_window not in window_params:
        raise RuntimeError("Invalid time-of-flight window: {}".format(tof_window))
    return window_params[tof_window]
| dymkowsk/mantid | scripts/Diffraction/isis_powder/hrpd_routines/hrpd_advanced_config.py | Python | gpl-3.0 | 1,942 |
from main.dataset import DataSet
import numpy as np
def test_dataset(load_image_data, image_size):
    """A freshly built DataSet exposes the fixture's images/labels/ids/cls."""
    images, labels, ids, cls, _ = load_image_data
    ds = DataSet(images, labels, ids, cls)

    # Class labels: ten cats and ten dogs, order unspecified.
    assert sorted(list(ds.cls)) == sorted(['cat', 'dog'] * 10)
    assert ds.cls.shape == (20,)
    assert ds.epochs_completed == 0
    assert ds.ids.shape == (20,)

    # Images are normalized float32 RGB tensors in [0, 1].
    assert ds.images.shape == (20, image_size, image_size, 3)
    assert ds.images.dtype == np.float32
    assert ds.images.min() == float(0)
    assert ds.images.max() == float(1)

    # Labels are one-hot float64 rows: cats first, then dogs.
    assert np.array_equal(ds.labels, [[1., 0.]] * 10 + [[0., 1.]] * 10)
    assert ds.labels.shape == (20, 2)
    assert ds.labels.dtype == np.float64
    assert ds.labels.min() == float(0)
    assert ds.labels.max() == float(1)
    assert ds.num_examples == 20
def test_next_batch(load_image_data, image_size):
    """Requesting a batch of 2 yields the first two (cat) samples."""
    images, labels, ids, cls, _ = load_image_data
    ds = DataSet(images, labels, ids, cls)
    imgs, lbls, batch_ids, batch_cls = ds.next_batch(batch_size=2)

    assert list(batch_cls) == ['cat', 'cat']
    assert batch_cls.shape == (2,)
    assert imgs.shape == (2, image_size, image_size, 3)
    assert imgs.dtype == np.float32
    # Both rows are the one-hot "cat" label.
    assert np.array_equal(lbls, [[1., 0.], [1., 0.]])
    assert lbls.shape == (2, 2)
    assert lbls.dtype == np.float64
    assert lbls.min() == float(0)
    assert lbls.max() == float(1)
    assert batch_ids.shape == (2,)
| jhole89/convet-image-classifier | test/test_dataset.py | Python | gpl-3.0 | 1,572 |
#!/usr/bin/python3
#
# tBB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tBB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module takes care of representing and handling settings throughout tBB.
"""
import enum
import re
import datetime
# A settings item name must look like a Python identifier.
valid_item_name = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
# strptime format used to parse "MM:SS" strings into timedeltas.
timedelta_parse_string = '%M:%S'
class UndefinedValueException(Exception):
    """Raised when convert() is called before a value has been assigned."""

    def __init__(self):
        message = 'self.value needs to be defined before converting it.'
        super().__init__(message)
class ConversionException(Exception):
    """Raised when a raw value cannot be converted to its declared type."""

    def __init__(self, value, value_type):
        message = "couldn't convert '{}' to {}.".format(value, value_type)
        super().__init__(message)
class UnknownSettingException(Exception):
    """Raised when an update refers to a setting path that does not exist.

    The offending path is kept on ``self.setting_path`` for callers.
    """

    def __init__(self, setting_path):
        self.setting_path = setting_path
        super().__init__("defined setting '{}' is unknown.".format(setting_path))
class InconsistentSettingTypeException(Exception):
    """Raised when an updated setting's type differs from the existing one.

    Attributes ``setting_path``, ``should_be`` and ``got`` record the
    conflicting path and types.
    """

    def __init__(self, setting_path, should_be, got):
        self.setting_path = setting_path
        self.should_be = should_be
        self.got = got
        message = "setting type for '{}' should be {}. Got: {}.".format(
            setting_path, should_be, got)
        super().__init__(message)
class SettingsTypes(enum.Enum):
    """Type tags attached to each SettingsItem.

    Items tagged ``unknown`` have their real type guessed from the raw
    value during SettingsItem.convert().
    """
    unknown = -1
    string = 0  # need no conversion
    integer = 1  # need no conversion
    boolean = 2  # need no conversion
    timedelta = 3
    settings_item = 4
    list = 5  # need no conversion
class SettingsItem:
    """A single named setting with a declared (or guessed) value type.

    The raw value is assigned to ``self.value`` after construction and is
    normalized in place by :meth:`convert`.  ``settings_item`` nodes hold a
    dict of child SettingsItem objects, exposed via attribute access.
    """

    def __init__(self, name, value_type):
        """Validate *name* and *value_type*; ``self.value`` starts as None.

        Raises TypeError for wrong argument types and ValueError when *name*
        is not an identifier or would shadow an existing attribute/method.
        """
        if type(name) != str:
            raise TypeError('argument name must be a string.')
        if not isinstance(value_type, SettingsTypes):
            raise TypeError('argument value_types must be a SettingsTypes instance.')
        if re.match(valid_item_name, name) is None:
            raise ValueError('settings item name is not acceptable. See tBB.settings.valid_item_name.')
        else:
            # Reject names that would shadow existing attributes (e.g. 'value',
            # 'convert'), since children are looked up through __getattr__.
            try:
                self.__getattribute__(name)
            except AttributeError:
                pass
            else:
                raise ValueError('settings item name is not acceptable. Name reserved.')
        self.name = name
        self.value = None
        self.value_type = value_type

    def convert(self):
        """Normalize ``self.value`` according to ``self.value_type``.

        For ``unknown`` items the type is guessed from the raw value.
        Raises UndefinedValueException if no value was assigned and
        ConversionException when the value does not match the type.
        """
        if self.value is None:
            raise UndefinedValueException()
        # static conversions: basically, only do type checking
        if self.value_type == SettingsTypes.string:
            if type(self.value) != str:
                raise ConversionException(self.value, self.value_type)
        elif self.value_type == SettingsTypes.integer:
            if type(self.value) != int:
                raise ConversionException(self.value, self.value_type)
        elif self.value_type == SettingsTypes.boolean:
            if type(self.value) != bool:
                raise ConversionException(self.value, self.value_type)
        # complex conversions
        elif self.value_type == SettingsTypes.timedelta:
            self.value = self.convert_to_timedelta(self.value)
        elif self.value_type == SettingsTypes.settings_item:
            self.value = self.convert_to_settings_item(self.value)
        elif self.value_type == SettingsTypes.unknown:  # make a guess on what it could be
            # NOTE: type(True) != int, so booleans are not swallowed by the
            # integer branch below.
            if type(self.value) == int:
                self.value_type = SettingsTypes.integer
            elif type(self.value) == bool:
                self.value_type = SettingsTypes.boolean
            elif type(self.value) == list:
                self.value_type = SettingsTypes.list
            elif type(self.value) == dict:
                try:
                    self.value = self.convert_to_settings_item(self.value)
                except ConversionException as exc:
                    raise exc
                else:
                    self.value_type = SettingsTypes.settings_item
            elif type(self.value) == str:
                # Strings that parse as "MM:SS" become timedeltas; everything
                # else stays a plain string.
                try:
                    self.value = self.convert_to_timedelta(self.value)
                except ConversionException:
                    self.value_type = SettingsTypes.string
                else:
                    self.value_type = SettingsTypes.timedelta

    @staticmethod
    def convert_to_timedelta(value):
        """Parse a "MM:SS" string into a datetime.timedelta."""
        try:
            tmp = datetime.datetime.strptime(value, timedelta_parse_string)
            return datetime.timedelta(minutes=tmp.minute, seconds=tmp.second)
        except (ValueError, TypeError) as exc:
            raise ConversionException(value, SettingsTypes.timedelta) from exc

    @staticmethod
    def convert_to_settings_item(value):
        """Convert a dict into a dict of converted child SettingsItem objects."""
        if type(value) != dict:
            raise ConversionException(value, SettingsTypes.settings_item)
        children = {}
        for name, elem in value.items():
            new_item = SettingsItem(name=name, value_type=SettingsTypes.unknown)
            new_item.value = elem
            children[name] = new_item
        for child in children.values():
            child.convert()
        return children

    def __getattr__(self, item):
        """Expose child settings of a settings_item node as attributes."""
        try:
            return self.__getattribute__(item)
        except AttributeError as exc:
            if self.value_type == SettingsTypes.settings_item and self.value is not None:
                if item in self.value:
                    return self.value[item]
            # BUG FIX: previously execution fell off the end for
            # non-settings_item nodes, implicitly returning None for any
            # missing attribute.  Always re-raise the AttributeError instead.
            raise exc

    def __repr__(self):
        return "<{} '{}' ({})>".format(self.__class__.__name__, self.name, self.value_type)
class Settings:
    """Root wrapper around a tree of SettingsItem nodes.

    Provides recursive updates from a second tree and attribute-style
    access delegated to the underlying tree.
    """

    def __init__(self, tree):
        # The root must itself be a SettingsItem (normally a settings_item node).
        if not isinstance(tree, SettingsItem):
            raise TypeError("expected SettingsItem instance for argument tree. Got: '{}'.".format(tree))
        self.tree = tree

    def update(self, new_tree, scope=''):
        """Merge *new_tree* into this tree, leaf by leaf.

        *scope* is the dotted path accumulated during recursion (e.g.
        'toplevel.monitoring.interval'); leaf updates must match the
        existing leaf's value_type or InconsistentSettingTypeException is
        raised.  Unknown paths raise UnknownSettingException.
        """
        if not isinstance(new_tree, SettingsItem):
            raise TypeError("expected SettingsItem instance for argument tree. "
                            "Got: '{}'.".format(new_tree))
        if type(new_tree.value) != dict:
            # Leaf: walk the dotted scope down the existing tree.
            # NOTE(review): the first path component (the root's own name) is
            # skipped via [1:] — presumes scope always starts with tree.name.
            walked_path = 'self.tree'
            try:
                setting = self.tree
                for selector in scope.split('.')[1:]:
                    walked_path += '.' + selector
                    setting = getattr(setting, selector)
            except AttributeError:
                raise UnknownSettingException(walked_path)
            else:
                if setting.value_type != new_tree.value_type:
                    raise InconsistentSettingTypeException(scope, setting.value_type,
                                                           new_tree.value_type)
                setting.value = new_tree.value
        else:
            # Branch: recurse into each child, extending the dotted scope.
            for name in new_tree.value:
                if new_tree.value_type == SettingsTypes.settings_item:
                    self.update(new_tree.value[name], scope=self.tree.name+scope+'.'+name)
                else:
                    raise TypeError("expected iterators inside new_tree to be SettingsTypes.settings_item. "
                                    "Got: {}".format(new_tree.value_type))

    @staticmethod
    def parse(json_data, name='toplevel'):
        """Build a converted SettingsItem tree from parsed JSON data."""
        tree = SettingsItem(name=name, value_type=SettingsTypes.settings_item)
        tree.value = json_data
        tree.convert()
        return tree

    def __getattr__(self, item):
        # Delegate unknown attribute lookups to the tree's child lookup.
        return self.tree.__getattr__(item)
| dpdani/tBB | tBB/settings.py | Python | gpl-3.0 | 7,813 |
import datetime
import os
import json
import re
import psycopg2 as dbapi2
from flask import Flask
from flask import redirect
from flask import request
from flask import render_template
from flask.helpers import url_for
from store import Store
from fixture import *
from sponsors import *
from curlers import *
from clubs import *
from psycopg2.tests import dbapi20
class Clubs:
    """Plain data holder mirroring one row of the CLUBS table."""

    def __init__(self, name, place, year, chair, number_of_members, rewardnumber):
        self.rewardnumber = rewardnumber
        self.number_of_members = number_of_members
        self.chair = chair
        self.year = year
        self.place = place
        self.name = name
def init_clubs_db(cursor):
    """Create the CLUBS table (if missing) and seed it with test rows.

    PLACES is a foreign key into COUNTRIES; the caller owns the cursor
    and the surrounding transaction/commit.
    """
    cursor.execute( """CREATE TABLE IF NOT EXISTS CLUBS (
        ID SERIAL,
        NAME VARCHAR(80) NOT NULL,
        PLACES INTEGER NOT NULL REFERENCES COUNTRIES(COUNTRY_ID) ON DELETE CASCADE ON UPDATE CASCADE,
        YEAR NUMERIC(4) NOT NULL,
        CHAIR VARCHAR(80) NOT NULL,
        NUMBER_OF_MEMBERS INTEGER NOT NULL,
        REWARDNUMBER INTEGER,
        PRIMARY KEY(ID)
        )""")
    add_test_data(cursor)
def add_test_data(cursor):
    """Insert four sample club rows used for development/demo data.

    PLACES values are COUNTRY_ID foreign keys; numeric columns are passed
    as quoted literals and coerced by PostgreSQL.
    """
    cursor.execute("""
        INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            'Orlando Curling Club',
            1,
            2014,
            'Bryan Pittard',
            '7865',
            '0');
        INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            'Wausau Curling Club',
            1,
            1896,
            'Jennie Moran',
            '54403',
            '11');
        INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            'Fenerbahçe',
            3,
            2011,
            'Aziz Yıldırım',
            '9002',
            '1');
        INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            'Galatasaray',
            3,
            2000,
            'Dursun Aydın Ozbek',
            '17864',
            '5'
        )""")
def add_club(app, request, club):
    """Insert *club* as a new row in the CLUBS table.

    Opens its own connection from the app's DSN; commits on success and
    rolls back on database errors (errors are printed, not raised).
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        # BUG FIX: the original created two cursors and called
        # cursor.rollback(), which is not a DB-API cursor method and raised
        # AttributeError inside the error handler.
        cursor = connection.cursor()
        try:
            cursor.execute("""INSERT INTO CLUBS
                (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
                %s,
                %s,
                %s,
                %s,
                %s,
                %s
                )""", (club.name, club.place, club.year,
                       club.chair, club.number_of_members, club.rewardnumber))
        finally:
            cursor.close()
        # Commit only after a successful insert (the original committed even
        # after a rollback, which was a harmless no-op).
        connection.commit()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
def delete_club(app, id):
    """Delete the club row with the given primary key *id*.

    Commits on success, rolls back (and prints the error) on failure.
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('DELETE FROM CLUBS WHERE ID = %s', (id,))
        finally:
            cursor.close()
        connection.commit()
    except dbapi2.Error as e:
        # BUG FIX: the original called cursor.rollback(), which does not
        # exist on DB-API cursors; rollback belongs to the connection.
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
def get_clubs_page(app):
    """Flask view body for the clubs page.

    GET renders the listing; POST dispatches on which submit button was
    pressed: "add" inserts a club, "delete" removes the checked rows,
    "search" renders matching clubs.
    """
    if request.method == 'GET':
        now = datetime.datetime.now()
        clubs = get_all_clubs(app)
        countries = get_country_names(app)
        return render_template('clubs.html',
                               clubs=clubs, countries=countries,
                               current_time=now.ctime())
    elif "add" in request.form:
        club = Clubs(request.form['name'],
                     request.form['place'],
                     request.form['year'],
                     request.form['chair'],
                     request.form['number_of_members'],
                     request.form['rewardnumber'])
        add_club(app, request, club)
        return redirect(url_for('clubs_page'))
    elif "delete" in request.form:
        for line in request.form:
            if "checkbox" in line:
                # Field names are presumably 'checkbox_<id>'; line[9:] strips
                # the 9-char prefix to recover the club id — verify template.
                delete_club(app, int(line[9:]))
        return redirect(url_for('clubs_page'))
    elif 'search' in request.form:
        clubs = search_club(app, request.form['club_to_search'])
        return render_template('clubs_search_page.html', clubs = clubs)
def get_clubs_edit_page(app,club_id):
    """Flask view body for editing a single club.

    GET renders the edit form pre-filled with the club's current data;
    POST applies the submitted values and redirects to the listing.
    """
    if request.method == 'GET':
        now = datetime.datetime.now()
        club = get_club(app, club_id)
        countries = get_country_names(app)
        return render_template('clubs_edit_page.html', current_time=now.ctime(), club=club, countries=countries)
    if request.method == 'POST':
        club = Clubs(request.form['name'],
                     request.form['place'],
                     request.form['year'],
                     request.form['chair'],
                     request.form['number_of_members'],
                     request.form['rewardnumber'])
        # The form carries the row id separately from the editable fields.
        update_club(app, request.form['id'], club)
        return redirect(url_for('clubs_page'))
def get_country_names(app):
    """Return all (country_id, country_name) rows, or None if the query fails.

    BUG FIX: previously `countries` was left unbound when the query raised,
    so the final `return countries` crashed with NameError.
    """
    countries = None
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('SELECT COUNTRY_ID,COUNTRY_NAME FROM COUNTRIES')
            countries = cursor.fetchall()
        except dbapi2.Error as e:
            print(e.pgerror)
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return countries
def get_club(app, club_id):
    """Fetch one club row (joined with its country name) by primary key.

    Returns the row tuple, or None when not found or on database error.
    """
    club = None
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            # BUG FIX: execute() parameters must be a sequence/mapping;
            # the original passed the bare club_id string.
            cursor.execute('''
                SELECT C.ID, C.NAME, S.COUNTRY_NAME, C.YEAR, C.CHAIR, C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
                FROM CLUBS AS C,COUNTRIES AS S
                WHERE (
                C.ID=%s AND C.PLACES=S.COUNTRY_ID
                )
                ''', (club_id,))
            club = cursor.fetchone()
        except dbapi2.Error as e:
            # BUG FIX: cursor.rollback() does not exist; roll back on the
            # connection instead.
            print(e.pgerror)
            connection.rollback()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return club
def update_club(app, id, club):
    """Overwrite the club row *id* with the fields of *club*.

    Commits on success; prints the error and rolls back on failure.
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute("""
                UPDATE CLUBS
                SET NAME = %s,
                    PLACES = %s,
                    YEAR = %s,
                    CHAIR=%s,
                    NUMBER_OF_MEMBERS=%s,
                    REWARDNUMBER= %s
                WHERE ID= %s
                """, (club.name, club.place, club.year,
                      club.chair, club.number_of_members, club.rewardnumber, id))
        finally:
            cursor.close()
        # Commit only after a successful UPDATE; the original committed in a
        # finally block even after errors.
        connection.commit()
    except dbapi2.Error as e:
        # BUG FIX: replaced the invalid cursor.rollback() call with a
        # proper connection-level rollback.
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
def get_all_clubs(app):
    """Return every club row joined with its country name.

    Returns a list of tuples, or None on database error.
    BUG FIXES: removed a stray `print(1)` debug statement, replaced the
    invalid cursor.rollback() call, and initialized `clubs` so the final
    return cannot raise NameError after a failed query.
    """
    clubs = None
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('''
                SELECT C.ID, C.NAME, K.COUNTRY_NAME, C.YEAR, C.CHAIR, C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
                FROM CLUBS AS C, COUNTRIES AS K
                WHERE C.PLACES=K.COUNTRY_ID
                ''')
            clubs = cursor.fetchall()
        except dbapi2.Error as e:
            print(e.pgerror)
            connection.rollback()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return clubs
def search_club(app, name):
    """Return club rows whose name matches *name* case-insensitively.

    Returns a list of tuples, or None on database error.
    BUG FIXES: the outer handler referenced the misspelled module `bapi2`
    (guaranteed NameError when triggered), and `clubs` was unbound on a
    failed query.
    """
    clubs = None
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute("""
                SELECT C.ID, C.NAME, S.COUNTRY_NAME, C.YEAR, C.CHAIR, C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
                FROM CLUBS AS C , COUNTRIES AS S
                WHERE(
                UPPER(C.NAME)=UPPER(%s) AND
                C.PLACES=S.COUNTRY_ID
                )""", (name,))
            clubs = cursor.fetchall()
        except dbapi2.Error as e:
            print(e.pgerror)
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return clubs
| itucsdb1509/itucsdb1509 | clubs.py | Python | gpl-3.0 | 8,354 |
#
# AFLTV XBMC Plugin
# Copyright (C) 2013 Kagenoshin
#
# AFLTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AFLTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AFLTV. If not, see <http://www.gnu.org/licenses/>.
#
# main imports
import sys, os, urllib2, urllib
import config
import utils
try:
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
except ImportError:
pass
def make_list():
    """Build the top-level XBMC directory: one entry per configured channel
    plus a trailing 'Settings' entry, then signal completion to XBMC.

    Python 2 module (uses print statements); errors are printed and the
    directory is closed unsuccessfully.
    """
    try:
        items = []
        __addon__ = xbmcaddon.Addon()
        # Add the other feeds listed in the config file
        for channel in config.CHANNELS:
            items.append({'name': channel['name'], 'channel': channel['channel']})
        items.append({'name': 'Settings', 'channel': 'settings'})
        # fill media list
        ok = fill_media_list(items)
    except:
        # oops print error message
        print "ERROR: %s (%d) - %s" % (sys.exc_info()[2].tb_frame.f_code.co_name, sys.exc_info()[2].tb_lineno, sys.exc_info()[1])
        ok = False
    # send notification we're finished, successfully or unsuccessfully
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=ok)
def fill_media_list(items):
    """Add one folder item per dict in *items* to the XBMC media list.

    Each dict needs 'name' and 'channel' keys; the channel is passed back
    to this plugin via the item URL.  Returns True on success, False if
    the user cancelled or an error occurred.
    """
    try:
        ok = True
        # enumerate through the list of categories and add the item to the media list
        for i in items:
            url = "%s?channel=%s" % (sys.argv[0], i['channel'])
            #thumbnail = get_thumbnail(c.channel)
            icon = "defaultfolder.png"
            listitem = xbmcgui.ListItem(i['name'], iconImage=icon)
            #listitem.setInfo('video',{'episode':s.get_num_episodes()})
            # add the item to the media list
            ok = xbmcplugin.addDirectoryItem(
                handle=int(sys.argv[1]),
                url=url,
                listitem=listitem,
                isFolder=True,
                totalItems=len(config.CHANNELS) + 1
            )
            # if user cancels, call raise to exit loop
            if (not ok):
                raise
        #xbmcplugin.setContent(handle=int(sys.argv[1]), content='tvshows')
    except:
        # user cancelled dialog or an error occurred
        d = xbmcgui.Dialog()
        d.ok('AFL Video Error', 'AFL Video encountered an error:', '    %s (%d) - %s' % (sys.exc_info()[ 2 ].tb_frame.f_code.co_name, sys.exc_info()[ 2 ].tb_lineno, sys.exc_info()[ 1 ]) )
        # user cancelled dialog or an error occurred
        print "ERROR: %s (%d) - %s" % (sys.exc_info()[ 2 ].tb_frame.f_code.co_name, sys.exc_info()[ 2 ].tb_lineno, sys.exc_info()[ 1 ],)
        ok = False
    return ok
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: adds PurpleRobotDevice.first_reading_timestamp.

    The `models` dict below is the frozen ORM state auto-generated by
    South — do not edit it by hand.
    """

    def forwards(self, orm):
        """Apply: add the new BigInteger column with a default of 0."""
        # Adding field 'PurpleRobotDevice.first_reading_timestamp'
        db.add_column(u'purple_robot_app_purplerobotdevice', 'first_reading_timestamp',
                      self.gf('django.db.models.fields.BigIntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the column added by forwards()."""
        # Deleting field 'PurpleRobotDevice.first_reading_timestamp'
        db.delete_column(u'purple_robot_app_purplerobotdevice', 'first_reading_timestamp')

    models = {
        u'purple_robot_app.purplerobotalert': {
            'Meta': {'object_name': 'PurpleRobotAlert'},
            'action_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'dismissed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'generated': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'manually_dismissed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
            'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'severity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
        },
        u'purple_robot_app.purplerobotconfiguration': {
            'Meta': {'object_name': 'PurpleRobotConfiguration'},
            'added': ('django.db.models.fields.DateTimeField', [], {}),
            'contents': ('django.db.models.fields.TextField', [], {'max_length': '1048576'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '1024'})
        },
        u'purple_robot_app.purplerobotdevice': {
            'Meta': {'object_name': 'PurpleRobotDevice'},
            'config_last_fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'config_last_user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'configuration': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'devices'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotConfiguration']"}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '1048576', 'null': 'True', 'blank': 'True'}),
            'device_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'devices'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotDeviceGroup']"}),
            'device_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256', 'db_index': 'True'}),
            'first_reading_timestamp': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
            'hash_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mute_alerts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'performance_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'max_length': '1048576'}),
            'test_device': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'purple_robot_app.purplerobotdevicegroup': {
            'Meta': {'object_name': 'PurpleRobotDeviceGroup'},
            'configuration': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'groups'", 'null': 'True', 'to': u"orm['purple_robot_app.PurpleRobotConfiguration']"}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '1048576', 'null': 'True', 'blank': 'True'}),
            'group_id': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
        },
        u'purple_robot_app.purplerobotdevicenote': {
            'Meta': {'object_name': 'PurpleRobotDeviceNote'},
            'added': ('django.db.models.fields.DateTimeField', [], {}),
            'device': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': u"orm['purple_robot_app.PurpleRobotDevice']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'max_length': '1024'})
        },
        u'purple_robot_app.purplerobotevent': {
            'Meta': {'object_name': 'PurpleRobotEvent'},
            'event': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logged': ('django.db.models.fields.DateTimeField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608', 'null': 'True', 'blank': 'True'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
        },
        u'purple_robot_app.purplerobotexportjob': {
            'Meta': {'object_name': 'PurpleRobotExportJob'},
            'destination': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'export_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'probes': ('django.db.models.fields.TextField', [], {'max_length': '8196', 'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'state': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '512'}),
            'users': ('django.db.models.fields.TextField', [], {'max_length': '8196', 'null': 'True', 'blank': 'True'})
        },
        u'purple_robot_app.purplerobotpayload': {
            'Meta': {'object_name': 'PurpleRobotPayload'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'errors': ('django.db.models.fields.TextField', [], {'max_length': '65536', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608'}),
            'process_tags': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
        },
        u'purple_robot_app.purplerobotreading': {
            'Meta': {'object_name': 'PurpleRobotReading', 'index_together': "[['probe', 'user_id'], ['logged', 'user_id'], ['probe', 'logged', 'user_id']]"},
            'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'guid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logged': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'payload': ('django.db.models.fields.TextField', [], {'max_length': '8388608'}),
            'probe': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'size': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'})
        },
        u'purple_robot_app.purplerobotreport': {
            'Meta': {'object_name': 'PurpleRobotReport'},
            'generated': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'report_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
        },
        u'purple_robot_app.purplerobottest': {
            'Meta': {'object_name': 'PurpleRobotTest'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'frequency': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {}),
            'probe': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'report': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
        }
    }

    complete_apps = ['purple_robot_app']
#!/usr/bin/python
import Colors
import Shapes
from abc import ABCMeta, abstractmethod
class AbstractFactory(object):
    """Abstract interface for factories that produce colors or shapes."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def get_color(self, color):
        """Return a color object for the given color name."""

    @abstractmethod
    def get_shape(self, shape):
        """Return a shape object for the given shape name."""
class ShapeFactory(AbstractFactory):
    """Factory producing Shapes instances; color requests are rejected."""

    def get_color(self, color):
        # BUG FIX: raising a bare string ('Use color factory') is illegal —
        # string exceptions were removed in Python 2.6 and raise TypeError
        # at runtime.  Raise a real exception with the same message.
        raise NotImplementedError('Use color factory')

    def get_shape(self, shape):
        """Return a Circle, Rectangle or Square for *shape* (case-insensitive).

        Raises InvalidShapeException for any other name.
        """
        shape = shape.lower()
        if shape == 'circle':
            return Shapes.Circle()
        elif shape == 'rectangle':
            return Shapes.Rectangle()
        elif shape == 'square':
            return Shapes.Square()
        else:
            raise InvalidShapeException('unknown shape: {0}'.format(shape))
class ColorFactory(AbstractFactory):
    """Factory producing Colors instances; shape requests are rejected."""

    def get_color(self, color):
        """Return a Red, Green or Blue object for *color* (case-insensitive).

        Raises InvalidColorException for any other name.
        """
        color = color.lower()
        if color == 'red':
            return Colors.Red()
        elif color == 'green':
            return Colors.Green()
        elif color == 'blue':
            return Colors.Blue()
        else:
            raise InvalidColorException('unknown color: {0}'.format(color))

    def get_shape(self, shape):
        # BUG FIX: raising a bare string ('use shape factory') is illegal —
        # string exceptions were removed in Python 2.6 and raise TypeError
        # at runtime.  Raise a real exception with the same message.
        raise NotImplementedError('use shape factory')
class FactoryProducer(object):
    """Entry point that maps a factory name to a concrete factory instance."""

    @staticmethod
    def get_factory(factory_name):
        """Return a ShapeFactory or ColorFactory for 'shape'/'color' (any case).

        Raises InvalidFactoryException for unrecognized names.
        """
        factories = {'shape': ShapeFactory, 'color': ColorFactory}
        key = factory_name.lower()
        if key not in factories:
            raise InvalidFactoryException('unknown factory specified')
        return factories[key]()
class InvalidFactoryException(Exception):
    """Raised when FactoryProducer is asked for an unknown factory name."""
class InvalidColorException(Exception):
    """Raised when ColorFactory is asked for an unknown color name."""
class InvalidShapeException(Exception):
    """Raised when ShapeFactory is asked for an unknown shape name."""
| Sunhick/design-patterns | Creational/AbstractFactory/GeometryFactory/GeometryFactory.py | Python | gpl-3.0 | 1,700 |
# -*- encoding: utf-8 -*-
"""Test class for Architecture UI"""
from fauxfactory import gen_string
from nailgun import entities
from robottelo.datafactory import generate_strings_list, invalid_values_list
from robottelo.decorators import run_only_on, tier1
from robottelo.test import UITestCase
from robottelo.ui.factory import make_arch
from robottelo.ui.locators import common_locators
from robottelo.ui.session import Session
def valid_arch_os_names():
    """Return a tuple of arch/os name dicts, one per generated string type."""
    string_types = ('alpha', 'html', 'utf8', 'alphanumeric')
    return tuple(
        {u'name': gen_string(str_type), u'os_name': gen_string(str_type)}
        for str_type in string_types
    )
class ArchitectureTestCase(UITestCase):
    """Implements Architecture tests from UI"""

    @run_only_on('sat')
    @tier1
    def test_positive_create_with_os(self):
        """@Test: Create a new Architecture with OS

        @Feature: Architecture - Positive Create

        @Assert: Architecture is created
        """
        with Session(self.browser) as session:
            # Exercise one arch/os name pair per string type (alpha, html, ...).
            for test_data in valid_arch_os_names():
                with self.subTest(test_data):
                    entities.OperatingSystem(
                        name=test_data['os_name']).create()
                    make_arch(session, name=test_data['name'],
                              os_names=[test_data['os_name']])
                    self.assertIsNotNone(
                        self.architecture.search(test_data['name']))

    @run_only_on('sat')
    @tier1
    def test_positive_create_with_name(self):
        """@Test: Create a new Architecture with different data

        @Feature: Architecture - Positive Create

        @Assert: Architecture is created
        """
        with Session(self.browser) as session:
            for name in generate_strings_list():
                with self.subTest(name):
                    make_arch(session, name=name)
                    self.assertIsNotNone(self.architecture.search(name))

    @run_only_on('sat')
    @tier1
    def test_negative_create_with_invalid_name(self):
        """@Test: Try to create architecture and use whitespace, blank, tab
        symbol or too long string of different types as its name value

        @Feature: Architecture - Negative Create

        @Assert: Architecture is not created
        """
        with Session(self.browser) as session:
            for invalid_name in invalid_values_list(interface='ui'):
                with self.subTest(invalid_name):
                    make_arch(session, name=invalid_name)
                    # The UI must show a validation error on the name field.
                    self.assertIsNotNone(self.architecture.wait_until_element(
                        common_locators['name_haserror']))

    @run_only_on('sat')
    @tier1
    def test_negative_create_with_same_name(self):
        """@Test: Create a new Architecture with same name

        @Feature: Architecture - Negative Create

        @Assert: Architecture is not created
        """
        with Session(self.browser) as session:
            for name in generate_strings_list():
                with self.subTest(name):
                    make_arch(session, name=name)
                    self.assertIsNotNone(self.architecture.search(name))
                    # Second create with the same name must be rejected.
                    make_arch(session, name=name)
                    self.assertIsNotNone(self.architecture.wait_until_element(
                        common_locators['name_haserror']))

    @run_only_on('sat')
    @tier1
    def test_positive_delete(self):
        """@Test: Delete an existing Architecture

        @Feature: Architecture - Delete

        @Assert: Architecture is deleted
        """
        # Architectures are created via the API, then deleted through the UI.
        os = entities.OperatingSystem(name=gen_string('alpha')).create()
        with Session(self.browser) as session:
            for name in generate_strings_list():
                with self.subTest(name):
                    entities.Architecture(
                        name=name, operatingsystem=[os]).create()
                    session.nav.go_to_architectures()
                    self.architecture.delete(name)

    @run_only_on('sat')
    @tier1
    def test_positive_update_name_and_os(self):
        """@Test: Update Architecture with new name and OS

        @Feature: Architecture - Update

        @Assert: Architecture is updated
        """
        old_name = gen_string('alpha')
        with Session(self.browser) as session:
            make_arch(session, name=old_name)
            self.assertIsNotNone(self.architecture.search(old_name))
            for new_name in generate_strings_list():
                with self.subTest(new_name):
                    os_name = gen_string('alpha')
                    entities.OperatingSystem(name=os_name).create()
                    self.architecture.update(
                        old_name, new_name, new_os_names=[os_name])
                    self.assertIsNotNone(self.architecture.search(new_name))
                    old_name = new_name  # for next iteration
| tkolhar/robottelo | tests/foreman/ui/test_architecture.py | Python | gpl-3.0 | 5,128 |
# nxt.server module -- LEGO Mindstorms NXT socket interface module
# Copyright (C) 2009 Marcus Wanner
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
'''Use for a socket-interface NXT driver. Command and protocol docs at:
http://code.google.com/p/nxt-python/wiki/ServerUsage'''
import nxt.locator
from nxt.motor import *
from nxt.sensor import *
from nxt.compass import *
import socket, string, sys
# NOTE(review): a `global` statement at module scope is a no-op; `brick`
# is actually created by _process_command()'s 'find_brick' branch.
global brick
# Listen on all interfaces.
host = ''
# UDP port commands are received on.
port = 54174
# UDP port replies are sent to (also hard-coded in serve_forever).
outport = 54374
def _process_port(nxtport):
    '''Translate a textual port name into the NXT port constant.

    Accepts motor ports ('A', 'B', 'C', 'ALL') and sensor ports
    ('1'..'4'), case-insensitively.  Raises ValueError for anything else.
    '''
    # Table-driven lookup replaces the original if/elif ladder.  Keys are
    # lowercase, so 'A'/'a', 'ALL'/'All'/'all' (and now any casing) work.
    port_map = {
        'a': PORT_A,
        'b': PORT_B,
        'c': PORT_C,
        'all': PORT_ALL,
        '1': PORT_1,
        '2': PORT_2,
        '3': PORT_3,
        '4': PORT_4,
    }
    try:
        return port_map[nxtport.lower()]
    except KeyError:
        # Call-style raise is valid in both Python 2 and 3 (the original
        # used the Python-2-only `raise ValueError, msg` form).
        raise ValueError('Invalid port: ' + nxtport)
def _process_command(cmd):
    '''Dispatch one textual protocol command and build its reply.

    Returns a (retcode, retmsg) tuple: retcode is 0 on success and 1 on
    failure; retmsg is a human-readable result or the error text.  The
    'find_brick' command stores the connection in the module-level `brick`
    global, which every other command then uses.
    '''
    global brick
    retcode = 0
    retmsg = ''
    #act on messages, these conditions can be in no particular order
    #it should send a return code on port 54374. 0 for success, 1 for failure
    #then an error message
    if cmd.startswith('find_brick'):
        try:
            brick = nxt.locator.find_one_brick()
            brick = brick.connect()
            retmsg = 'Connected to brick.'
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('get_touch_sample'):
        # Command format: get_touch_sample:<port>
        try:
            port = string.split(cmd, ':')[1]
            port = _process_port(port)
            retmsg = str(TouchSensor(brick, port).get_sample())
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('get_sound_sample'):
        try:
            port = string.split(cmd, ':')[1]
            port = _process_port(port)
            retmsg = str(SoundSensor(brick, port).get_sample())
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('get_light_sample'):
        # Command format: get_light_sample:<port>[,<emit>]
        try:
            data = string.split(cmd, ':')[1]
            data = string.split(data, ',')
            if len(data) > 1:
                #there is emit light data
                # NOTE(review): `emit` stays a string here, so any
                # non-empty value (including '0') is truthy — confirm
                # against the documented wire protocol.
                port, emit = data
            else:
                port, emit = data[0], False
            port = _process_port(port)
            light = LightSensor(brick, port)
            light.set_illuminated(emit)
            retmsg = str(light.get_sample())
            light.set_illuminated(False)
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('get_ultrasonic_sample'):
        try:
            port = string.split(cmd, ':')[1]
            port = _process_port(port)
            retmsg = str(UltrasonicSensor(brick, port).get_sample())
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('get_accelerometer_sample'):
        try:
            port = string.split(cmd, ':')[1]
            port = _process_port(port)
            retmsg = str(AccelerometerSensor(brick, port).get_sample())
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('get_compass_sample'):
        try:
            port = string.split(cmd, ':')[1]
            port = _process_port(port)
            retmsg = str(CompassSensor(brick, port).get_sample())
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('update_motor:'):
        # Command format: update_motor:<port|(p1;p2;...)>,<power>,<tacholim>
        try:
            #separate the information from the command keyword
            info = string.split(cmd, ':')[1]
            [port, power, tacholim] = string.split(info, ',')
            portarray = []
            if port.count('(') > 0 and port.count(')') > 0:
                #there are more than 1 ports, separate them
                port = port.strip('()')
                #port.strip(')')
                # NOTE(review): str.replace returns a new string; this
                # result is discarded, so embedded spaces survive — likely
                # a latent bug, left as-is here.
                port.replace(' ', '')
                for separateport in string.split(port, ';'):
                    portarray.append(separateport)
            else:
                #one port, just use that
                portarray.append(port)
            #process the port
            for currentport in portarray:
                processedport = _process_port(currentport)
                Motor(brick, processedport).update(int(power), int(tacholim))
            retmsg = 'Motor command succeded.'
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('run_motor:'):
        try:
            #separate the information from the command keyword
            info = string.split(cmd, ':')[1]
            [port, power, regulated] = string.split(info, ',')
            port = _process_port(port)
            Motor(brick, port).run(int(power), int(regulated))
            retmsg = 'Motor run command succeded.'
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('stop_motor:'):
        try:
            #separate the information from the command keyword
            info = string.split(cmd, ':')[1]
            [port, braking] = string.split(info, ',')
            port = _process_port(port)
            Motor(brick, port).stop(int(braking))
            retmsg = 'Motor stop command succeded.'
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    elif cmd.startswith('play_tone:'):
        # Command format: play_tone:<frequency>,<duration>
        try:
            #separate the information from the command keyword
            info = string.split(cmd, ':')[1]
            [freq, dur] = string.split(info, ',')
            #call the function
            brick.play_tone_and_wait(int(freq), int(dur))
            retmsg = 'Tone command succeded.'
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    #close_brick
    elif cmd == 'close_brick':
        try:
            brick.close()
            retcode = 0
        except:
            retcode = 1
            retmsg = str(sys.exc_info()[1])
    #command not recognised
    else:
        retmsg = 'Command not found.'
        retcode = 1
    #then return 1 or 0 and a message
    return retcode, retmsg
def serve_forever(password=None, authorizedips = []):
'''Serve clients until the window is closed or there is an unhandled error.
If you supply a password, then any ip that wants to control the NXT will have
to send the password once to be authorized before any of the commands it sends
will be carried out.
authorizedips is a list of the ips that can have access to the NXT without
supplying a password. Normally, this is left blank.'''
#make sockets
outsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
insock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
insock.bind((host, port))
while 1:
#get a message from port on any host
inmsg, (clientip,assignedport) = insock.recvfrom(100) #no commands can be longer than 100 chars
#print a helpful message to the console.
print 'Got command '+inmsg+' from '+clientip
#process command
if password:
#password protection enabled
try:
authorizedips.index(clientip)
#ip is authorized, and is therefore in the list of authorized ip
code, message = _process_command(inmsg) #process the command as normal
except ValueError:
#ip not authorized, and therefore cannot be found in the list of authorized ips
if inmsg == str(password):
#command is the correct password
authorizedips.append(clientip)
code = 0
message = 'Authorization successful.'
else:
#command is not the password
code = 1
message = 'NXT access on this server is password protected, please send correct password to be authorized.'
else:
#not password protected
code, message = _process_command(inmsg)
#send return code to the computer that send the request
outsock.sendto(str(code) + message, (clientip, 54374))
#print a summany of the response
print 'Sent return code '+str(code)+' with message "'+message+'" to '+clientip
print ''
#do again
#serve automatically if the script is started
#by double-clicking or by command line.
#serve automatically if the script is started
#by double-clicking or by command line.
if __name__ == '__main__':
    # Fix: the original wrapped sys.argv[1] in a bare `except:`, which also
    # swallows KeyboardInterrupt/SystemExit; only IndexError is possible,
    # so test the length explicitly instead.
    password = sys.argv[1] if len(sys.argv) > 1 else None
    serve_forever(password)
| skorokithakis/nxt-python | nxt/server.py | Python | gpl-3.0 | 9,781 |
# THIS IS THE PYTHON CODE FOR PiFACE OUTPUT OFF
#
# Copyright (C) 2014 Tim Massey
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Also add information on how to contact you by electronic and paper mail.
#!/usr/bin/python
import pifacedigitalio

# Connect to the default PiFace Digital board attached to the Raspberry Pi.
pifacedigital = pifacedigitalio.PiFaceDigital()
# Switch output pin 7 off.
pifacedigital.output_pins[7].turn_off()
| timm-tem/RPi_mediaserver | piface/output7off.py | Python | gpl-3.0 | 966 |
# Authors: John Dennis <jdennis@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
# Module exports
__all__ = ['log_mgr', 'root_logger', 'standard_logging_setup',
'IPA_ROOT_LOGGER_NAME', 'ISO8601_UTC_DATETIME_FMT',
'LOGGING_FORMAT_STDERR', 'LOGGING_FORMAT_STDOUT', 'LOGGING_FORMAT_FILE']
#-------------------------------------------------------------------------------
import sys
import re
import copy
from log_manager import LogManager, parse_log_level
#-------------------------------------------------------------------------------
# Our root logger, all loggers will be descendents of this.
IPA_ROOT_LOGGER_NAME = 'ipa'
# Format string for time.strftime() to produce a ISO 8601 date time
# formatted string in the UTC time zone.
ISO8601_UTC_DATETIME_FMT = '%Y-%m-%dT%H:%M:%SZ'
# Logging format string for use with logging stderr handlers
LOGGING_FORMAT_STDERR = 'ipa: %(levelname)s: %(message)s'
# Logging format string for use with logging stdout handlers
LOGGING_FORMAT_STDOUT = '[%(asctime)s %(name)s] <%(levelname)s>: %(message)s'
# Logging format string for use with logging file handlers
LOGGING_FORMAT_FILE = '\t'.join([
'%(asctime)s',
'%(process)d',
'%(threadName)s',
'%(name)s',
'%(levelname)s',
'%(message)s',
])
# Used by standard_logging_setup() for console message
LOGGING_FORMAT_STANDARD_CONSOLE = '%(name)-12s: %(levelname)-8s %(message)s'
# Used by standard_logging_setup() for file message
LOGGING_FORMAT_STANDARD_FILE = '%(asctime)s %(levelname)s %(message)s'
#-------------------------------------------------------------------------------
class IPALogManager(LogManager):
    '''
    Subclass the LogManager to enforce some IPA specific logging
    conventions.

    * Default to timestamps in UTC.
    * Default to ISO 8601 timestamp format.
    * Default the message format.
    '''

    # Matches Env config keys of the form log_logger_level_<level>, where
    # <level> is a symbolic level name or a numeric level.
    log_logger_level_config_re = re.compile(r'^log_logger_level_(debug|info|warn|warning|error|critical|\d+)$')

    def __init__(self, configure_state=None):
        '''
        :parameters:
          configure_state
            Used by clients of the log manager to track the
            configuration state, may be any object.
        '''
        super(IPALogManager, self).__init__(IPA_ROOT_LOGGER_NAME, configure_state)

    def configure_from_env(self, env, configure_state=None):
        r'''
        Read the loggger configuration from the Env config. The
        following items may be configured:

        Logger Levels
          *log_logger_XXX = comma separated list of regexps*

          Logger levels can be explicitly specified for specific loggers as
          opposed to a global logging level. Specific loggers are indiciated
          by a list of regular expressions bound to a level. If a logger's
          name matches the regexp then it is assigned that level. The keys
          in the Env config must begin with "log_logger_level_" and then be
          followed by a symbolic or numeric log level, for example::

            log_logger_level_debug = ipapython\.dn\..*
            log_logger_level_35 = ipalib\.plugins\.dogtag

          The first line says any logger belonging to the ipapython.dn module
          will have it's level configured to debug.

          The second line say the ipa.plugins.dogtag logger will be
          configured to level 35.

          Note: logger names are a dot ('.') separated list forming a path
          in the logger tree. The dot character is also a regular
          expression metacharacter (matches any character) therefore you
          will usually need to escape the dot in the logger names by
          preceeding it with a backslash.

        The return value of this function is a dict with the following
        format:

        logger_regexps
          List of (regexp, level) tuples

        :parameters:
          env
            Env object configuration values are read from.
          configure_state
            If other than None update the log manger's configure_state
            variable to this object. Clients of the log manager can
            use configure_state to track the state of the log manager.
        '''
        logger_regexps = []
        config = {'logger_regexps' : logger_regexps,
                 }

        # Forward the plain debug/verbose flags when present.
        for attr in ('debug', 'verbose'):
            value = getattr(env, attr, None)
            if value is not None:
                config[attr] = value

        for attr in list(env):
            # Get logger level configuration
            match = IPALogManager.log_logger_level_config_re.search(attr)
            if match:
                value = match.group(1)
                level = parse_log_level(value)
                value = getattr(env, attr)
                # Fix: raw string for the regex (the original '\s*,\s*'
                # relied on an invalid escape sequence).
                regexps = re.split(r'\s*,\s*', value)
                # Add the regexp, it maps to the configured level
                for regexp in regexps:
                    logger_regexps.append((regexp, level))
                continue

        self.configure(config, configure_state)
        return config

    def create_log_handlers(self, configs, logger=None, configure_state=None):
        'Enforce some IPA specific configurations'
        configs = copy.copy(configs)

        for cfg in configs:
            # Supply the IPA defaults for anything the caller left out.
            if 'time_zone_converter' not in cfg:
                cfg['time_zone_converter'] = 'utc'
            if 'datefmt' not in cfg:
                cfg['datefmt'] = ISO8601_UTC_DATETIME_FMT
            if 'format' not in cfg:
                cfg['format'] = LOGGING_FORMAT_STDOUT

        return super(IPALogManager, self).create_log_handlers(configs, logger, configure_state)
#-------------------------------------------------------------------------------
def standard_logging_setup(filename=None, verbose=False, debug=False,
                           filemode='w', console_format=LOGGING_FORMAT_STANDARD_CONSOLE):
    '''Configure the shared log manager with the conventional handlers.

    :parameters:
      filename
        Optional log file path; when given, a file handler logging at
        debug level (mode 0600) is installed.
      verbose
        Log at info level on the console (overridden by debug).
      debug
        Log at debug level on the console.
      filemode
        Mode the log file is opened with ('w' truncates, 'a' appends).
      console_format
        Format string for the console handler.

    Returns the root logger.
    '''
    handlers = []

    # File output is always logged at debug level
    if filename is not None:
        file_handler = dict(name='file',
                            filename=filename,
                            filemode=filemode,
                            permission=0o600,
                            level='debug',
                            format=LOGGING_FORMAT_STANDARD_FILE)
        handlers.append(file_handler)

    # Fix: dict.has_key() is Python-2-only; the `in` operator works in
    # both Python 2 and 3.
    if 'console' in log_mgr.handlers:
        log_mgr.remove_handler('console')
    level = 'error'
    if verbose:
        level = 'info'
    if debug:
        level = 'debug'
    console_handler = dict(name='console',
                           stream=sys.stderr,
                           level=level,
                           format=console_format)
    handlers.append(console_handler)

    # default_level must be debug because we want the file handler to
    # always log at the debug level.
    log_mgr.configure(dict(default_level='debug',
                           handlers=handlers),
                      configure_state='standard')

    return log_mgr.root_logger
#-------------------------------------------------------------------------------
# Single shared instance of log manager
#
# By default always starts with stderr console handler at error level
# so messages generated before logging is fully configured have some
# place to got and won't get lost.
# The single shared log manager instance used throughout IPA.
log_mgr = IPALogManager()
# Bootstrap configuration: a stderr console handler at error level, so
# messages emitted before full configuration still have somewhere to go.
log_mgr.configure(dict(default_level='error',
                       handlers=[dict(name='console',
                                      stream=sys.stderr)]),
                  configure_state='default')
# Convenience alias for the root 'ipa' logger.
root_logger = log_mgr.root_logger
| hroncok/freeipa | ipapython/ipa_log_manager.py | Python | gpl-3.0 | 8,302 |
import sys
import regex as re
from BeautifulSoup import BeautifulStoneSoup as bss
# REQUIRES BeautifulSoup3. BS4 breaks on Python recursion errors when it gets badly damaged texts.
def cleanup(infile, outfile):
    # main routine. takes a file handle for input and output; called at the bottom of the file.
    # Relies on the module-level `self_closing` and `fix_case` globals.
    # Closes `outfile` when done (but not `infile`).
    text = infile.read()
    # custom regexes for things that BeautifulSoup can't handle go here. Keep to a minimum. Below examples are for the Encyc.
    # text = re.sub(r"<\?>",r"<gap reason='omitted' unit='character' />",text)
    # text = re.sub(r"<\->",r"<gap reason='omitted' unit='bracket' />",text)
    # text = re.sub(r"<MVO_PIM=\"(.*?)\">",r'<figure><graphic url="\g<1>"></graphic></figure>',text)
    # text = re.sub(r"<omit=(.*?)>",r"<gap reason='omitted' unit='\g<1>' />",text)
    # NOTE(review): debug output — prints the input size to stdout.
    print(len(text))
    soup = bss(text, selfClosingTags=self_closing)
    # BeautifulSoup lowercases all element names; restore canonical TEI casing.
    for tag in soup.findAll():
        if tag.name in fix_case:
            tag.name = fix_case[tag.name]
    print(soup, file=outfile)
    outfile.close()
# Annoyingly, BeautifulSoup requires you to declare ALL self-closing tags yourself; it will badly mangle your text if you miss one, so get this right.
# These tags are parsed as empty (self-closing) elements.
self_closing = ["p", "index", "pb", "milestone", "col", "c", "omit", "hr1", "hr2", "hr3", "file_group", "gap", "volume"]
# BeautifulSoup lowercases all element names; to get things closer to standard TEI, I've included a list here which I use to restore them after parsing
# Fix: the original list contained the entire TEI element set twice
# (~212 duplicate entries). Duplicates were harmless for building
# `fix_case` but pure noise; this is the deduplicated set, first-seen
# order preserved.
capitalized_elements = [
    "accMat", "addName", "addrLine", "addSpan", "adminInfo", "altGrp",
    "altIdent", "altIdentifier", "appInfo", "attDef", "attList", "attRef",
    "biblFull", "biblScope", "biblStruct", "binaryObject", "bindingDesc",
    "calendarDesc", "castGroup", "castItem", "castList", "catDesc", "catRef",
    "charDecl", "charName", "charProp", "citedRange", "classCode",
    "classDecl", "classRef", "classSpec", "constraintSpec", "cRefPattern",
    "custEvent", "custodialHist", "damageSpan", "decoDesc", "decoNote",
    "defaultVal", "delSpan", "dictScrap", "divGen", "docAuthor", "docDate",
    "docEdition", "docImprint", "docTitle", "editionStmt", "editorialDecl",
    "egXML", "eLeaf", "elementRef", "elementSpec", "encodingDesc",
    "entryFree", "eTree", "fDecl", "fDescr", "figDesc", "fileDesc",
    "finalRubric", "fLib", "floatingText", "fsConstraints", "fsdDecl",
    "fsDecl", "fsDescr", "fsdLink", "fvLib", "genName", "geoDecl",
    "geogFeat", "geogName", "glyphName", "gramGrp", "handDesc", "handNote",
    "handNotes", "handShift", "headItem", "headLabel", "iNode", "interpGrp",
    "iType", "joinGrp", "lacunaEnd", "lacunaStart", "langKnowledge",
    "langKnown", "langUsage", "layoutDesc", "linkGrp", "listApp",
    "listBibl", "listChange", "listEvent", "listForest", "listNym",
    "listOrg", "listPerson", "listPlace", "listPrefixDef", "listRef",
    "listRelation", "listTranspose", "listWit", "localName", "locusGrp",
    "macroRef", "macroSpec", "measureGrp", "memberOf", "metDecl", "metSym",
    "moduleRef", "moduleSpec", "msContents", "msDesc", "msIdentifier",
    "msItem", "msItemStruct", "msName", "msPart", "musicNotation",
    "nameLink", "notatedMusic", "notesStmt", "objectDesc", "objectType",
    "oRef", "orgName", "origDate", "origPlace", "oVar", "particDesc",
    "persName", "personGrp", "physDesc", "placeName", "postBox",
    "postCode", "pRef", "prefixDef", "profileDesc", "projectDesc",
    "publicationStmt", "pubPlace", "pVar", "rdgGrp", "recordHist",
    "recordingStmt", "refsDecl", "refState", "relatedItem", "relationGrp",
    "respStmt", "revisionDesc", "roleDesc", "roleName", "samplingDecl",
    "schemaSpec", "scriptDesc", "scriptNote", "scriptStmt", "sealDesc",
    "secFol", "seriesStmt", "settingDesc", "soCalled", "socecStatus",
    "sourceDesc", "sourceDoc", "spanGrp", "specDesc", "specGrp",
    "specGrpRef", "specList", "spGrp", "stdVals", "styleDefDecl",
    "substJoin", "superEntry", "supportDesc", "surfaceGrp", "tagsDecl",
    "tagUsage", "TEI", "teiCorpus", "teiHeader", "textClass", "textDesc",
    "textLang", "titlePage", "titlePart", "titleStmt", "typeDesc",
    "typeNote", "unicodeName", "valDesc", "valItem", "valList", "vAlt",
    "variantEncoding", "vColl", "vDefault", "vLabel", "vMerge", "vNot",
    "vRange", "witDetail", "witEnd", "witStart",
]
# Map lowercased element names back to their canonical TEI capitalization,
# used by cleanup() to undo BeautifulSoup's lowercasing.
fix_case = {el.lower(): el for el in capitalized_elements}

for filename in sys.argv[1:]:
    print("Cleaning %s" % filename, file=sys.stderr)
    filenameout = filename + ".xml"
    # Fix: use context managers so both handles are closed even on error
    # (the original never closed `infile`; cleanup() only closed `outfile`).
    with open(filename) as infile, open(filenameout, "w") as outfile:
        cleanup(infile, outfile)
| ARTFL-Project/PhiloLogic4 | extras/utilities/tei_cleanup.py | Python | gpl-3.0 | 8,791 |
import os
from . import SoccerFSA
from . import DataStates
class SoccerPlayer(SoccerFSA.SoccerFSA):
    # Data-collection behavior: records goal-post distance/bearing samples
    # to CSV files on the robot for later calibration analysis.
    def __init__(self, brain):
        SoccerFSA.SoccerFSA.__init__(self,brain)
        self.addStates(DataStates)
        self.setName('pData')
        # Ground-truth distance (cm, presumably — TODO confirm) the robot is
        # placed from the posts; also part of the output filename.
        self.postDistance = 50
        # Distance used for the previous recording session; lets us detect
        # when postDistance changed so stale data files get removed.
        self.lastDistance = 0
        # Specify which object is being studied
        self.objects = (self.brain.ygrp, self.brain.yglp)

    def savePostInfo(self):
        # Append one (dist, bearing) row per visible post to the CSV file
        # for the current postDistance. Skips entirely when neither post
        # is seen (both distances zero).
        both_zero = True
        for obj in self.objects:
            if obj.dist != 0.0:
                both_zero = False
                break
        if both_zero:
            return
        filename = "/home/root/postDistData" + str(self.postDistance) + ".csv"
        # need to remove it if it exists already and make way
        # for new data
        if self.lastDistance != self.postDistance and \
           os.path.exists(filename):
            self.lastDistance = self.postDistance
            os.remove(filename)
            # Fresh file: write the CSV header first.
            # (NOTE: the local name `csv` shadows the stdlib module name,
            # which is not imported here, so it is harmless.)
            csv = open(filename,'a+')
            csv.write("dist,bearing\n")
        else :
            csv = open(filename,'a+')
        for obj in self.objects:
            # Only record plausible sightings: non-zero distance within
            # 100 units of the known true distance.
            if obj.dist !=0.0 and abs(obj.dist - self.postDistance) < 100:
                csv.write(str(obj.dist) + "," + str(obj.bearing) + '\n')
                print obj.dist, obj.bearing
        csv.close()
| northern-bites/nao-man | noggin/players/pData.py | Python | gpl-3.0 | 1,354 |
# -*- coding: utf-8 -*-
__author__= "Luis C. Pérez Tato (LCPT) and Ana Ortega (AOO)"
__copyright__= "Copyright 2015, LCPT and AOO"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
import xc_base
import geom
import xc
from solution import predefined_solutions
from model import predefined_spaces
from materials import typical_materials
import math
# Verification test: cantilever beam with the section rotated 90 degrees;
# checks strong/weak axis directions and tip deflections against theory.
b= 0.4
h= 0.8
A= b*h
E= 200000*9.81/1e-4 # Estimated concrete elastic modulus.
nu= 0.3 # Poisson's ratio
G= E/(2*(1+nu)) # Shear modulus
Iy= (1/12.0*h*b**3) # Cross section moment of inertia (m4)
Iz= (1/12.0*b*h**3) # Cross section moment of inertia (m4)
J= 0.721e-8 # Cross section torsion constant (m4)
L= 1 # Element length expressed in meters.
F= 1.0e3 # Load magnitude (kN)

# Problem type
feProblem= xc.FEProblem()
preprocessor=  feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
modelSpace= predefined_spaces.StructuralMechanics3D(nodes)
nodes.defaultTag= 1 #First node number.
nod= nodes.newNodeXYZ(0.0,0.0,0.0)
nod= nodes.newNodeXYZ(L,0.0,0)

# Materials
sectionProperties= xc.CrossSectionProperties3d()
sectionProperties.A= A; sectionProperties.E= E; sectionProperties.G= G
sectionProperties.Iz= Iz; sectionProperties.Iy= Iy; sectionProperties.J= J
# Rotate the section 90 degrees: strong and weak axes swap roles.
sectionProperties.rotate(math.radians(90))
section= typical_materials.defElasticSectionFromMechProp3d(preprocessor, "section",sectionProperties)
lin= modelSpace.newLinearCrdTransf("lin",xc.Vector([0,1,0]))

# Elements definition
elements= preprocessor.getElementHandler
elements.defaultTransformation= "lin"
elements.defaultMaterial= "section"
elements.defaultTag= 1 #Tag for the next element.
beam3d= elements.newElement("ElasticBeam3d",xc.ID([1,2]))

sectionAngle= 0
fuerte= beam3d.getVDirStrongAxisGlobalCoord(True) # initialGeometry= True
debil= beam3d.getVDirWeakAxisGlobalCoord(True) # initialGeometry= True
sectionAngle= beam3d.getStrongAxisAngle()
# Axis-direction checks: these components should vanish after rotation.
ratio1= ((debil[0])**2+(debil[2])**2)
ratio2= ((fuerte[0])**2+(fuerte[1])**2)

# Constraints
modelSpace.fixNode000_000(1)

# Loads definition
loadHandler= preprocessor.getLoadHandler
lPatterns= loadHandler.getLoadPatterns
#Load modulation.
ts= lPatterns.newTimeSeries("constant_ts","ts")
lPatterns.currentTimeSeries= "ts"
lp0= lPatterns.newLoadPattern("default","0")
lp0.newNodalLoad(2,xc.Vector([0,-F,F,0,0,0]))
#We add the load case to domain.
lPatterns.addToDomain("0")

# Solution
analisis= predefined_solutions.simple_static_linear(feProblem)
result= analisis.analyze(1)

# Theoretical cantilever tip deflections: delta = F*L^3/(3*E*I).
deltaYTeor= (-F*L**3/(3*E*Iz))
deltaZTeor= (F*L**3/(3*E*Iy))
nodes= preprocessor.getNodeHandler
nod2= nodes.getNode(2)
deltaY= nod2.getDisp[1]
deltaZ= nod2.getDisp[2] # Node 2 yAxis displacement
ratio3= (deltaY-deltaYTeor)/deltaYTeor
ratio4= (deltaZ-deltaZTeor)/deltaZTeor
ratio5= (deltaY/deltaZ)+(Iy/Iz)

'''
print "deltaY/deltaZ= ",deltaY/deltaZ
print "Iy/Iz= ",(Iy/Iz)
print "fuerte: ",fuerte
print "ratio1= ",ratio1
print "debil: ",debil
print "ratio2= ",ratio2
print "deltaY= ",deltaY
print "deltaYTeor= ",deltaYTeor
print "ratio3= ",ratio3
print "deltaZ= ",deltaZ
print "deltaZTeor= ",deltaZTeor
print "ratio4= ",ratio4
print "ratio5= ",ratio5
'''

import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
# All ratios must be ~zero for the test to pass.
if (ratio1 < 1e-15) & (ratio2 < 1e-15) & (abs(sectionAngle) < 1e-12) & (ratio3 < 1e-5) & (ratio4 < 1e-6) & (ratio5 < 1e-6):
    print "test ",fname,": ok."
else:
    lmsg.error(fname+' ERROR.')
| lcpt/xc | verif/tests/elements/crd_transf/test_element_axis_03.py | Python | gpl-3.0 | 3,424 |
import json
from treeherder.log_parser.parsers import (EmptyPerformanceData,
PerformanceParser)
def test_performance_log_parsing_malformed_perfherder_data():
    """
    Malformed PERFHERDER_DATA lines must be skipped while valid ones
    are still collected into the artifact.
    """
    perf_parser = PerformanceParser()

    # A line that is not valid JSON should be ignored outright.
    perf_parser.parse_line("PERFHERDER_DATA: {oh noes i am not valid json}", 1)

    # An empty payload may raise EmptyPerformanceData; that is fine too.
    try:
        perf_parser.parse_line("PERFHERDER_DATA: {}", 2)
    except EmptyPerformanceData:
        pass

    subtest = {
        "name": "240p.120fps.mp4_scale_fullscreen_startup",
        "value": 1234,
    }
    valid_payload = {
        "framework": {"name": "talos"},
        "suites": [{
            "name": "basic_compositor_video",
            "subtests": [subtest],
        }],
    }
    perf_parser.parse_line("PERFHERDER_DATA: " + json.dumps(valid_payload), 3)

    assert perf_parser.get_artifact() == [valid_payload]
| KWierso/treeherder | tests/log_parser/test_performance_parser.py | Python | mpl-2.0 | 1,056 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
import argparse
import csv
import logging
import logging.config
import os
import sys
import requests
from socorro.lib.treelib import tree_get
from socorro.signature import SignatureGenerator
DESCRIPTION = """
Given one or more crash ids via command line or stdin (one per line), pulls down information from
Socorro, generates signatures, and prints signature information.
"""
EPILOG = """
Note: In order for the SignatureJitCategory rule to work, you need a valid API token from
Socorro that has "View Personally Identifiable Information" permission.
"""
logger = logging.getLogger('socorro.signature')
# FIXME(willkg): This hits production. We might want it configurable.
API_URL = 'https://crash-stats.mozilla.com/api/'
def setup_logging(logging_level):
    """Configure the ``socorro`` logger hierarchy.

    Installs a single stderr-style console handler with a bare
    ``LEVEL: message`` format and pins the ``socorro`` logger at
    *logging_level* with propagation disabled.
    """
    console_handler = {
        'level': 'DEBUG',
        'class': 'logging.StreamHandler',
        'formatter': 'bare',
    }
    socorro_logger = {
        'propagate': False,
        'handlers': ['console'],
        'level': logging_level,
    }
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'bare': {'format': '%(levelname)s: %(message)s'},
        },
        'handlers': {'console': console_handler},
        'loggers': {'socorro': socorro_logger},
    })
class OutputBase:
    """Common interface for the signature outputters.

    Outputters are context managers: subclasses needing begin/end
    bookkeeping override ``__enter__``/``__exit__``; the rest only
    override ``data``.
    """

    def __enter__(self):
        # No setup required by default.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # No teardown required by default.
        pass

    def warning(self, line):
        """Write a warning line to stderr.

        :arg str line: the line to print to stderr
        """
        sys.stderr.write('WARNING: {}\n'.format(line))

    def data(self, crash_id, old_sig, new_sig, notes):
        """Emit one data point; no-op in the base class.

        :arg str crash_id: the crash id for the signature generated
        :arg str old_sig: the old signature from the processed crash
        :arg str new_sig: the newly generated signature
        :arg list notes: any processor notes
        """
        pass
class TextOutput(OutputBase):
    """Outputter that prints a human-readable report per crash id."""

    def data(self, crash_id, old_sig, new_sig, notes):
        """Print one signature comparison as labelled lines on stdout."""
        rows = (
            ('Crash id', crash_id),
            ('Original', old_sig),
            ('New', new_sig),
            ('Same?', old_sig == new_sig),
        )
        for label, value in rows:
            print('%s: %s' % (label, value))
        if notes:
            print('Notes: (%d)' % len(notes))
            print('\n'.join(' %s' % note for note in notes))
class CSVOutput(OutputBase):
    """Outputter that writes fully-quoted CSV rows to stdout."""

    def __enter__(self):
        writer = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
        writer.writerow(['crashid', 'old', 'new', 'same?', 'notes'])
        self.out = writer
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Drop the writer so stray data() calls fail loudly after exit.
        self.out = None

    def data(self, crash_id, old_sig, new_sig, notes):
        """Append one comparison row to the CSV stream."""
        same = str(old_sig == new_sig)
        self.out.writerow([crash_id, old_sig, new_sig, same, notes])
def fetch(endpoint, crash_id, api_token=None):
    """GET *endpoint* for *crash_id* from the Socorro API.

    When *api_token* is truthy it is sent as the ``Auth-Token`` header;
    returns the ``requests`` response object.
    """
    url = API_URL + endpoint
    params = {'crash_id': crash_id}
    if api_token:
        return requests.get(url, params=params,
                            headers={'Auth-Token': api_token})
    return requests.get(url, params=params)
def main(args):
    """Generate Socorro signatures for crash ids and compare to the old ones.

    For each crash id (from the command line or stdin), fetches the raw and
    processed crash from the Socorro API, runs signature generation on a
    minimal subset of the data, and emits old vs. new signature through the
    chosen outputter.

    :arg list args: command line arguments, excluding the program name

    :returns: 1 on fatal error (rate limited, or API returned an error);
        implicitly None (exit code 0) on success
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
    parser.add_argument(
        '-v', '--verbose', help='increase output verbosity', action='store_true'
    )
    parser.add_argument(
        '--format', help='specify output format: csv, text (default)'
    )
    parser.add_argument(
        '--different-only', dest='different', action='store_true',
        help='limit output to just the signatures that changed',
    )
    parser.add_argument(
        'crashids', metavar='crashid', nargs='*', help='crash id to generate signatures for'
    )
    # FIX: parse the args handed to main() -- previously parse_args() was
    # called with no arguments, silently ignoring this function's parameter
    # and always reading sys.argv.
    args = parser.parse_args(args)
    if args.format == 'csv':
        outputter = CSVOutput
    else:
        outputter = TextOutput
    if args.verbose:
        logging_level = 'DEBUG'
    else:
        logging_level = 'INFO'
    api_token = os.environ.get('SOCORRO_API_TOKEN', '')
    setup_logging(logging_level)
    generator = SignatureGenerator(debug=args.verbose)
    # Crash ids come from the command line, or from stdin when none given.
    crashids_iterable = args.crashids or sys.stdin
    with outputter() as out:
        for crash_id in crashids_iterable:
            crash_id = crash_id.strip()
            resp = fetch('/RawCrash/', crash_id, api_token)
            if resp.status_code == 404:
                out.warning('%s: does not exist.' % crash_id)
                continue
            if resp.status_code == 429:
                out.warning('API rate limit reached. %s' % resp.content)
                # FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
                # few minutes.
                return 1
            if resp.status_code == 500:
                out.warning('HTTP 500: %s' % resp.content)
                continue
            raw_crash = resp.json()
            # If there's an error in the raw crash, then something is wrong--probably with the API
            # token. So print that out and exit.
            if 'error' in raw_crash:
                out.warning('Error fetching raw crash: %s' % raw_crash['error'])
                return 1
            # Keep only the raw crash fields signature generation looks at.
            raw_crash_minimal = {
                'JavaStackTrace': raw_crash.get('JavaStackTrace', None),
                'OOMAllocationSize': raw_crash.get('OOMAllocationSize', None),
                'AbortMessage': raw_crash.get('AbortMessage', None),
                'AsyncShutdownTimeout': raw_crash.get('AsyncShutdownTimeout', None),
                'ipc_channel_error': raw_crash.get('ipc_channel_error', None),
                'additional_minidumps': raw_crash.get('additional_minidumps', None),
                'IPCMessageName': raw_crash.get('IPCMessageName', None),
                'MozCrashReason': raw_crash.get('MozCrashReason', None),
            }
            resp = fetch('/ProcessedCrash/', crash_id, api_token)
            if resp.status_code == 404:
                out.warning('%s: does not have processed crash.' % crash_id)
                continue
            if resp.status_code == 429:
                out.warning('API rate limit reached. %s' % resp.content)
                # FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
                # few minutes.
                return 1
            if resp.status_code == 500:
                out.warning('HTTP 500: %s' % resp.content)
                continue
            processed_crash = resp.json()
            # If there's an error in the processed crash, then something is wrong--probably with the
            # API token. So print that out and exit.
            if 'error' in processed_crash:
                out.warning('Error fetching processed crash: %s' % processed_crash['error'])
                return 1
            old_signature = processed_crash['signature']
            processed_crash_minimal = {
                'hang_type': processed_crash.get('hang_type', None),
                'json_dump': {
                    'threads': tree_get(processed_crash, 'json_dump.threads', default=[]),
                    'system_info': {
                        'os': tree_get(processed_crash, 'json_dump.system_info.os', default=''),
                    },
                    'crash_info': {
                        'crashing_thread': tree_get(
                            processed_crash, 'json_dump.crash_info.crashing_thread', default=None
                        ),
                    },
                },
                # NOTE(willkg): Classifications aren't available via the public API.
                'classifications': {
                    'jit': {
                        'category': tree_get(processed_crash, 'classifications.jit.category', ''),
                    },
                },
                'mdsw_status_string': processed_crash.get('mdsw_status_string', None),
                # This needs to be an empty string--the signature generator fills it in.
                'signature': ''
            }
            # We want to generate fresh signatures, so we remove the "normalized" field from stack
            # frames because this is essentially cached data from processing
            for thread in processed_crash_minimal['json_dump'].get('threads', []):
                for frame in thread.get('frames', []):
                    if 'normalized' in frame:
                        del frame['normalized']
            ret = generator.generate(raw_crash_minimal, processed_crash_minimal)
            if not args.different or old_signature != ret['signature']:
                out.data(crash_id, old_signature, ret['signature'], ret['notes'])
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main(sys.argv[1:]))
| Tayamarn/socorro | socorro/signature/__main__.py | Python | mpl-2.0 | 9,386 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import traceback
from StringIO import StringIO
import re
import datetime
from urllib import urlencode
from collections import defaultdict
from django import http
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import redirect_to_login
from django.db import transaction
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import redirect, get_object_or_404
from django.contrib import messages
from django.db.models import Q
from django.template import Context, loader
from django.core.mail import get_connection, EmailMessage
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.views.decorators.http import require_POST
from django.contrib.sites.models import RequestSite
from django.core.cache import cache
from django.db.models import Min, Count
import vobject
from .models import Entry, Hours, BlacklistedUser, FollowingUser, UserKey
from pto.apps.users.models import UserProfile, User
from pto.apps.users.utils import ldap_lookup
from .utils import parse_datetime, DatetimeParseError
from .utils.countrytotals import UnrecognizedCountryError, get_country_totals
import utils
import forms
from .decorators import json_view
from .csv_export import UnicodeWriter as CSVUnicodeWriter
def valid_email(value):
    """Return True if ``value`` is a well-formed email address."""
    try:
        validate_email(value)
    except ValidationError:
        return False
    return True
def handler500(request):
    """Render the 500 error page, optionally embedding the traceback.

    When settings.TRACEBACKS_ON_500 is enabled, the current exception's
    traceback is captured and handed to the template for display.
    """
    data = {'_report_traceback': False}
    if settings.TRACEBACKS_ON_500:
        err_type, err_value, err_traceback = sys.exc_info()
        buf = StringIO()
        traceback.print_exc(file=buf)
        data['err_type'] = err_type
        data['err_value'] = err_value
        data['err_traceback'] = buf.getvalue()
        data['_report_traceback'] = True
    return render(request, '500.html', data, status=500)
def home(request): # aka dashboard
    """Dashboard view.

    Redirects mobile clients to the mobile site (unless opted out via the
    'no-mobile' cookie) and anonymous users to login, then renders the
    dashboard with the user's taken-vacation summary and calendar URL.
    """
    data = {}
    data['mobile'] = request.MOBILE # thank you django-mobility (see settings)
    if data['mobile']:
        # unless an explicit cookie it set, redirect to /mobile/
        if not request.COOKIES.get('no-mobile', False):
            return redirect(reverse('mobile.home'))
    # now do what the login_required would usually do
    if not request.user.is_authenticated():
        path = request.get_full_path()
        return redirect_to_login(path)
    data['page_title'] = "Dashboard"
    profile = request.user.get_profile()
    # Week starts on Monday for these countries, Sunday otherwise.
    if profile and profile.country in ('GB', 'FR', 'DE'):
        first_day = 1  # 1=Monday
    else:
        first_day = 0  # default to 0=Sunday
    data['first_day'] = first_day
    if 'all-rightnow' in request.GET:
        MAX_RIGHT_NOWS = 9999
    else:
        MAX_RIGHT_NOWS = 20

    ## Commented out whilst we decide whether to keep it at all
    #right_nows, right_now_users = get_right_nows()
    #data['right_nows'] = right_nows
    #data['right_now_users'] = right_now_users
    #if len(right_now_users) > MAX_RIGHT_NOWS:
    #    data['right_now_too_many'] = (len(data['right_now_users'])
    #                                  - MAX_RIGHT_NOWS)
    #    data['right_now_users'] = data['right_now_users'][:MAX_RIGHT_NOWS]
    #else:
    #    data['right_now_too_many'] = None

    data.update(get_taken_info(request.user))
    data['calendar_url'] = _get_user_calendar_url(request)
    # Show the most recently filed entry once (set by the hours() view),
    # then forget it.
    cache_key = 'recently_created_%s' % request.user.pk
    recently_created = cache.get(cache_key)
    if recently_created:
        data['recently_created'] = recently_created
        cache.delete(cache_key)
    return render(request, 'dates/home.html', data)
def _get_user_calendar_url(request):
    """Return the absolute URL of the user's private vCalendar feed.

    The URL embeds a per-user secret key (created on demand) so the feed
    can be fetched without a logged-in session.
    """
    user_key, __ = UserKey.objects.get_or_create(user=request.user)
    base_url = '%s://%s' % (request.is_secure() and 'https' or 'http',
                            RequestSite(request).domain)
    return base_url + reverse('dates.calendar_vcal', args=(user_key.key,))
def get_taken_info(user):
    """Return template context about vacation taken by ``user`` this year.

    Includes the user's country, the country's vacation totals (when the
    country is recognized), and a human-friendly 'taken' string summing
    all entry hours from Jan 1 of this year to Jan 1 of next year.
    """
    data = {}

    profile = user.get_profile()
    if profile.country:
        data['country'] = profile.country
        try:
            data['country_totals'] = get_country_totals(profile.country)
        except UnrecognizedCountryError:
            data['unrecognized_country'] = True

    today = datetime.date.today()
    start_date = datetime.date(today.year, 1, 1)
    last_date = datetime.date(today.year + 1, 1, 1)
    from django.db.models import Sum
    qs = Entry.objects.filter(
        user=user,
        start__gte=start_date,
        end__lt=last_date
    )
    agg = qs.aggregate(Sum('total_hours'))
    total_hours = agg['total_hours__sum']
    if total_hours is None:
        # no entries at all this year
        total_hours = 0
    data['taken'] = _friendly_format_hours(total_hours)

    return data
def _friendly_format_hours(total_hours):
    """Return ``total_hours`` as a human-friendly days/hours string.

    Less than one work day is reported in hours; otherwise in (possibly
    fractional) days based on settings.WORK_DAY.
    """
    work_day = settings.WORK_DAY
    if not total_hours:
        return '0 days'
    if total_hours < work_day:
        return '%s hours' % total_hours
    if total_hours == work_day:
        return '1 day'
    days = 1.0 * total_hours / work_day
    if total_hours % work_day:
        # fractional day, e.g. "1.5 days"
        return '%s days' % days
    return '%d days' % days
def get_right_nows():
    """Return (right_nows, right_now_users) for entries spanning today.

    ``right_nows`` maps each user to a list of (days_left, entry) tuples;
    ``right_now_users`` keeps those users in name-sorted order.
    """
    right_now_users = []
    right_nows = defaultdict(list)
    _today = datetime.date.today()

    for entry in (Entry.objects
                  .filter(start__lte=_today,
                          end__gte=_today,
                          total_hours__gte=0)
                  .order_by('user__first_name',
                            'user__last_name',
                            'user__username')):
        if entry.user not in right_now_users:
            right_now_users.append(entry.user)
        # days remaining including today
        left = (entry.end - _today).days + 1
        right_nows[entry.user].append((left, entry))

    return right_nows, right_now_users
def get_upcomings(max_days=14):
    """Return (upcoming, users) for entries starting within ``max_days``.

    ``upcoming`` maps each user to a list of (days_until_start, entry)
    tuples; ``users`` keeps those users in name-sorted order.
    """
    users = []
    upcoming = defaultdict(list)
    today = datetime.date.today()
    max_future = today + datetime.timedelta(days=max_days)

    for entry in (Entry.objects
                  .filter(start__gt=today,
                          start__lt=max_future,
                          total_hours__gte=0)
                  .order_by('user__first_name',
                            'user__last_name',
                            'user__username')):
        if entry.user not in users:
            users.append(entry.user)
        days = (entry.start - today).days + 1
        upcoming[entry.user].append((days, entry))

    return upcoming, users
def make_entry_title(entry, this_user, include_details=True):
    """Build a human-readable title for ``entry``.

    Prefixes the owner's name when the entry isn't ``this_user``'s, then
    summarizes the duration (derived from the Hours records) and
    optionally appends truncated free-text details.

    :arg include_details: append entry.details (pass False for viewers
        who aren't allowed to see them)
    """
    if entry.user != this_user:
        if entry.user.first_name:
            title = '%s %s - ' % (entry.user.first_name,
                                  entry.user.last_name)
        else:
            title = '%s - ' % entry.user.username
    else:
        title = ''
    days = 0
    # 8 hours counts as a full day, 4 as a half day
    # (presumably matching settings.WORK_DAY -- TODO confirm).
    for hour in Hours.objects.filter(entry=entry):
        if hour.hours == 8:
            days += 1
        elif hour.hours == 4:
            days += 0.5
    if days > 1:
        if int(days) == days:
            title += '%d days' % days
        else:
            title += '%s days' % days
        if Hours.objects.filter(entry=entry, birthday=True).exists():
            title += ' (includes birthday)'
    elif (days == 1 and entry.total_hours == 0 and
          Hours.objects.filter(entry=entry, birthday=True)):
        title += 'Birthday!'
    elif days == 1 and entry.total_hours == 8:
        title += '1 day'
    else:
        title += '%s hours' % entry.total_hours
    if entry.details:
        # shorter truncation for single-day entries
        if days == 1:
            max_length = 20
        else:
            max_length = 40
        if include_details:
            title += ', '
            if len(entry.details) > max_length:
                title += entry.details[:max_length] + '...'
            else:
                title += entry.details
    return title
@json_view
def calendar_events(request):
    """JSON feed of vacation events for the dashboard calendar widget.

    Returns entries for the current user plus everyone they observe,
    restricted to the start/end range in the query string.  Each observed
    user gets a stable color; entry details are only embedded for users
    the requester may see (self, superusers, or the user's manager).
    """
    if not request.user.is_authenticated():
        return http.HttpResponseForbidden('Must be logged in')

    if not request.GET.get('start'):
        return http.HttpResponseBadRequest('Argument start missing')
    if not request.GET.get('end'):
        return http.HttpResponseBadRequest('Argument end missing')

    try:
        start = parse_datetime(request.GET['start'])
    except DatetimeParseError:
        return http.HttpResponseBadRequest('Invalid start')

    try:
        end = parse_datetime(request.GET['end'])
    except DatetimeParseError:
        return http.HttpResponseBadRequest('Invalid end')

    entries = []
    COLORS = ("#EAA228", "#c5b47f", "#579575", "#839557", "#958c12",
              "#953579", "#4b5de4", "#d8b83f", "#ff5800", "#0085cc",
              "#c747a3", "#cddf54", "#FBD178", "#26B4E3", "#bd70c7")
    user_ids = [request.user.pk]
    colors = {}
    colors_fullnames = []
    # The requester's own entries are rendered without a color override.
    colors[request.user.pk] = None
    colors_fullnames.append((request.user.pk, 'Me myself and I', '#3366CC'))
    for i, user_ in enumerate(get_observed_users(request.user, max_depth=2)):
        user_ids.append(user_.pk)
        colors[user_.pk] = COLORS[i]
        full_name = user_.get_full_name()
        if not full_name:
            full_name = user_.username
        colors_fullnames.append((
            user_.pk,
            full_name,
            colors[user_.pk]
        ))

    _managers = {}  # memoizes each user's manager pk

    def can_see_details(user):
        if request.user.is_superuser:
            return True
        if request.user.pk == user.pk:
            return True
        if user.pk not in _managers:
            _profile = user.get_profile()
            _manager = None
            if _profile and _profile.manager_user:
                _manager = _profile.manager_user.pk
            _managers[user.pk] = _manager
        return _managers[user.pk] == request.user.pk

    visible_user_ids = set()
    for entry in (Entry.objects
                  .filter(user__in=user_ids,
                          total_hours__gte=0,
                          total_hours__isnull=False)
                  .select_related('user')
                  .exclude(Q(end__lt=start) | Q(start__gt=end))):
        visible_user_ids.add(entry.user.pk)
        entries.append({
            'id': entry.pk,
            'title': make_entry_title(entry, request.user,
                                      include_details=can_see_details(entry.user)),
            'start': entry.start.strftime('%Y-%m-%d'),
            'end': entry.end.strftime('%Y-%m-%d'),
            'color': colors[entry.user.pk],
            'mine': entry.user.pk == request.user.pk,
        })
    # Only legend users who actually have visible entries in the range.
    colors = [dict(name=x, color=y) for (pk, x, y) in colors_fullnames
              if pk in visible_user_ids]
    return {'events': entries, 'colors': colors}
def get_minions(user, depth=1, max_depth=2):
    """Return users managed (directly or indirectly) by ``user``.

    Recurses through the manager_user relation up to ``max_depth`` levels.
    """
    minions = []
    for minion in (UserProfile.objects.filter(manager_user=user)
                   .select_related('manager_user')
                   .order_by('manager_user')):
        minions.append(minion.user)

        if depth < max_depth:
            minions.extend(get_minions(minion.user,
                                       depth=depth + 1,
                                       max_depth=max_depth))
    return minions
def get_siblings(user):
    """Return other users who share ``user``'s manager (teammates)."""
    profile = user.get_profile()
    if not profile.manager_user:
        return []
    users = []
    # NOTE(review): this excludes by UserProfile pk using user.pk -- only
    # correct if profiles share primary keys with their users; verify.
    for profile in (UserProfile.objects
                    .filter(manager_user=profile.manager_user)
                    .exclude(pk=user.pk)
                    .select_related('user')):
        users.append(profile.user)
    return users
def get_followed_users(user):
    """Return the users that ``user`` explicitly follows."""
    following = (FollowingUser.objects
                 .filter(follower=user)
                 .select_related('following'))
    return [each.following for each in following]
def get_observed_users(this_user, depth=1, max_depth=2):
    """Return the users whose vacation ``this_user`` gets to see.

    Combines minions (direct/indirect reports), siblings (teammates) and
    the user's manager -- each filtered by the blacklist -- plus any
    explicitly followed users (never blacklisted).
    """
    users = []

    def is_blacklisted(user):
        return (BlacklistedUser.objects
                .filter(observer=this_user, observable=user)
                .exists())

    for user in get_minions(this_user, depth=depth, max_depth=max_depth):
        if user not in users:
            if not is_blacklisted(user):
                users.append(user)

    for user in get_siblings(this_user):
        if user not in users:
            if not is_blacklisted(user):
                users.append(user)

    profile = this_user.get_profile()
    manager = profile.manager_user
    if manager and manager not in users:
        if not is_blacklisted(manager):
            users.append(manager)

    # Explicit follows bypass the blacklist.
    for user in get_followed_users(this_user):
        if user not in users:
            users.append(user)

    return users
@transaction.commit_on_success
@login_required
def notify(request):
    """Step 1 of filing vacation: pick dates, details and extra recipients.

    On a valid POST an Entry is created (without hours yet), conflicting
    unfinished entries are removed, the extra notify addresses are stashed
    in the session, and the user is redirected to the hours() step.
    GET renders the form, optionally pre-filled from ?start=/&end=.
    """
    data = {}
    data['page_title'] = "Notify about new vacation"
    if request.method == 'POST':
        form = forms.AddForm(request.user, data=request.POST)
        if form.is_valid():
            start = form.cleaned_data['start']
            end = form.cleaned_data['end']
            details = form.cleaned_data['details'].strip()
            notify = form.cleaned_data['notify']
            entry = Entry.objects.create(
              user=request.user,
              start=start,
              end=end,
              details=details,
            )
            clean_unfinished_entries(entry)

            messages.info(request, 'Entry added, now specify hours')
            url = reverse('dates.hours', args=[entry.pk])
            # picked up again by the hours() view for the email step
            request.session['notify_extra'] = notify
            return redirect(url)
    else:
        initial = {}
        if request.GET.get('start'):
            # ?start= can arrive from the calendar widget
            try:
                initial['start'] = parse_datetime(request.GET['start'])
            except DatetimeParseError:
                pass
        if request.GET.get('end'):
            try:
                initial['end'] = parse_datetime(request.GET['end'])
            except DatetimeParseError:
                pass
        form = forms.AddForm(request.user, initial=initial)

    profile = request.user.get_profile()
    manager = None
    if profile and profile.manager:
        manager = ldap_lookup.fetch_user_details(profile.manager)
    data['hr_managers'] = [x.user for x in
                           (UserProfile.objects
                            .filter(hr_manager=True)
                            .select_related('user'))]
    data['manager'] = manager
    data['all_managers'] = [x for x in data['hr_managers'] if x]
    if manager:
        data['all_managers'].append(manager)
    data['form'] = form
    return render(request, 'dates/notify.html', data)
@transaction.commit_on_success
@login_required
def cancel_notify(request):
    """Abandon filing: delete the user's entries that never got hours."""
    unfinished = Entry.objects.filter(user=request.user,
                                      total_hours__isnull=True)
    unfinished.delete()
    return redirect(reverse('dates.home'))
def clean_unfinished_entries(good_entry):
    """Delete the user's other entries that never had hours filled in.

    Keeps ``good_entry`` itself; everything else by the same user with a
    NULL total_hours is removed one by one.
    """
    unfinished = (Entry.objects
                  .filter(user=good_entry.user,
                          total_hours__isnull=True)
                  .exclude(pk=good_entry.pk))
    # delete individually (rather than a bulk queryset delete) to keep
    # per-object delete behavior
    for unfinished_entry in unfinished:
        unfinished_entry.delete()
@transaction.commit_on_success
@login_required
def hours(request, pk):
    """Step 2 of filing vacation: specify hours for each affected weekday.

    GET renders a form pre-filled with any hours already logged by this
    user on those dates (falling back to a full work day).  POST saves
    the hours, sends the notification email and redirects to the
    confirmation page.  Only the entry's owner or staff/superusers may
    access the view.
    """
    data = {}
    entry = get_object_or_404(Entry, pk=pk)
    if entry.user != request.user:
        if not (request.user.is_staff or request.user.is_superuser):
            return http.HttpResponseForbidden('insufficient access')

    if request.method == 'POST':
        form = forms.HoursForm(entry, data=request.POST)
        if form.is_valid():
            total_hours, is_edit = save_entry_hours(entry, form)

            # extra recipients stashed in the session by notify()
            extra_users = request.session.get('notify_extra', '')
            extra_users = [x.strip() for x
                           in extra_users.split(';')
                           if x.strip()]

            success, email_addresses = send_email_notification(
              entry,
              extra_users,
              is_edit=is_edit,
            )
            assert success

            #messages.info(request,
            #  '%s hours of vacation logged.' % total_hours
            #)
            # stash the title so the dashboard can show it once
            recently_created = make_entry_title(entry, request.user)
            cache_key = 'recently_created_%s' % request.user.pk
            cache.set(cache_key, recently_created, 60)

            url = reverse('dates.emails_sent', args=[entry.pk])
            url += '?' + urlencode({'e': email_addresses}, True)
            return redirect(url)
    else:
        initial = {}
        for date in utils.get_weekday_dates(entry.start, entry.end):
            try:
                #hours_ = Hours.objects.get(entry=entry, date=date)
                hours_ = Hours.objects.get(date=date, entry__user=entry.user)
                initial[date.strftime('d-%Y%m%d')] = hours_.hours
            except Hours.DoesNotExist:
                initial[date.strftime('d-%Y%m%d')] = settings.WORK_DAY
        form = forms.HoursForm(entry, initial=initial)
    data['form'] = form

    if entry.total_hours:
        data['total_hours'] = entry.total_hours
    else:
        # estimate from already-recorded hours; unknown dates count as
        # full days
        total_days = 0
        for date in utils.get_weekday_dates(entry.start, entry.end):
            try:
                hours_ = Hours.objects.get(entry=entry, date=date)
                # FIX: removed leftover debug statement
                # `print hours_.hours` that wrote to stdout in production.
                if hours_.hours == settings.WORK_DAY:
                    total_days += 1
                elif hours_.hours:
                    total_days += .5
            except Hours.DoesNotExist:
                total_days += 1
        data['total_days'] = total_days

    notify = request.session.get('notify_extra', [])
    data['notify'] = notify

    return render(request, 'dates/hours.html', data)
def save_entry_hours(entry, form):
    """Persist the per-date hours for ``entry`` from a valid HoursForm.

    For every weekday in the entry's range: if hours were already logged
    on that date (by any of the user's entries), a negative 'reverse'
    entry is created first to credit them back, then the new Hours row is
    written.  Returns (total_hours, is_edit) where is_edit is True when
    the entry already had a total_hours value.
    """
    assert form.is_valid()

    total_hours = 0
    for date in utils.get_weekday_dates(entry.start, entry.end):
        hours = int(form.cleaned_data[date.strftime('d-%Y%m%d')])
        birthday = False
        # -1 is the form's sentinel for "birthday" (counts as 0 hours)
        if hours == -1:
            birthday = True
            hours = 0
        assert hours >= 0 and hours <= settings.WORK_DAY, hours
        try:
            hours_ = Hours.objects.get(entry__user=entry.user,
                                       date=date)
            if hours_.hours:
                # this nullifies the previous entry on this date
                reverse_entry = Entry.objects.create(
                  user=hours_.entry.user,
                  start=date,
                  end=date,
                  details=hours_.entry.details,
                  total_hours=hours_.hours * -1,
                )
                Hours.objects.create(
                  entry=reverse_entry,
                  hours=hours_.hours * -1,
                  date=date,
                )
                #hours_.hours = hours  # nasty stuff!
                #hours_.birthday = birthday
                #hours_.save()
        except Hours.DoesNotExist:
            # nothing to credit
            pass
        Hours.objects.create(
          entry=entry,
          hours=hours,
          date=date,
          birthday=birthday,
        )
        total_hours += hours
    #raise NotImplementedError
    is_edit = entry.total_hours is not None
    #if entry.total_hours is not None:
    entry.total_hours = total_hours
    entry.save()
    return total_hours, is_edit
def send_email_notification(entry, extra_users, is_edit=False):
    """Email HR managers, the user's manager and extra recipients about
    a new or edited vacation entry.

    :arg entry: the Entry being announced
    :arg extra_users: additional email addresses to notify
    :arg is_edit: True when the entry was edited (switches the subject)

    :returns: (send result, list of recipient addresses); falls back to
        settings.FALLBACK_TO_ADDRESS when no recipient could be found
    """
    email_addresses = []
    for profile in (UserProfile.objects
                    .filter(hr_manager=True,
                            user__email__isnull=False)):
        email_addresses.append(profile.user.email)

    profile = entry.user.get_profile()
    if profile and profile.manager:
        manager = ldap_lookup.fetch_user_details(profile.manager)
        if manager.get('mail'):
            email_addresses.append(manager['mail'])

    if extra_users:
        email_addresses.extend(extra_users)
    email_addresses = list(set(email_addresses))  # get rid of dupes
    if not email_addresses:
        email_addresses = [settings.FALLBACK_TO_ADDRESS]

    if is_edit:
        subject = settings.EMAIL_SUBJECT_EDIT
    else:
        subject = settings.EMAIL_SUBJECT
    subject = subject % dict(
      first_name=entry.user.first_name,
      last_name=entry.user.last_name,
      username=entry.user.username,
      email=entry.user.email,
    )

    # FIX: was `message = template = loader.get_template(...)`; the
    # `message` binding was dead -- it is reassigned to the EmailMessage
    # below.
    template = loader.get_template('dates/notification.txt')
    context = {
      'entry': entry,
      'user': entry.user,
      'is_edit': is_edit,
      'settings': settings,
      'start_date': entry.start.strftime(settings.DEFAULT_DATE_FORMAT),
    }
    body = template.render(Context(context)).strip()
    connection = get_connection()
    message = EmailMessage(
      subject=subject,
      body=body,
      from_email=entry.user.email,
      to=email_addresses,
      cc=entry.user.email and [entry.user.email] or None,
      connection=connection
    )
    success = message.send()
    return success, email_addresses
@login_required
def emails_sent(request, pk):
    """Confirmation page listing who was emailed about an entry.

    Recipient addresses arrive in the ?e= query parameters; each is
    resolved to an LDAP record when possible.  Only the entry's owner or
    staff/superusers may view it.
    """
    data = {}
    entry = get_object_or_404(Entry, pk=pk)
    if entry.user != request.user:
        if not (request.user.is_staff or request.user.is_superuser):
            return http.HttpResponseForbidden('insufficient access')

    emails = request.REQUEST.getlist('e')
    # defensive: getlist should always return a list already
    if isinstance(emails, basestring):
        emails = [emails]
    data['emails'] = emails
    data['emailed_users'] = []
    for email in emails:
        record = ldap_lookup.fetch_user_details(email)
        if record:
            data['emailed_users'].append(record)
        else:
            # no LDAP match; show the raw address
            data['emailed_users'].append(email)
    show_fireworks = not request.COOKIES.get('no_fw', False)
    data['show_fireworks'] = show_fireworks
    return render(request, 'dates/emails_sent.html', data)
@login_required
def list_(request):
    """Render the entry listing page with its filter form.

    The actual rows are fetched separately (list_json/list_csv); this
    view only prepares the filter form and the date boundaries used by
    the filter widgets.
    """
    data = {}
    form = forms.ListFilterForm(date_format='%d %B %Y',
                                data=request.GET)
    if form.is_valid():
        data['filters'] = form.cleaned_data

    data['today'] = datetime.date.today()

    entries_base = Entry.objects.all()

    try:
        data['first_date'] = entries_base.order_by('start')[0].start
        data['last_date'] = entries_base.order_by('-end')[0].end
        data['first_filed_date'] = (entries_base
                                    .order_by('add_date')[0]
                                    .add_date)
    except IndexError:
        # first run, not so important
        data['first_date'] = datetime.date(2000, 1, 1)
        data['last_date'] = datetime.date(2000, 1, 1)
        data['first_filed_date'] = datetime.date(2000, 1, 1)

    data['form'] = form
    data['query_string'] = request.META.get('QUERY_STRING')
    return render(request, 'dates/list.html', data)
@login_required
def list_csv(request):
    """Export the filtered entry list as a CSV download.

    Uses the same query-string filters as list_json (via
    get_entries_from_request).
    """
    entries = get_entries_from_request(request.GET)
    response = http.HttpResponse(mimetype='text/csv')
    writer = CSVUnicodeWriter(response)
    writer.writerow((
      'ID',
      'EMAIL',
      'FIRST NAME',
      'LAST NAME',
      'ADDED',
      'START',
      'END',
      'DAYS',
      'DETAILS',
      'CITY',
      'COUNTRY',
      'START DATE',
    ))
    profiles = {}  # basic memoization
    for entry in entries:
        if entry.user.pk not in profiles:
            profiles[entry.user.pk] = entry.user.get_profile()
        profile = profiles[entry.user.pk]
        writer.writerow((
          str(entry.pk),
          entry.user.email,
          entry.user.first_name,
          entry.user.last_name,
          entry.add_date.strftime('%Y-%m-%d'),
          entry.start.strftime('%Y-%m-%d'),
          entry.end.strftime('%Y-%m-%d'),
          str(entry.total_days),
          entry.details,
          profile.city,
          profile.country,
          (profile.start_date and
           profile.start_date.strftime('%Y-%m-%d') or ''),
        ))

    return response
@json_view
def list_json(request):
    """JSON rows for the entry listing's data table.

    Entry details are only exposed when the requester is a superuser, the
    entry's owner, or the owner's manager (same visibility rule as
    calendar_events).  Negative-hour entries (automatic credits) show a
    fixed placeholder instead of details.
    """
    entries = get_entries_from_request(request.GET)

    _managers = {}  # memoizes each user's manager pk

    def can_see_details(user):
        if request.user.is_superuser:
            return True
        if request.user.pk == user.pk:
            return True
        if user.pk not in _managers:
            _profile = user.get_profile()
            _manager = None
            if _profile and _profile.manager_user:
                _manager = _profile.manager_user.pk
            _managers[user.pk] = _manager
        return _managers[user.pk] == request.user.pk

    data = []
    profiles = {}
    for entry in entries:
        if entry.user.pk not in profiles:
            profiles[entry.user.pk] = entry.user.get_profile()
        profile = profiles[entry.user.pk]
        if entry.total_hours < 0:
            details = '*automatic edit*'
        elif can_see_details(entry.user):
            details = entry.details
        else:
            details = ''
        row = [entry.user.email,
               entry.user.first_name,
               entry.user.last_name,
               entry.add_date.strftime('%Y-%m-%d'),
               entry.total_days,
               entry.start.strftime('%Y-%m-%d'),
               entry.end.strftime('%Y-%m-%d'),
               profile.city,
               profile.country,
               details,
               #edit_link,
               #hours_link
               ]
        data.append(row)

    return {'aaData': data}
def get_entries_from_request(data):
    """Return an Entry queryset filtered by the listing form's values.

    ``data`` is a query dict (usually request.GET).  Invalid filter data
    yields an empty queryset.  Supported filters: date range overlap,
    filing-date range, name/email, and country.
    """
    form = forms.ListFilterForm(date_format='%d %B %Y', data=data)
    if not form.is_valid():
        return Entry.objects.none()
    fdata = form.cleaned_data
    entries = (Entry.objects.exclude(total_hours=None)
               .select_related('user'))
    if fdata.get('date_from'):
        entries = entries.filter(end__gte=fdata.get('date_from'))
    if fdata.get('date_to'):
        entries = entries.filter(start__lte=fdata.get('date_to'))
    if fdata.get('date_filed_from'):
        entries = entries.filter(
          add_date__gte=fdata.get('date_filed_from'))
    if fdata.get('date_filed_to'):
        # inclusive end: anything filed before midnight of the next day
        entries = entries.filter(
          add_date__lt=fdata.get('date_filed_to') +
          datetime.timedelta(days=1))
    if fdata.get('name'):
        name = fdata['name'].strip()
        if valid_email(name):
            entries = entries.filter(user__email__iexact=name)
        else:
            # match first word against first name, last word against
            # last name
            entries = entries.filter(
              Q(user__first_name__istartswith=name.split()[0]) |
              Q(user__last_name__iendswith=name.split()[-1])
            )
    if fdata.get('country'):
        country = fdata['country'].strip()
        _users = UserProfile.objects.filter(country=country).values('user_id')
        entries = entries.filter(user__id__in=_users)
    return entries
@login_required
def following(request):
    """Page listing who the user observes (and why) and who is blacklisted.

    The reason shown per user reflects the relationship: direct/indirect
    report, manager, explicit follow ('curious') or teammate.
    """
    data = {}
    observed = []
    _followed = get_followed_users(request.user)
    _minions_1 = get_minions(request.user, depth=1, max_depth=1)
    _minions_2 = get_minions(request.user, depth=1, max_depth=2)
    _manager = request.user.get_profile().manager_user
    # Python 2 style cmp-based sort by first name, case-insensitively.
    for user in sorted(get_observed_users(request.user, max_depth=2),
                       lambda x, y: cmp(x.first_name.lower(),
                                        y.first_name.lower())):
        if user in _minions_1:
            reason = 'direct manager of'
        elif user in _minions_2:
            reason = 'indirect manager of'
        elif user == _manager:
            reason = 'your manager'
        elif user in _followed:
            reason = 'curious'
        else:
            reason = 'teammate'
        observed.append((user, reason))
    not_observed = (BlacklistedUser.objects
                    .filter(observer=request.user)
                    .order_by('observable__first_name'))
    data['observed'] = observed
    data['not_observed'] = [x.observable for x in not_observed]
    return render(request, 'dates/following.html', data)
@json_view
@login_required
@transaction.commit_on_success
@require_POST
def save_following(request):
    """AJAX endpoint: start following a user.

    The 'search' value may be a "Name <email>" string, a numeric user id,
    or a free-text LDAP search that must resolve to exactly one user.
    Returns the followed user's id, display name and the reason they are
    observed.
    """
    search = request.POST.get('search')
    if not search:
        return http.HttpResponseBadRequest('Missing search')

    # "Name <email@domain>" form: pull the address out of the brackets
    if (-1 < search.rfind('<') < search.rfind('@') < search.rfind('>')):
        try:
            email = re.findall('<([\w\.\-]+@[\w\.\-]+)>', search)[0]
            email = email.strip()
            validate_email(email)
        except (ValidationError, IndexError):
            email = None
    elif search.isdigit():
        try:
            email = User.objects.get(pk=search).email
        except User.DoesNotExist:
            email = None  # will deal with this later
    else:
        found = []
        result = ldap_lookup.search_users(search, 30, autocomplete=True)
        for each in result:
            try:
                found.append(User.objects.get(email__iexact=each['mail']))
            except User.DoesNotExist:
                pass
        if len(found) > 1:
            return http.HttpResponseBadRequest('More than one user found')
        elif not found:
            return http.HttpResponseBadRequest('No user found')
        else:
            email = found[0].email

    # if no email is found in the search, it's an error
    if not email:
        return http.HttpResponseBadRequest('No email found')
    try:
        user = User.objects.get(email__iexact=email)
    except User.DoesNotExist:
        return http.HttpResponseBadRequest('No user by that email found')
    FollowingUser.objects.get_or_create(
      follower=request.user,
      following=user,
    )

    # find a reason why we're following this user
    _minions_1 = get_minions(request.user, depth=1, max_depth=1)
    _minions_2 = get_minions(request.user, depth=1, max_depth=2)
    if user in _minions_1:
        reason = 'direct manager of'
    elif user in _minions_2:
        reason = 'indirect manager of'
    elif user == request.user.get_profile().manager_user:
        reason = 'your manager'
    # NOTE(review): this branch is unreachable -- `user in _minions_1`
    # is already handled by the first branch above; it probably meant a
    # siblings/teammate check (compare with following()).  Verify.
    elif (request.user.get_profile().manager_user
          and user in _minions_1):
        reason = 'teammate'
    else:
        reason = 'curious'
    name = ('%s %s' % (user.first_name,
                       user.last_name)).strip()
    if not name:
        name = user.username
    data = {
      'id': user.pk,
      'name': name,
      'reason': reason,
    }
    return data
@json_view
@login_required
@transaction.commit_on_success
@require_POST
def save_unfollowing(request):
    """AJAX endpoint: stop following a user.

    Removes any explicit follow; if the user would still be observed
    implicitly (minion/sibling/manager), they are blacklisted so they
    stay hidden.  Returns the user's id and display name.
    """
    remove = request.POST.get('remove')
    try:
        user = User.objects.get(pk=remove)
    except (ValueError, User.DoesNotExist):
        return http.HttpResponseBadRequest('Invalid user ID')

    for f in (FollowingUser.objects
              .filter(follower=request.user, following=user)):
        f.delete()

    data = {}
    if user in get_observed_users(request.user, max_depth=2):
        # if not blacklisted, this user will automatically re-appear
        BlacklistedUser.objects.get_or_create(
          observer=request.user,
          observable=user
        )
    data['id'] = user.pk
    name = ('%s %s' % (user.first_name,
                       user.last_name)).strip()
    if not name:
        name = user.username
    data['name'] = name
    return data
def calendar_vcal(request, key):
    """Serve the iCalendar feed for the user identified by ``key``.

    ``key`` is the per-user secret from _get_user_calendar_url().  An
    unknown key yields a single-event calendar telling the user to fetch
    a fresh URL instead of an HTTP error (calendar clients handle that
    better).  Otherwise the feed contains current/future entries for the
    user and everyone they observe.
    """
    base_url = '%s://%s' % (request.is_secure() and 'https' or 'http',
                            RequestSite(request).domain)
    home_url = base_url + '/'
    cal = vobject.iCalendar()
    cal.add('x-wr-calname').value = 'Mozilla Vacation'

    try:
        user = UserKey.objects.get(key=key).user
    except UserKey.DoesNotExist:
        # instead of raising a HTTP error, respond a calendar
        # that urges the user to update the stale URL
        event = cal.add('vevent')
        event.add('summary').value = (
          "Calendar expired. Visit %s#calendarurl to get the "
          "new calendar URL" % home_url
        )
        today = datetime.date.today()
        event.add('dtstart').value = today
        event.add('dtend').value = today
        event.add('url').value = '%s#calendarurl' % (home_url,)
        event.add('description').value = ("The calendar you used has expired "
          "and is no longer associated with any user")
        return _render_vcalendar(cal, key)

    # always start on the first of this month
    today = datetime.date.today()
    #first = datetime.date(today.year, today.month, 1)
    user_ids = [user.pk]
    for user_ in get_observed_users(user, max_depth=2):
        user_ids.append(user_.pk)
    entries = (Entry.objects
               .filter(user__in=user_ids,
                       total_hours__gte=0,
                       total_hours__isnull=False,
                       end__gte=today)
               .select_related('user')
               )

    _list_base_url = base_url + reverse('dates.list')

    def make_list_url(entry):
        # deep-link to the list view filtered down to this entry
        name = entry.user.get_full_name()
        if not name:
            name = entry.user.username
        data = {
          'date_from': entry.start.strftime('%d %B %Y'),
          'date_to': entry.end.strftime('%d %B %Y'),
          'name': name
        }
        return _list_base_url + '?' + urlencode(data, True)

    for entry in entries:
        event = cal.add('vevent')
        event.add('summary').value = '%s Vacation' % make_entry_title(entry, user,
          include_details=False)
        event.add('dtstart').value = entry.start
        event.add('dtend').value = entry.end
        #url = (home_url + '?cal_y=%d&cal_m=%d' %
        #       (slot.date.year, slot.date.month))
        event.add('url').value = make_list_url(entry)
        #event.add('description').value = entry.details
        event.add('description').value = "Log in to see the details"

    return _render_vcalendar(cal, key)
def _render_vcalendar(cal, key):
    """Return ``cal`` serialized as a text/calendar HTTP response.

    The suggested filename is derived from the user's calendar key.
    """
    response = http.HttpResponse(cal.serialize(),
                                 mimetype='text/calendar;charset=utf-8')
    filename = '%s.ics' % (key,)
    response['Content-Disposition'] = 'inline; filename="%s"' % filename
    return response
@login_required
@transaction.commit_on_success
def reset_calendar_url(request):
    """Invalidate the user's calendar key(s); a new key is created on
    demand the next time the dashboard renders the calendar URL."""
    for user_key in UserKey.objects.filter(user=request.user):
        user_key.delete()
    return redirect(reverse('dates.home') + '#calendarurl')
@login_required
def about_calendar_url(request):
    """Render the page explaining the user's private calendar URL."""
    context = {'calendar_url': _get_user_calendar_url(request)}
    return render(request, 'dates/about-calendar-url.html', context)
@login_required
def duplicate_report(request):
    """Report page showing likely-duplicate entries (same start date).

    Groups a user's entries by start date; dates with more than one entry
    are listed with a note guessing whether the duplication looks like a
    mistake (identical details) or not.  Admins may inspect other users
    via the ?user= filter.
    """
    data = {
      'filter_errors': None,
    }
    if request.method == 'POST':
        raise NotImplementedError
    else:
        form = forms.DuplicateReportFilterForm(date_format='%d %B %Y',
                                               data=request.GET)
        user = request.user
        filter_ = dict(user=user)
        if form.is_valid():
            if form.cleaned_data['user']:
                user = form.cleaned_data['user']
                # only admins may look at somebody else's report
                if user != request.user:
                    if not (request.user.is_superuser
                            or request.user.is_staff):
                        if user != request.user:
                            return http.HttpResponse(
                              "Only available for admins")
                filter_['user'] = user
            if form.cleaned_data['since']:
                filter_['start__gte'] = form.cleaned_data['since']
                data['since'] = form.cleaned_data['since']
        else:
            data['filter_errors'] = form.errors

        data['first_date'] = (Entry.objects
                              .filter(user=user)
                              .aggregate(Min('start'))
                              ['start__min'])

        # start dates ordered by how many entries share them
        start_dates = (Entry.objects
                       .filter(**filter_)
                       .values("start")
                       .annotate(Count("start"))
                       .order_by('-start__count'))
        groups = []
        for each in start_dates:
            if each['start__count'] <= 1:
                # ordered by count, so no more duplicates past this point
                break

            entries = Entry.objects.filter(user=user, start=each['start'])
            details = [x.details for x in entries]
            # NOTE(review): this initial value is always overwritten by
            # the if/else below (dead store).
            note = "Probably not a mistake"
            if len(set(details)) == 1:
                note = ("Probably a duplicate! "
                        "The details are the same for each entry")
            else:
                note = "Possibly not a duplicate since the details different"
            groups.append((entries, note))
        data['groups'] = groups

    if 'since' not in data:
        data['since'] = data['first_date']
    return render(request, 'dates/duplicate-report.html', data)
| mozilla/pto | pto/apps/dates/views.py | Python | mpl-2.0 | 37,380 |
from flask import render_template
class NginxConfigRenderer():
    """Render and persist the nginx configuration for a manifold's minions."""

    def __init__(self, manifold):
        self.manifold = manifold
        self.app = manifold.app

    def render(self, minions):
        """Return the rendered ``nginx/nginx.conf`` template as a string."""
        # Flask's render_template needs an active application context.
        with self.app.app_context():
            rendered = render_template('nginx/nginx.conf',
                                       manifold=self.manifold,
                                       minions=minions)
        return rendered

    def write(self, minions):
        """Render the config and write it to the configured nginx path."""
        # Render first so a template failure never truncates the live file.
        content = self.render(minions)
        target = self.manifold.config.NGINX_CONF_PATH
        with open(target, 'w') as conf_file:
            conf_file.write(content)
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.schema import Schema
from pontus.widget import Select2Widget
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from deform_treepy.utilities.tree_utility import tree_to_keywords
from lac.content.processes.admin_process.behaviors import (
ManageKeywords)
from lac.content.lac_application import (
CreationCulturelleApplication)
from lac import _
from lac.content.keyword import ROOT_TREE
class ManageKeywordsViewStudyReport(BasicView):
    """Study-report panel displayed alongside the manage-keywords form."""
    title = 'Alert for keywords'
    name = 'alertforkeywordsmanagement'
    template = 'lac:views/admin_process/templates/alert_event_keywords.pt'

    def update(self):
        """Render the alert template and place it at this view's coordinates."""
        rendered = self.content(args={}, template=self.template)['body']
        entry = self.adapt_item(rendered, self.viewid)
        return {'coordinates': {self.coordinates: [entry]}}
@colander.deferred
def targets_choice(node, kw):
    """Build a multi-select keyword widget from the current site's tree."""
    request = node.bindings['request']
    site = request.get_site_folder
    # Flatten every '/'-separated keyword path into a unique set of segments.
    segments = set()
    for path in tree_to_keywords(site.tree):
        segments.update(path.split('/'))
    # The technical root node is not a real keyword.
    segments.discard(ROOT_TREE)
    return Select2Widget(
        values=[(word, word) for word in sorted(segments)],
        multiple=True
    )
class ManageKeywordsSchema(Schema):
    """Form schema: pick existing keywords and the keyword replacing them."""

    # Existing keywords selected for replacement.
    targets = colander.SchemaNode(
        colander.Set(),
        widget=targets_choice,
        title=_("Keywords")
        )

    # Replacement keyword applied in place of every selected target.
    source = colander.SchemaNode(
        colander.String(),
        title=_("New keyword")
        )
class ManageKeywordsFormView(FormView):
    """Form view that executes the ManageKeywords admin behavior."""
    title = _('Manage keywords')
    schema = ManageKeywordsSchema()
    behaviors = [ManageKeywords, Cancel]
    formid = 'formmanagekeywords'
    name = 'formmanagekeywords'
@view_config(
    name='managekeywords',
    context=CreationCulturelleApplication,
    renderer='pontus:templates/views_templates/grid.pt',
    )
class ManageKeywordsView(MultipleView):
    """Merged view combining the study report and the manage-keywords form."""
    title = _('Manage keywords')
    name = 'managekeywords'
    viewid = 'managekeywords'
    template = 'daceui:templates/mergedmultipleview.pt'
    views = (ManageKeywordsViewStudyReport, ManageKeywordsFormView)
    validators = [ManageKeywords.get_validator()]
# Register the view handling the ManageKeywords behavior in the dace dispatch map.
DEFAULTMAPPING_ACTIONS_VIEWS.update({ManageKeywords: ManageKeywordsView})
| ecreall/lagendacommun | lac/views/admin_process/manage_keywords.py | Python | agpl-3.0 | 2,736 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2013 AvanzOSC S.L. All Rights Reserved
# Date: 01/07/2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
# OpenERP module manifest for the tree_grid extension.
{
    "name": "AvanzOSC - tree_grid extension",
    "version": "1.0",
    # tree_grid supplies the editable-tree widget this module extends.
    "depends": ["tree_grid","sale","purchase","stock","account","avanzosc_calculate_coeficient_udv_automatically"],
    "author": "AvanzOSC S.L.",
    "category": "Generic",
    # User-facing description (Spanish): adds editable sale unit and sale
    # quantity columns to sale/purchase order, invoice and picking line trees.
    "description": """
    Este módulo añade la unidad de venta, y cantidad de venta editables en los tree de
    líneas de pedido de compra, y de venta, líneas de factura, y líneas de albaranes.
    """,
    "init_xml": [],
    # Views extended with the extra editable columns.
    'update_xml': ['sale_order_view_ext.xml',
                   'purchase_order_view_ext.xml',
                   'stock_picking_view_ext.xml',
                   'account_invoice_view_ext.xml',
                   'product_product_view_ext.xml'
                   ],
    'demo_xml': [],
    'installable': True,
    'active': False,
#    'certificate': 'certificate',
}
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Samy Bucher <samy.bucher@outlook.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models
from odoo import fields
class Correspondence(models.Model):
    """Extend correspondence letters with a link to a sponsorship gift."""
    _inherit = 'correspondence'

    # Gift this letter relates to (e.g. a thank-you letter for a gift).
    gift_id = fields.Many2one('sponsorship.gift', 'Gift')
| ecino/compassion-switzerland | sponsorship_switzerland/models/correspondence.py | Python | agpl-3.0 | 583 |
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
class Command(BaseCommand):
    """Management command creating a default ``admin`` superuser + API token."""
    help = """Create a default admin user if it doesn't exist.
    you SHOULD change the password, the email and the token afterwards!"""
    username = 'admin'
    password = 'admin'
    email = 'root@example.com'

    def handle(self, *args, **options):
        verbosity = options.get('verbosity')
        admin = User.objects.filter(username=self.username)
        if not admin:
            user = User.objects.create_user(username=self.username,
                                            email=self.email,
                                            password=self.password)
            user.is_superuser = True
            user.is_staff = True
            user.save()
            # Ensure an auth token exists regardless of verbosity.  The old
            # Token.objects.get(user=user) raised Token.DoesNotExist when no
            # post_save signal auto-created one.
            token, token_created = Token.objects.get_or_create(user=user)
            if verbosity:
                # (Fixed a stray trailing quote in this message.)
                print('User "%s" created' % self.username)
                if token_created or token:
                    print('Token created for User "%s"' % self.username)
| Parisson/TimeSide | timeside/server/management/commands/timeside-create-admin-user.py | Python | agpl-3.0 | 1,192 |
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from . import sale_order
| Trust-Code/trust-addons | trust_sale/models/__init__.py | Python | agpl-3.0 | 1,413 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2015 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class Website(models.Model):
    _inherit = 'website'

    @api.multi
    def sale_get_order(self, force_create=False, code=None,
                       update_pricelist=None):
        """Wrap the upstream method so callers always get a recordset.

        The parent implementation may return ``None`` when there is no
        current order; normalize that to an empty ``sale.order`` recordset
        so chained recordset operations on the result do not crash.
        """
        res = super(Website, self).sale_get_order(
            force_create=force_create, code=code,
            update_pricelist=update_pricelist)
        return res if res is not None else self.env['sale.order']
| eLBati/website | website_event_register_free_with_sale/models/website.py | Python | agpl-3.0 | 1,450 |
# Generated file. Do not edit
__author__="drone"
from Abs import Abs
from And import And
from Average import Average
from Ceil import Ceil
from Cube import Cube
from Divide import Divide
from Double import Double
from Equal import Equal
from Even import Even
from Floor import Floor
from Greaterorequal import Greaterorequal
from Greaterthan import Greaterthan
from Half import Half
from If import If
from Increment import Increment
from Lessorequal import Lessorequal
from Lessthan import Lessthan
from Max import Max
from Min import Min
from Module import Module
from Multiply import Multiply
from Negate import Negate
from Not import Not
from Odd import Odd
from One import One
from Positive import Positive
from Quadruple import Quadruple
from Sign import Sign
from Sub import Sub
from Sum import Sum
from Two import Two
from Zero import Zero
__all__ = ['Abs', 'And', 'Average', 'Ceil', 'Cube', 'Divide', 'Double', 'Equal', 'Even', 'Floor', 'Greaterorequal', 'Greaterthan', 'Half', 'If', 'Increment', 'Lessorequal', 'Lessthan', 'Max', 'Min', 'Module', 'Multiply', 'Negate', 'Not', 'Odd', 'One', 'Positive', 'Quadruple', 'Sign', 'Sub', 'Sum', 'Two', 'Zero']
| gcobos/rft | app/primitives/__init__.py | Python | agpl-3.0 | 1,163 |
"""
Helpers for accessing comprehensive theming related variables.
This file is imported at startup. Imports of models or things which import models will break startup on Django 1.9+. If
you need models here, please import them inside the function which uses them.
"""
import os
import re
from logging import getLogger
import crum
from django.conf import settings
from edx_toggles.toggles import SettingToggle
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming.helpers_dirs import (
Theme,
get_project_root_name_from_settings,
get_theme_base_dirs_from_settings,
get_theme_dirs,
get_themes_unchecked
)
from openedx.core.lib.cache_utils import request_cached
logger = getLogger(__name__) # pylint: disable=invalid-name
@request_cached()
def get_template_path(relative_path, **kwargs):  # lint-amnesty, pylint: disable=unused-argument
    """
    Return ``relative_path`` unchanged.

    Kept with its historical signature for API compatibility; extra keyword
    arguments are accepted and ignored.

    The calculated value is cached for the lifetime of the current request.
    """
    return relative_path
def is_request_in_themed_site():
    """
    This is a proxy function to hide microsite_configuration behind comprehensive theming.

    Returns:
        (bool): True if the current site has site configuration enabled.
    """
    # We need to give priority to theming/site-configuration over microsites
    return configuration_helpers.is_site_configuration_enabled()
def get_template_path_with_theme(relative_path):
    """
    Returns template path in current site's theme if it finds one there otherwise returns same path.

    Example:
        >> get_template_path_with_theme('header.html')
        '/red-theme/lms/templates/header.html'

    Parameters:
        relative_path (str): template's path relative to the templates directory e.g. 'footer.html'

    Returns:
        (str): template path in current site's theme
    """
    relative_path = os.path.normpath(relative_path)

    current_theme = get_current_theme()
    if not current_theme:
        return relative_path

    # Drop any leading slashes so the paths below join cleanly.
    template_name = re.sub(r'^/+', '', relative_path)
    themed_template = current_theme.template_path / template_name
    on_disk = current_theme.path / "templates" / template_name
    return str(themed_template) if on_disk.exists() else relative_path
def get_all_theme_template_dirs():
    """
    Returns template directories for all the themes.

    Example:
        >> get_all_theme_template_dirs()
        [
            '/edx/app/edxapp/edx-platform/themes/red-theme/lms/templates/',
        ]

    Returns:
        (list): list of directories containing theme templates.
    """
    # Idiomatic flattening of every theme's template dirs (replaces the
    # manual `list()` + extend loop).
    return [
        template_dir
        for theme in get_themes()
        for template_dir in theme.template_dirs
    ]
def get_project_root_name():
    """
    Return root name for the current project

    Example:
        >> get_project_root_name()
        'lms'
        # from studio
        >> get_project_root_name()
        'cms'

    Returns:
        (str): component name of platform e.g lms, cms
    """
    # Derived from the configured PROJECT_ROOT path (its last component).
    return get_project_root_name_from_settings(settings.PROJECT_ROOT)
def strip_site_theme_templates_path(uri):
    """
    Remove site template theme path from the uri.

    Example:
        >> strip_site_theme_templates_path('/red-theme/lms/templates/header.html')
        'header.html'

    Arguments:
        uri (str): template path from which to remove site theme path. e.g. '/red-theme/lms/templates/header.html'

    Returns:
        (str): template path with site theme path removed.
    """
    theme = get_current_theme()
    if not theme:
        return uri

    templates_path = "/".join([
        theme.theme_dir_name,
        get_project_root_name(),
        "templates"
    ])

    # Escape the prefix before embedding it in the pattern: theme/project
    # names may contain regex metacharacters (e.g. '.'), which previously
    # matched arbitrary characters and could strip the wrong prefix.
    uri = re.sub(r'^/*' + re.escape(templates_path) + '/*', '', uri)
    return uri
def get_current_request():
    """
    Return current request instance.

    Returns:
        (HttpRequest): returns current request
    """
    # crum tracks the active request in a thread-local set by its middleware;
    # returns None outside of a request cycle.
    return crum.get_current_request()
def get_current_site():
    """
    Return current site.

    Returns:
         (django.contrib.sites.models.Site): returns current site
    """
    request = get_current_request()
    # No request in flight (e.g. management command) -> no site.
    return getattr(request, 'site', None) if request else None
def get_current_site_theme():
    """
    Return current site theme object. Returns None if theming is disabled.

    Returns:
         (ecommerce.theming.models.SiteTheme): site theme object for the current site.
    """
    # Theming disabled means there can be no site theme at all.
    if not is_comprehensive_theming_enabled():
        return None

    request = get_current_request()
    return getattr(request, 'site_theme', None) if request else None
def get_current_theme():
    """
    Return current theme object. Returns None if theming is disabled.

    Returns:
         (ecommerce.theming.models.SiteTheme): site theme object for the current site.
    """
    # Return None if theming is disabled
    if not is_comprehensive_theming_enabled():
        return None

    site_theme = get_current_site_theme()
    if not site_theme:
        return None
    try:
        # Resolve the stored theme_dir_name into a concrete Theme bound to
        # its base directory and the current project root (lms/cms).
        return Theme(
            name=site_theme.theme_dir_name,
            theme_dir_name=site_theme.theme_dir_name,
            themes_base_dir=get_theme_base_dir(site_theme.theme_dir_name),
            project_root=get_project_root_name()
        )
    except ValueError as error:
        # Log exception message and return None, so that open source theme is used instead
        logger.exception(u'Theme not found in any of the themes dirs. [%s]', error)
        return None
def current_request_has_associated_site_theme():
    """
    True if current request has an associated SiteTheme, False otherwise.

    Returns:
        True if current request has an associated SiteTheme, False otherwise
    """
    # getattr on None safely yields None when there is no active request.
    site_theme = getattr(get_current_request(), 'site_theme', None)
    return bool(site_theme and site_theme.id)
def get_theme_base_dir(theme_dir_name, suppress_error=False):
    """
    Returns absolute path to the directory that contains the given theme.

    Args:
        theme_dir_name (str): theme directory name to get base path for
        suppress_error (bool): if True function will return None if theme is not found instead of raising an error

    Returns:
        (str): Base directory that contains the given theme
    """
    # First base dir (if any) whose listing contains the requested theme.
    base_dir = next(
        (candidate for candidate in get_theme_base_dirs()
         if theme_dir_name in get_theme_dirs(candidate)),
        None,
    )
    if base_dir is not None:
        return base_dir

    if suppress_error:
        return None

    raise ValueError(
        u"Theme '{theme}' not found in any of the following themes dirs, \nTheme dirs: \n{dir}".format(
            theme=theme_dir_name,
            dir=get_theme_base_dirs(),
        ))
def theme_exists(theme_name, themes_dir=None):
    """
    Returns True if a theme exists with the specified name.
    """
    return any(
        theme.theme_dir_name == theme_name
        for theme in get_themes(themes_dir=themes_dir)
    )
def get_themes(themes_dir=None):
    """
    get a list of all themes known to the system.

    Args:
        themes_dir (str): (Optional) Path to themes base directory

    Returns:
        list of themes known to the system.
    """
    # With theming disabled, behave as if no themes are installed.
    if not is_comprehensive_theming_enabled():
        return []

    if themes_dir is None:
        themes_dir = get_theme_base_dirs_unchecked()

    return get_themes_unchecked(themes_dir, settings.PROJECT_ROOT)
def get_theme_base_dirs_unchecked():
    """
    Return base directories that contains all the themes.

    Example:
        >> get_theme_base_dirs_unchecked()
        ['/edx/app/ecommerce/ecommerce/themes']

    Returns:
        (List of Paths): Base theme directory paths
    """
    # COMPREHENSIVE_THEME_DIRS may be unset; the helper receives None then.
    theme_dirs = getattr(settings, "COMPREHENSIVE_THEME_DIRS", None)

    return get_theme_base_dirs_from_settings(theme_dirs)
def get_theme_base_dirs():
    """
    Return base directories that contains all the themes.
    Ensures comprehensive theming is enabled.

    Example:
        >> get_theme_base_dirs()
        ['/edx/app/ecommerce/ecommerce/themes']

    Returns:
        (List of Paths): Base theme directory paths
    """
    if is_comprehensive_theming_enabled():
        return get_theme_base_dirs_unchecked()
    # Theming disabled: no theme directories apply.
    return []
def is_comprehensive_theming_enabled():
    """
    Returns boolean indicating whether comprehensive theming functionality is enabled or disabled.
    Example:
        >> is_comprehensive_theming_enabled()
        True

    Returns:
        (bool): True if comprehensive theming is enabled else False
    """
    # The toggle is constructed at call time so overridden settings (tests,
    # site configs) are re-read on every call.
    ENABLE_COMPREHENSIVE_THEMING = SettingToggle("ENABLE_COMPREHENSIVE_THEMING", default=False)
    # NOTE(review): this branch is logically redundant as written -- it only
    # returns True when the fall-through below would also return True.
    # Possibly the intent was to force-enable theming whenever the request
    # carries a SiteTheme; confirm before simplifying.
    if ENABLE_COMPREHENSIVE_THEMING.is_enabled() and current_request_has_associated_site_theme():
        return True

    return ENABLE_COMPREHENSIVE_THEMING.is_enabled()
def get_config_value_from_site_or_settings(name, site=None, site_config_name=None):
    """
    Given a configuration setting name, try to get it from the site configuration and then fall back on the settings.

    If site_config_name is not specified then "name" is used as the key for both collections.

    Args:
        name (str): The name of the setting to get the value of.
        site: The site that we are trying to fetch the value for.
        site_config_name: The name of the setting within the site configuration.

    Returns:
        The value stored in the configuration.
    """
    # Imported here (not at module level) because this module is loaded at
    # startup, before Django models may be imported.
    from openedx.core.djangoapps.site_configuration.models import SiteConfiguration

    if site_config_name is None:
        site_config_name = name

    if site is None:
        site = get_current_site()

    site_configuration = None
    if site is not None:
        try:
            site_configuration = getattr(site, "configuration", None)
        except SiteConfiguration.DoesNotExist:
            pass

    value_from_settings = getattr(settings, name, None)
    if site_configuration is not None:
        # Site configuration wins; the Django setting acts as the default.
        return site_configuration.get_value(site_config_name, default=value_from_settings)
    else:
        return value_from_settings
| stvstnfrd/edx-platform | openedx/core/djangoapps/theming/helpers.py | Python | agpl-3.0 | 10,226 |
# from . import test_partner_import
from . import test_product_import
| iw3hxn/LibrERP | data_migration/tests/__init__.py | Python | agpl-3.0 | 70 |
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.core.paginator import Paginator
from uwcs_website.choob.models import *
def quotes_page(request):
    """Render the quotes landing page with the distinct quoter/quoted nicks."""
    # values_list(..., flat=True) yields bare nick strings directly,
    # replacing the old map(lambda q: q[0], ...) unpacking of 1-tuples.
    quoters = list(QuoteObject.objects.values_list('quoter', flat=True).distinct())
    quoted = list(QuoteLine.objects.values_list('nick', flat=True).distinct())
    return render_to_response('choob/quotes.html',{
        'breadcrumbs': [('/','home'),('/irc/','irc')],
        'user':request.user,
        'quoters':quoters,
        'quoted':quoted,
    })
# Number of quotes shown per paginated page.
PER_PAGE = 20
def quotes_f(request,page_num,url,f):
    '''
    Generic quotes controller for making lists of quotes

    type(f) = String -> [QuoteObject]

    ``url`` names the concrete view (used by the template to build page
    links) and ``f`` maps the POSTed search value to a quote queryset.
    On GET there is no search value, so the user is redirected to the
    full quote list instead.
    '''
    if request.method == 'POST':
        val = request.POST['val']
        paginator = Paginator(f(val),PER_PAGE)
        return render_to_response('choob/quote_list.html',{
            'breadcrumbs': [('/','home'),('/irc/','irc')],
            'user':request.user,
            'page':paginator.page(page_num),
            'value':val,
            'url':url,
        })
    else:
        return HttpResponseRedirect('/irc/all_quotes/')
def all_quotes(request, page_num):
    """Paginated listing of every quote."""
    pager = Paginator(QuoteObject.objects.all(), PER_PAGE)
    return render_to_response('choob/quote_list.html', {
        'breadcrumbs': [('/', 'home'), ('/irc/', 'irc'), ('/irc/all_quotes/1/', 'all')],
        'user': request.user,
        'page': pager.page(page_num),
    })
# this is clearly not idiomatic in languages without currying
# perhaps someone can suggest something else
def quotes_by(request,page):
    """Quotes recorded by the given quoter nick (POSTed as ``val``)."""
    return quotes_f(request,page,'quotes_by',
        lambda n:QuoteObject.objects.filter(quoter__exact=n))
def quotes_from(request,page):
    """Quotes containing a line spoken by the given nick (POSTed as ``val``)."""
    return quotes_f(request,page,'quotes_from',
        lambda n:QuoteObject.objects.filter(quoteline__nick__exact=n))
def quotes_with(request,page):
    """Quotes whose text contains the given substring (POSTed as ``val``)."""
    return quotes_f(request,page,'quotes_with',
        lambda v:QuoteObject.objects.filter(quoteline__message__contains=v))
| UWCS/uwcs-website | uwcs_website/choob/views.py | Python | agpl-3.0 | 2,057 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-22 07:11
from __future__ import unicode_literals
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
def add_executive_group(apps, schema_editor):
    """Create the 'executive' auth group and grant it learning-unit access."""
    # create group
    db_alias = schema_editor.connection.alias
    # Emit post_migrate first so the auth permissions queried below exist.
    emit_post_migrate_signal(2, False, db_alias)
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')

    executive_group, created = Group.objects.get_or_create(name='executive')
    if created:
        # Learning unit
        can_access_learningunit = Permission.objects.get(codename='can_access_learningunit')
        executive_group.permissions.add(can_access_learningunit)
class Migration(migrations.Migration):
    """Data migration: create the 'executive' group (no reverse operation)."""

    dependencies = [
        ('base', '0207_auto_20171220_1035'),
    ]

    operations = [
        migrations.RunPython(add_executive_group),
    ]
from __future__ import division
from os.path import expanduser, exists
from os import chdir, mkdir
import pickle
import numpy as np
from scipy import sparse
import networkx as nx
home = expanduser('~')
chdir(home+'/workspace/vichakshana/vichakshana/')
#CELERY
from mycelery import app
class SASD():
    """Structure-Aware Semantic Distance (SASD) between pages of a corpus.

    Combines three signals into one distance in roughly [0, 1]:
    co-relevance groups, shortest-path length in the entity-link graph,
    and co-citation edge weights.  Python 2 code (``file``, ``unicode``,
    ``print`` statement); distances are persisted as one pickle per page.
    """

    def __init__(self, keyword):
        self.keyword = keyword
        # Pre-computed relevance groups for this keyword.
        self.sasd_data = pickle.load(file(home+'/data/text-analysis/vichakshana/SASD/'+keyword+'.pickle'))
        self.normalize_sasd_data()

        # Co-citation graph between pages (edge weight = similarity).
        self.cocitation_g = nx.read_graphml(home+'/data/text-analysis/vichakshana/page_graphs/'
                                            + keyword+'_entitylinks_core_cocitation.graphml', node_type=unicode)

        # index -> page name, page name -> index, and pairs sorted by index.
        self.fileindex_reverse = pickle.load(file(home+'/data/text-analysis/fileindex/'+keyword+'_fileindex.pickle'))
        self.fileindex = {v: k for k, v in self.fileindex_reverse.items()}
        self.fileindex_sorted = sorted(self.fileindex.items(), key=lambda x: x[1])

    def normalize_sasd_data(self):
        """Scale all relevance scores to [0, 1] by the maximum score."""
        max_score = max([i['relevance_score'] for i in self.sasd_data])
        for i in self.sasd_data:
            i['relevance_score'] /= max_score

    def compute_shortest_paths(self):
        """Compute all-pairs shortest paths and pickle one dense row per page."""
        import graph_tool.all as gt
        graph_file = home+'/data/text-analysis/vichakshana/page_graphs/' + self.keyword + '_entitylinks_core.graphml'
        g = gt.load_graph(graph_file, fmt='xml')
        distance_data = gt.shortest_distance(g)
        vertices = list(g.vertices())
        rows = []
        cols = []
        distances = []
        for src_v in vertices:
            for i in xrange(len(vertices)):
                # Distances > 100 are treated as unreachable and left out of
                # the sparse matrix (they read back as 0 from the dense form).
                if distance_data[src_v][i] > 100:
                    continue
                rows.append(self.fileindex[unicode(g.vertex_properties['_graphml_vertex_id'][src_v],
                                                   encoding='utf-8')])
                cols.append(self.fileindex[unicode(g.vertex_properties['_graphml_vertex_id'][vertices[i]],
                                                   encoding='utf-8')])
                distances.append(distance_data[src_v][i])
        n = max(self.fileindex.values())+1  # since the indexing starts with 0
        shortest_paths = sparse.coo_matrix((distances, (rows, cols)), shape=(n, n))
        shortest_paths = sparse.csr_matrix(shortest_paths).todense()
        if not exists(home+'/data/text-analysis/vichakshana/page_graphs/'+self.keyword+'_shortest_paths/'):
            mkdir(home+'/data/text-analysis/vichakshana/page_graphs/'+self.keyword+'_shortest_paths/')
        # One pickle per source page keeps later per-page loads cheap.
        for i in xrange(shortest_paths.shape[0]):
            pickle.dump(shortest_paths[i], file(home+'/data/text-analysis/vichakshana/page_graphs/'
                                                + self.keyword+'_shortest_paths/'+str(i)+'.pickle', 'w'))

    def get_sasd(self, page_a, page_b, shortest_paths):
        """
        There are three parts in this distance:

        1. Corelevance based
        2. Shortest paths: extended version of direct links
        3. Indirect links: cocitation and bibcouling graphs' direct edges

        Each distance varies between 0-1.
        The returned distance is a weighted average of all these distances.

        ``shortest_paths`` is the pickled distance row for ``page_a``.
        """
        if page_a != page_b:
            #1. Corelevance
            co_relevance = []
            for group in self.sasd_data:
                if page_a in group['pages'] and page_b in group['pages']:
                    co_relevance.append(group['relevance_score'])
            if len(co_relevance) > 0:
                similarity_corel = len(co_relevance)+np.average(co_relevance)
            else:
                similarity_corel = 0
            distance_corel = 1/(1+similarity_corel)

            #2. Shortest paths
            # Stored values are offset by one; a stored 0 (i.e. -1 here)
            # means "no recorded path" and is treated as unreachable.
            shortest_path_length = shortest_paths[0, self.fileindex[page_b]]-1
            if shortest_path_length == -1:
                shortest_path_length = np.inf
            similarity_shortest_path = 1/(1+shortest_path_length*shortest_path_length)
            distance_shortest_path = 1-similarity_shortest_path

            #3. Indirect links
            try:
                cocitation_weight = self.cocitation_g[page_a][page_b]['weight']
            except KeyError:
                cocitation_weight = 0
            distance_cocitation = 1-cocitation_weight

            #Finally, the weighted version
            distance = 0.25*distance_shortest_path + 0.25*distance_cocitation + 0.50*distance_corel
            #print co_relevance, distance_corel, distance_shortest_path, distance_cocitation
        else:
            # NOTE(review): distance of a page to itself is 1, not 0 --
            # confirm downstream ranking really expects this.
            distance = 1

        return distance

    #CELERY
    @app.task
    def get_sasd_celery(self, page_a):
        """Celery task: distances from ``page_a`` to every page, pickled to disk."""
        pages = self.fileindex.keys()
        distances = []
        shortest_paths = pickle.load(file(home+'/data/text-analysis/vichakshana/page_graphs/'
                                          + self.keyword+'_shortest_paths/'+str(self.fileindex[page_a])+'.pickle'))
        for page_b in pages:
            distances.append((self.fileindex[page_b], self.get_sasd(page_a, page_b, shortest_paths)))
        if not exists(home+'/data/text-analysis/vichakshana/SASD/'+self.keyword+'/'):
            mkdir(home+'/data/text-analysis/vichakshana/SASD/'+self.keyword)
        pickle.dump(distances, file(home+'/data/text-analysis/vichakshana/SASD/' +
                                    self.keyword+'/'+str(self.fileindex[page_a])+'.pickle', 'w'))

    def get_sasd_cluster(self, index_a):
        """Same as get_sasd_celery but keyed by index; skips existing results."""
        if exists(home+'/data/text-analysis/vichakshana/SASD/' + self.keyword+'/'+str(index_a)+'.pickle'):
            return
        page_a = self.fileindex_reverse[index_a]
        pages = self.fileindex.keys()
        distances = []
        shortest_paths = pickle.load(file(home+'/data/text-analysis/vichakshana/page_graphs/'
                                          + self.keyword+'_shortest_paths/'+str(index_a)+'.pickle'))
        for page_b in pages:
            distances.append((self.fileindex[page_b], self.get_sasd(page_a, page_b, shortest_paths)))
        if not exists(home+'/data/text-analysis/vichakshana/SASD/'+self.keyword+'/'):
            mkdir(home+'/data/text-analysis/vichakshana/SASD/'+self.keyword)
        pickle.dump(distances, file(home+'/data/text-analysis/vichakshana/SASD/' +
                                    self.keyword+'/'+str(self.fileindex[page_a])+'.pickle', 'w'))

    def compute_sasds(self, n_jobs=0):
        """Queue one celery job per page not yet computed; n_jobs=0 means no limit."""
        submitted_jobs = 0
        for page_a, index_a in self.fileindex_sorted:
            #CELERY
            if exists(home+'/data/text-analysis/vichakshana/SASD/' + self.keyword+'/'+str(index_a)+'.pickle'):
                continue
            self.get_sasd_celery.apply_async((self, page_a,))

            if n_jobs > 0:
                submitted_jobs += 1
                if submitted_jobs >= n_jobs:
                    print index_a
                    break

    def get_related(self, page, n=10, distance_threshold=0.5):
        """Return up to *n* (page, distance) pairs closer than the threshold."""
        distance_data = pickle.load(file(home+'/data/text-analysis/vichakshana/SASD/' +
                                         self.keyword+'/'+str(self.fileindex[page])+'.pickle'))
        distance_data = sorted(distance_data, key=lambda x: x[1])

        related_entities = []
        for i in distance_data:
            if i[1] >= distance_threshold:
                break
            if len(related_entities) < n:
                related_entities.append((self.fileindex_reverse[i[0]], i[1]))
        return related_entities
| gopalkoduri/vichakshana-public | vichakshana/SASD.py | Python | agpl-3.0 | 7,505 |
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Worker tasks for instance hosting & management
"""
# Imports #####################################################################
from huey.djhuey import crontab, periodic_task, task
from django.conf import settings
from django.template.defaultfilters import truncatewords
from instance.github import get_username_list_from_team, get_pr_list_from_username
from instance.models.instance import OpenEdXInstance
# Logging #####################################################################
import logging
logger = logging.getLogger(__name__)
# Tasks #######################################################################
@task()
def provision_instance(instance_pk):
    """
    Run provisioning on an existing instance

    Args:
        instance_pk: primary key of the OpenEdXInstance to provision.
    """
    logger.info('Retreiving instance: pk=%s', instance_pk)
    instance = OpenEdXInstance.objects.get(pk=instance_pk)

    logger.info('Running provisioning on %s', instance)
    instance.provision()
@periodic_task(crontab(minute='*/1'))
def watch_pr():
    """
    Automatically create/update sandboxes for PRs opened by members of the watched
    organization on the watched repository

    Runs every minute.  A sandbox instance is keyed by its PR subdomain plus
    fork/branch; its metadata is refreshed on every run, but provisioning is
    only triggered when the instance is first created.
    """
    team_username_list = get_username_list_from_team(settings.WATCH_ORGANIZATION)

    for username in team_username_list:
        for pr in get_pr_list_from_username(username, settings.WATCH_FORK):
            pr_sub_domain = 'pr{number}.sandbox'.format(number=pr.number)

            instance, created = OpenEdXInstance.objects.get_or_create(
                sub_domain=pr_sub_domain,
                fork_name=pr.fork_name,
                branch_name=pr.branch_name,
            )
            truncated_title = truncatewords(pr.title, 4)
            instance.name = 'PR#{pr.number}: {truncated_title} ({pr.username}) - {i.reference_name}'\
                            .format(pr=pr, i=instance, truncated_title=truncated_title)
            instance.github_pr_number = pr.number
            instance.ansible_extra_settings = pr.extra_settings
            instance.save()

            if created:
                logger.info('New PR found, creating sandbox: %s', pr)
                # Kick off provisioning asynchronously for brand-new PRs only.
                provision_instance(instance.pk)
| brousch/opencraft | instance/tasks.py | Python | agpl-3.0 | 2,970 |
#!/usr/bin/env python3
import os, sys, logging, urllib, time, string, json, argparse, collections, datetime, re, bz2, math
from concurrent.futures import ThreadPoolExecutor, wait
import lz4
pool = ThreadPoolExecutor(max_workers=16)
logging.basicConfig(level=logging.DEBUG)
sys.path.append(os.path.join(os.path.dirname(__file__), "lib", "python"))
from carta import (logger, POI)
from mongoengine import *
connect('carta')
# Minimum straight-line separation (in coordinate units, presumably degrees --
# TODO confirm) required between two POIs for both to be labeled at a given
# zoom level. Index 0 is the most zoomed-out level; thresholds shrink
# geometrically (factor 1.6) as zoom increases.
zoomspacing = [round(0.0001*(1.6**n), 4) for n in range(21, 1, -1)]
def compute_occlusions(box):
    """
    For every pair of POIs inside ``box`` (a (SW, NE) corner pair), raise the
    ``min_zoom`` of the lower-ranked point so that nearby labels are hidden
    until the map is zoomed in far enough to separate them.
    """
    SW, NE = box
    points = list(POI.objects(at__geo_within_box=(SW, NE)))
    print("Starting", SW, NE, len(points))
    for i, p1 in enumerate(points):
        for j, p2 in enumerate(points[i+1:]):
            coords1, coords2 = p1.at['coordinates'], p2.at['coordinates']
            # Euclidean distance in coordinate space; the abs() calls are
            # redundant before squaring but harmless.
            dist = math.sqrt(abs(coords1[0]-coords2[0])**2 + abs(coords1[1]-coords2[1])**2)
            # The lower-ranked point of the pair is the one that gets occluded.
            occluded_point = p1 if p1.rank < p2.rank else p2
            # zoomspacing is ordered large -> small: scan until the first zoom
            # level whose spacing threshold the pair already satisfies.
            # If dist is below every threshold, zoom ends at the last index.
            for zoom, spacing in enumerate(zoomspacing):
                if dist < spacing:
                    continue
                break
            occluded_point.min_zoom = max(occluded_point.min_zoom, zoom)
        # Only p1 is saved here; updates applied to a p2 are persisted later
        # when that same object is visited as p1 (every element gets saved).
        p1.save()
    print("Finished", SW, NE, len(points))
# Tile the globe into step x step degree boxes and process each box in a
# worker thread.
step = 2
boxes = [
    [(lng, lat), (lng + step, lat + step)]
    for lat in range(-90, 90, step)
    for lng in range(-180, 180, step)
]
# Drain the result iterator so any exception raised in a worker propagates.
for result in pool.map(compute_occlusions, boxes):
    pass
# docs_by_rank = sorted(POI.objects(at__geo_within_center=(doc.at['coordinates'], spacing)),
# key=lambda point: point.rank or 0,
# reverse=True)
# for doc in POI.objects(at__geo_within_center=(doc.at['coordinates'], 1), min_zoom__gt=0).order_by('-rank'):
# for doc2 in POI.objects(at__geo_within_center=(doc.at['coordinates'], zoomspacing[doc.min_zoom]), min_zoom__lte=doc.min_zoom).order_by('-rank'):
| kislyuk/cartographer | postproc_db.py | Python | agpl-3.0 | 1,920 |
# -*- coding: utf-8 -*-
import logging
from moulinette.utils.text import random_ascii
from moulinette.core import MoulinetteError, MoulinetteAuthenticationError
from moulinette.authentication import BaseAuthenticator
logger = logging.getLogger("moulinette.authenticator.yoloswag")
# Dummy authenticator implementation
session_secret = random_ascii()
class Authenticator(BaseAuthenticator):
    """Dummy password check used by the moulinette test suite.

    The expected "password" is simply the authenticator's name ("dummy"), and
    sessions are tracked through a signed bottle cookie named "moulitest".
    """

    name = "dummy"

    def __init__(self, *args, **kwargs):
        # The dummy backend needs no configuration.
        pass

    def _authenticate_credentials(self, credentials=None):
        # Accept only the literal authenticator name as credentials.
        if credentials == self.name:
            return
        raise MoulinetteError("invalid_password", raw_msg=True)

    def set_session_cookie(self, infos):
        from bottle import response

        assert isinstance(infos, dict)

        # Reuse the existing session id when there is one; otherwise
        # get_session_cookie() hands back a freshly generated id.
        previous = self.get_session_cookie(raise_if_no_session_exists=False)
        payload = {"id": previous["id"]}
        payload.update(infos)

        response.set_cookie(
            "moulitest",
            payload,
            secure=True,
            secret=session_secret,
            httponly=True,
            # samesite is not supported by Bottle 0.12; enable once upgraded.
        )

    def get_session_cookie(self, raise_if_no_session_exists=True):
        from bottle import request

        try:
            session_infos = request.get_cookie(
                "moulitest", secret=session_secret, default={}
            )
        except Exception:
            session_infos = {}

        if not session_infos:
            if raise_if_no_session_exists:
                raise MoulinetteAuthenticationError("unable_authenticate")
            return {"id": random_ascii()}

        if "id" not in session_infos:
            session_infos["id"] = random_ascii()

        return session_infos

    def delete_session_cookie(self):
        from bottle import response

        # Expire the cookie client-side, then drop it from the response.
        response.set_cookie("moulitest", "", max_age=-1)
        response.delete_cookie("moulitest")
| YunoHost/moulinette | test/src/authenticators/dummy.py | Python | agpl-3.0 | 2,129 |
# -*- coding: utf-8 -*-
"""
Models for Credit Eligibility for courses.
Credit courses allow students to receive university credit for
successful completion of a course on EdX
"""
from __future__ import absolute_import
import datetime
import logging
from collections import defaultdict
import pytz
import six
from config_models.models import ConfigurationModel
from django.conf import settings
from django.core.cache import cache
from django.core.validators import RegexValidator
from django.db import IntegrityError, models, transaction
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from edx_django_utils.cache import RequestCache
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField
from openedx.core.lib.cache_utils import request_cached
CREDIT_PROVIDER_ID_REGEX = r"[a-z,A-Z,0-9,\-]+"
log = logging.getLogger(__name__)
@python_2_unicode_compatible
class CreditProvider(TimeStampedModel):
    """
    This model represents an institution that can grant credit for a course.
    Each provider is identified by unique ID (e.g., 'ASU'). CreditProvider also
    includes a `url` where the student will be sent when he/she will try to
    get credit for course. Eligibility duration will be use to set duration
    for which credit eligible message appears on dashboard.
    .. no_pii:
    """
    # External identifier used in integrations; format enforced by the regex.
    provider_id = models.CharField(
        max_length=255,
        unique=True,
        validators=[
            RegexValidator(
                regex=CREDIT_PROVIDER_ID_REGEX,
                message="Only alphanumeric characters and hyphens (-) are allowed",
                code="invalid_provider_id",
            )
        ],
        help_text=ugettext_lazy(
            "Unique identifier for this credit provider. "
            "Only alphanumeric characters and hyphens (-) are allowed. "
            "The identifier is case-sensitive."
        )
    )
    active = models.BooleanField(
        default=True,
        help_text=ugettext_lazy("Whether the credit provider is currently enabled.")
    )
    display_name = models.CharField(
        max_length=255,
        help_text=ugettext_lazy("Name of the credit provider displayed to users")
    )
    enable_integration = models.BooleanField(
        default=False,
        help_text=ugettext_lazy(
            "When true, automatically notify the credit provider "
            "when a user requests credit. "
            "In order for this to work, a shared secret key MUST be configured "
            "for the credit provider in secure auth settings."
        )
    )
    # NOTE(review): help_text below contains a typo ("will the the"); left
    # unchanged here because help_text changes require a migration.
    provider_url = models.URLField(
        default="",
        help_text=ugettext_lazy(
            "URL of the credit provider. If automatic integration is "
            "enabled, this will the the end-point that we POST to "
            "to notify the provider of a credit request. Otherwise, the "
            "user will be shown a link to this URL, so the user can "
            "request credit from the provider directly."
        )
    )
    provider_status_url = models.URLField(
        default="",
        help_text=ugettext_lazy(
            "URL from the credit provider where the user can check the status "
            "of his or her request for credit. This is displayed to students "
            "*after* they have requested credit."
        )
    )
    provider_description = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Description for the credit provider displayed to users."
        )
    )
    fulfillment_instructions = models.TextField(
        null=True,
        blank=True,
        help_text=ugettext_lazy(
            "Plain text or html content for displaying further steps on "
            "receipt page *after* paying for the credit to get credit for a "
            "credit course against a credit provider."
        )
    )
    eligibility_email_message = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Plain text or html content for displaying custom message inside "
            "credit eligibility email content which is sent when user has met "
            "all credit eligibility requirements."
        )
    )
    receipt_email_message = models.TextField(
        default="",
        help_text=ugettext_lazy(
            "Plain text or html content for displaying custom message inside "
            "credit receipt email content which is sent *after* paying to get "
            "credit for a credit course."
        )
    )
    thumbnail_url = models.URLField(
        default="",
        max_length=255,
        help_text=ugettext_lazy(
            "Thumbnail image url of the credit provider."
        )
    )
    # Cache key for the serialized list of active providers; invalidated by
    # the post_save/post_delete receiver below (invalidate_provider_cache).
    CREDIT_PROVIDERS_CACHE_KEY = "credit.providers.list"
    @classmethod
    def get_credit_providers(cls, providers_list=None):
        """
        Retrieve a list of all credit providers or filter on providers_list, represented
        as dictionaries.
        Arguments:
            provider_list (list of strings or None): contains list of ids if required results
            to be filtered, None for all providers.
        Returns:
            list of providers represented as dictionaries.
        """
        # Attempt to retrieve the credit provider list from the cache if provider_list is None
        # The cache key is invalidated when the provider list is updated
        # (a post-save signal handler on the CreditProvider model)
        # This doesn't happen very often, so we would expect a *very* high
        # cache hit rate.
        credit_providers = cache.get(cls.CREDIT_PROVIDERS_CACHE_KEY)
        if credit_providers is None:
            # Cache miss: construct the provider list and save it in the cache
            credit_providers = CreditProvider.objects.filter(active=True)
            credit_providers = [
                {
                    "id": provider.provider_id,
                    "display_name": provider.display_name,
                    "url": provider.provider_url,
                    "status_url": provider.provider_status_url,
                    "description": provider.provider_description,
                    "enable_integration": provider.enable_integration,
                    "fulfillment_instructions": provider.fulfillment_instructions,
                    "thumbnail_url": provider.thumbnail_url,
                }
                for provider in credit_providers
            ]
            cache.set(cls.CREDIT_PROVIDERS_CACHE_KEY, credit_providers)
        # Filter *after* caching so every caller shares one cached full list.
        if providers_list:
            credit_providers = [provider for provider in credit_providers if provider['id'] in providers_list]
        return credit_providers
    @classmethod
    def get_credit_provider(cls, provider_id):
        """
        Retrieve a credit provider with provided 'provider_id'.
        Returns None when no *active* provider matches.
        """
        try:
            return CreditProvider.objects.get(active=True, provider_id=provider_id)
        except cls.DoesNotExist:
            return None
    def __str__(self):
        """Unicode representation of the credit provider. """
        return self.provider_id
@receiver(models.signals.post_save, sender=CreditProvider)
@receiver(models.signals.post_delete, sender=CreditProvider)
def invalidate_provider_cache(sender, **kwargs):  # pylint: disable=unused-argument
    """Invalidate the cache of credit providers. """
    # Any provider change drops the cached list; it is rebuilt lazily on the
    # next call to CreditProvider.get_credit_providers().
    cache.delete(CreditProvider.CREDIT_PROVIDERS_CACHE_KEY)
@python_2_unicode_compatible
class CreditCourse(models.Model):
    """
    Model for tracking a credit course.
    .. no_pii:
    """
    course_key = CourseKeyField(max_length=255, db_index=True, unique=True)
    enabled = models.BooleanField(default=False)
    # Caches the set of stringified course keys for all enabled credit
    # courses; invalidated by the post_save/post_delete receiver below.
    CREDIT_COURSES_CACHE_KEY = "credit.courses.set"
    @classmethod
    def is_credit_course(cls, course_key):
        """
        Check whether the course has been configured for credit.
        Args:
            course_key (CourseKey): Identifier of the course.
        Returns:
            bool: True iff this is a credit course.
        """
        credit_courses = cache.get(cls.CREDIT_COURSES_CACHE_KEY)
        if credit_courses is None:
            # Cache miss: rebuild the set of enabled course keys from the DB.
            credit_courses = set(
                six.text_type(course.course_key)
                for course in cls.objects.filter(enabled=True)
            )
            cache.set(cls.CREDIT_COURSES_CACHE_KEY, credit_courses)
        return six.text_type(course_key) in credit_courses
    @classmethod
    def get_credit_course(cls, course_key):
        """
        Get the credit course if exists for the given 'course_key'.
        Args:
            course_key(CourseKey): The course identifier
        Raises:
            DoesNotExist if no CreditCourse exists for the given course key.
        Returns:
            CreditCourse if one exists for the given course key.
        """
        return cls.objects.get(course_key=course_key, enabled=True)
    def __str__(self):
        """Unicode representation of the credit course. """
        return six.text_type(self.course_key)
@receiver(models.signals.post_save, sender=CreditCourse)
@receiver(models.signals.post_delete, sender=CreditCourse)
def invalidate_credit_courses_cache(sender, **kwargs):  # pylint: disable=unused-argument
    """Invalidate the cache of credit courses. """
    # Rebuilt lazily on the next CreditCourse.is_credit_course() call.
    cache.delete(CreditCourse.CREDIT_COURSES_CACHE_KEY)
@python_2_unicode_compatible
class CreditRequirement(TimeStampedModel):
    """
    This model represents a credit requirement.
    Each requirement is uniquely identified by its 'namespace' and
    'name' fields.
    The 'name' field stores the unique name or location (in case of XBlock)
    for a requirement, which serves as the unique identifier for that
    requirement.
    The 'display_name' field stores the display name of the requirement.
    The 'criteria' field dictionary provides additional information, clients
    may need to determine whether a user has satisfied the requirement.
    .. no_pii:
    """
    course = models.ForeignKey(CreditCourse, related_name="credit_requirements", on_delete=models.CASCADE)
    namespace = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    display_name = models.CharField(max_length=255, default="")
    # Position of the requirement in courseware; used by Meta.ordering below.
    order = models.PositiveIntegerField(default=0)
    criteria = JSONField()
    active = models.BooleanField(default=True)
    # Request-scoped cache namespace; cleared by the post_save/post_delete
    # receiver below (invalidate_credit_requirement_cache).
    CACHE_NAMESPACE = u"credit.CreditRequirement.cache."
    class Meta(object):
        unique_together = ('namespace', 'name', 'course')
        ordering = ["order"]
    def __str__(self):
        return u'{course_id} - {name}'.format(course_id=self.course.course_key, name=self.display_name)
    @classmethod
    def add_or_update_course_requirement(cls, credit_course, requirement, order):
        """
        Add requirement to a given course.
        Args:
            credit_course(CreditCourse): The identifier for credit course
            requirement(dict): Requirement dict to be added
            order(int): Position of the requirement in courseware
        Returns:
            (CreditRequirement, created) tuple
        """
        credit_requirement, created = cls.objects.get_or_create(
            course=credit_course,
            namespace=requirement["namespace"],
            name=requirement["name"],
            defaults={
                "display_name": requirement["display_name"],
                "criteria": requirement["criteria"],
                "order": order,
                "active": True
            }
        )
        if not created:
            # Refresh and re-activate an existing requirement in place.
            credit_requirement.criteria = requirement["criteria"]
            credit_requirement.active = True
            credit_requirement.order = order
            credit_requirement.display_name = requirement["display_name"]
            credit_requirement.save()
        return credit_requirement, created
    @classmethod
    @request_cached(namespace=CACHE_NAMESPACE)
    def get_course_requirements(cls, course_key, namespace=None, name=None):
        """
        Get credit requirements of a given course.
        Args:
            course_key (CourseKey): The identifier for a course
        Keyword Arguments
            namespace (str): Optionally filter credit requirements by namespace.
            name (str): Optionally filter credit requirements by name.
        Returns:
            QuerySet of CreditRequirement model
        """
        # order credit requirements according to their appearance in courseware
        requirements = CreditRequirement.objects.filter(course__course_key=course_key, active=True)
        if namespace is not None:
            requirements = requirements.filter(namespace=namespace)
        if name is not None:
            requirements = requirements.filter(name=name)
        return requirements
    @classmethod
    def disable_credit_requirements(cls, requirement_ids):
        """
        Mark the given requirements inactive.
        Args:
            requirement_ids(list): List of ids
        Returns:
            None
        """
        cls.objects.filter(id__in=requirement_ids).update(active=False)
    @classmethod
    def get_course_requirement(cls, course_key, namespace, name):
        """
        Get credit requirement of a given course.
        Args:
            course_key(CourseKey): The identifier for a course
            namespace(str): Namespace of credit course requirements
            name(str): Name of credit course requirement
        Returns:
            CreditRequirement object if exists, None otherwise.
        """
        try:
            return cls.objects.get(
                course__course_key=course_key, active=True, namespace=namespace, name=name
            )
        except cls.DoesNotExist:
            return None
@receiver(models.signals.post_save, sender=CreditRequirement)
@receiver(models.signals.post_delete, sender=CreditRequirement)
def invalidate_credit_requirement_cache(sender, **kwargs):  # pylint: disable=unused-argument
    """Invalidate the cache of credit requirements. """
    # Clears the request-scoped cache used by get_course_requirements().
    RequestCache(namespace=CreditRequirement.CACHE_NAMESPACE).clear()
class CreditRequirementStatus(TimeStampedModel):
    """
    This model represents the status of each requirement.
    For a particular credit requirement, a user can either:
    1) Have satisfied the requirement (example: approved in-course reverification)
    2) Have failed the requirement (example: denied in-course reverification)
    3) Neither satisfied nor failed (example: the user hasn't yet attempted in-course reverification).
    Cases (1) and (2) are represented by having a CreditRequirementStatus with
    the status set to "satisfied" or "failed", respectively.
    In case (3), no CreditRequirementStatus record will exist for the requirement and user.
    .. no_pii:
    """
    REQUIREMENT_STATUS_CHOICES = (
        ("satisfied", "satisfied"),
        ("failed", "failed"),
        ("declined", "declined"),
    )
    username = models.CharField(max_length=255, db_index=True)
    requirement = models.ForeignKey(CreditRequirement, related_name="statuses", on_delete=models.CASCADE)
    status = models.CharField(max_length=32, choices=REQUIREMENT_STATUS_CHOICES)
    # Include additional information about why the user satisfied or failed
    # the requirement. This is specific to the type of requirement.
    # For example, the minimum grade requirement might record the user's
    # final grade when the user completes the course. This allows us to display
    # the grade to users later and to send the information to credit providers.
    # Use a callable default so each unsaved model instance gets its own dict;
    # the previous literal ``default={}`` was a single dict object shared
    # between all instances, so mutations could leak across them.
    reason = JSONField(default=dict)
    class Meta(object):
        unique_together = ('username', 'requirement')
        verbose_name_plural = ugettext_lazy('Credit requirement statuses')
    @classmethod
    def get_statuses(cls, requirements, username):
        """
        Get credit requirement statuses of given requirement and username
        Args:
            requirements(list of CreditRequirements): The identifier for a requirement
            username(str): username of the user
        Returns:
            Queryset 'CreditRequirementStatus' objects
        """
        return cls.objects.filter(requirement__in=requirements, username=username)
    @classmethod
    @transaction.atomic
    def add_or_update_requirement_status(cls, username, requirement, status="satisfied", reason=None):
        """
        Add credit requirement status for given username.
        Args:
            username(str): Username of the user
            requirement(CreditRequirement): 'CreditRequirement' object
            status(str): Status of the requirement
            reason(dict): Reason of the status
        """
        requirement_status, created = cls.objects.get_or_create(
            username=username,
            requirement=requirement,
            defaults={"reason": reason, "status": status}
        )
        if not created:
            # do not update status to `failed` if user has `satisfied` the requirement
            if status == 'failed' and requirement_status.status == 'satisfied':
                log.info(
                    u'Can not change status of credit requirement "%s" from satisfied to failed ',
                    requirement_status.requirement_id
                )
                return
            requirement_status.status = status
            requirement_status.reason = reason
            requirement_status.save()
    @classmethod
    @transaction.atomic
    def remove_requirement_status(cls, username, requirement):
        """
        Remove credit requirement status for given username.
        Args:
            username(str): Username of the user
            requirement(CreditRequirement): 'CreditRequirement' object
        """
        try:
            requirement_status = cls.objects.get(username=username, requirement=requirement)
            requirement_status.delete()
        except cls.DoesNotExist:
            # Best-effort removal: log and return rather than raising, so
            # callers don't have to guard against a missing status row.
            log_msg = (
                u'The requirement status {requirement} does not exist for username {username}.'.format(
                    requirement=requirement,
                    username=username
                )
            )
            log.error(log_msg)
            return
    @classmethod
    def retire_user(cls, retirement):
        """
        Retire a user by anonymizing
        Args:
            retirement: UserRetirementStatus of the user being retired
        Returns:
            bool: True if any rows were anonymized.
        """
        requirement_statuses = cls.objects.filter(
            username=retirement.original_username
        ).update(
            username=retirement.retired_username,
            reason={},
        )
        return requirement_statuses > 0
def default_deadline_for_credit_eligibility():
    """
    Compute the default expiration deadline for a new CreditEligibility.

    The deadline is ``CREDIT_ELIGIBILITY_EXPIRATION_DAYS`` days from the
    current UTC time (365 days when the setting is absent).
    """
    validity_days = getattr(settings, "CREDIT_ELIGIBILITY_EXPIRATION_DAYS", 365)
    return datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=validity_days)
@python_2_unicode_compatible
class CreditEligibility(TimeStampedModel):
    """
    A record of a user's eligibility for credit for a specific course.
    .. no_pii:
    """
    username = models.CharField(max_length=255, db_index=True)
    course = models.ForeignKey(CreditCourse, related_name="eligibilities", on_delete=models.CASCADE)
    # Deadline for when credit eligibility will expire.
    # Once eligibility expires, users will no longer be able to purchase
    # or request credit.
    # We save the deadline as a database field just in case
    # we need to override the deadline for particular students.
    deadline = models.DateTimeField(
        default=default_deadline_for_credit_eligibility,
        help_text=ugettext_lazy("Deadline for purchasing and requesting credit.")
    )
    class Meta(object):
        unique_together = ('username', 'course')
        verbose_name_plural = "Credit eligibilities"
    @classmethod
    def update_eligibility(cls, requirements, username, course_key):
        """
        Update the user's credit eligibility for a course.
        A user is eligible for credit when the user has satisfied
        all requirements for credit in the course.
        Arguments:
            requirements (Queryset): Queryset of `CreditRequirement`s to check.
            username (str): Identifier of the user being updated.
            course_key (CourseKey): Identifier of the course.
        Returns: tuple of (is_eligible, eligibility_record_created)
        """
        # Check all requirements for the course to determine if the user
        # is eligible. We need to check all the *requirements*
        # (not just the *statuses*) in case the user doesn't yet have
        # a status for a particular requirement.
        status_by_req = defaultdict(lambda: False)
        for status in CreditRequirementStatus.get_statuses(requirements, username):
            status_by_req[status.requirement.id] = status.status
        # A requirement with no recorded status defaults to False, which can
        # never compare equal to "satisfied".
        is_eligible = all(status_by_req[req.id] == "satisfied" for req in requirements)
        # If we're eligible, then mark the user as being eligible for credit.
        if is_eligible:
            try:
                CreditEligibility.objects.create(
                    username=username,
                    course=CreditCourse.objects.get(course_key=course_key),
                )
                return is_eligible, True
            except IntegrityError:
                # (username, course) is unique_together, so a concurrent call
                # already recorded the eligibility: eligible, but not new.
                return is_eligible, False
        else:
            return is_eligible, False
    @classmethod
    def get_user_eligibilities(cls, username):
        """
        Returns the eligibilities of given user.
        Only unexpired eligibilities for currently-enabled courses are returned.
        Args:
            username(str): Username of the user
        Returns:
            CreditEligibility queryset for the user
        """
        return cls.objects.filter(
            username=username,
            course__enabled=True,
            deadline__gt=datetime.datetime.now(pytz.UTC)
        ).select_related('course')
    @classmethod
    def is_user_eligible_for_credit(cls, course_key, username):
        """
        Check if the given user is eligible for the provided credit course
        Args:
            course_key(CourseKey): The course identifier
            username(str): The username of the user
        Returns:
            Bool True if the user eligible for credit course else False
        """
        return cls.objects.filter(
            course__course_key=course_key,
            course__enabled=True,
            username=username,
            deadline__gt=datetime.datetime.now(pytz.UTC),
        ).exists()
    def __str__(self):
        """Unicode representation of the credit eligibility. """
        return u"{user}, {course}".format(
            user=self.username,
            course=self.course.course_key,
        )
@python_2_unicode_compatible
class CreditRequest(TimeStampedModel):
    """
    A request for credit from a particular credit provider.
    When a user initiates a request for credit, a CreditRequest record will be created.
    Each CreditRequest is assigned a unique identifier so we can find it when the request
    is approved by the provider. The CreditRequest record stores the parameters to be sent
    at the time the request is made. If the user re-issues the request
    (perhaps because the user did not finish filling in forms on the credit provider's site),
    the request record will be updated, but the UUID will remain the same.
    .. no_pii:
    """
    # Stable identifier shared across re-issues of the same request
    # (see class docstring).
    uuid = models.CharField(max_length=32, unique=True, db_index=True)
    username = models.CharField(max_length=255, db_index=True)
    course = models.ForeignKey(CreditCourse, related_name="credit_requests", on_delete=models.CASCADE)
    provider = models.ForeignKey(CreditProvider, related_name="credit_requests", on_delete=models.CASCADE)
    # Parameters to be sent to the provider when the request is made.
    parameters = JSONField()
    REQUEST_STATUS_PENDING = "pending"
    REQUEST_STATUS_APPROVED = "approved"
    REQUEST_STATUS_REJECTED = "rejected"
    REQUEST_STATUS_CHOICES = (
        (REQUEST_STATUS_PENDING, "Pending"),
        (REQUEST_STATUS_APPROVED, "Approved"),
        (REQUEST_STATUS_REJECTED, "Rejected"),
    )
    status = models.CharField(
        max_length=255,
        choices=REQUEST_STATUS_CHOICES,
        default=REQUEST_STATUS_PENDING
    )
    class Meta(object):
        # Enforce the constraint that each user can have exactly one outstanding
        # request to a given provider. Multiple requests use the same UUID.
        unique_together = ('username', 'course', 'provider')
        get_latest_by = 'created'
    @classmethod
    def retire_user(cls, retirement):
        """
        Obfuscates CreditRecord instances associated with `original_username`.
        Empties the records' `parameters` field and replaces username with its
        anonymized value, `retired_username`.
        Returns True if any rows were anonymized.
        """
        num_updated_credit_requests = cls.objects.filter(
            username=retirement.original_username
        ).update(
            username=retirement.retired_username,
            parameters={},
        )
        return num_updated_credit_requests > 0
    @classmethod
    def credit_requests_for_user(cls, username):
        """
        Retrieve all credit requests for a user.
        Arguments:
            username (unicode): The username of the user.
        Returns: list
        Example Usage:
            >>> CreditRequest.credit_requests_for_user("bob")
            [
                {
                    "uuid": "557168d0f7664fe59097106c67c3f847",
                    "timestamp": 1434631630,
                    "course_key": "course-v1:HogwartsX+Potions101+1T2015",
                    "provider": {
                        "id": "HogwartsX",
                        "display_name": "Hogwarts School of Witchcraft and Wizardry",
                    },
                    "status": "pending" # or "approved" or "rejected"
                }
            ]
        """
        return [
            {
                "uuid": request.uuid,
                "timestamp": request.parameters.get("timestamp"),
                "course_key": request.course.course_key,
                "provider": {
                    "id": request.provider.provider_id,
                    "display_name": request.provider.display_name
                },
                "status": request.status
            }
            for request in cls.objects.select_related('course', 'provider').filter(username=username)
        ]
    @classmethod
    def get_user_request_status(cls, username, course_key):
        """
        Returns the latest credit request of user against the given course.
        Args:
            username(str): The username of requesting user
            course_key(CourseKey): The course identifier
        Returns:
            CreditRequest if any otherwise None
        """
        try:
            # .latest() relies on Meta.get_latest_by = 'created'.
            return cls.objects.filter(
                username=username, course__course_key=course_key
            ).select_related('course', 'provider').latest()
        except cls.DoesNotExist:
            return None
    def __str__(self):
        """Unicode representation of a credit request."""
        return u"{course}, {provider}, {status}".format(
            course=self.course.course_key,
            provider=self.provider.provider_id,
            status=self.status,
        )
@python_2_unicode_compatible
class CreditConfig(ConfigurationModel):
    """
    Manage credit configuration
    .. no_pii:
    """
    # Cache key under which the credit providers API data is stored.
    CACHE_KEY = 'credit.providers.api.data'
    cache_ttl = models.PositiveIntegerField(
        verbose_name=ugettext_lazy("Cache Time To Live"),
        default=0,
        help_text=ugettext_lazy(
            "Specified in seconds. Enable caching by setting this to a value greater than 0."
        )
    )
    @property
    def is_cache_enabled(self):
        """Whether responses from the credit providers API will be cached."""
        return self.enabled and self.cache_ttl > 0
    def __str__(self):
        """Unicode representation of the config. """
        return 'Credit Configuration'
| ESOedX/edx-platform | openedx/core/djangoapps/credit/models.py | Python | agpl-3.0 | 28,155 |
# The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.mfd.evenly_discretized` defines an evenly
discretized MFD.
"""
from openquake.hazardlib.mfd.base import BaseMFD
from openquake.baselib.slots import with_slots
@with_slots
class EvenlyDiscretizedMFD(BaseMFD):
    """
    An MFD specified directly as a precalculated histogram.

    :param min_mag:
        Positive float value representing the middle point of the first
        bin in the histogram.
    :param bin_width:
        A positive float value -- the width of a single histogram bin.
    :param occurrence_rates:
        The list of non-negative float values representing the actual
        annual occurrence rates. The resulting histogram has as many bins
        as this list length.
    """
    MODIFICATIONS = set(('set_mfd',))

    _slots_ = ['min_mag', 'bin_width', 'occurrence_rates']

    def __init__(self, min_mag, bin_width, occurrence_rates):
        self.min_mag = min_mag
        self.bin_width = bin_width
        self.occurrence_rates = occurrence_rates
        self.check_constraints()

    def check_constraints(self):
        """
        Validate the histogram parameters.

        Raises :exc:`ValueError` unless the bin width is positive, the rates
        list is non-empty, contains only non-negative values with at least
        one positive value, and the minimum magnitude is non-negative.
        """
        if not self.bin_width > 0:
            raise ValueError('bin width must be positive')
        if not self.occurrence_rates:
            raise ValueError('at least one bin must be specified')
        if not all(rate >= 0 for rate in self.occurrence_rates):
            raise ValueError('all occurrence rates must not be negative')
        if not any(rate > 0 for rate in self.occurrence_rates):
            raise ValueError('at least one occurrence rate must be positive')
        if not self.min_mag >= 0:
            raise ValueError('minimum magnitude must be non-negative')

    def get_annual_occurrence_rates(self):
        """
        Return the precalculated ``(magnitude, rate)`` pairs, one per bin.
        """
        pairs = []
        for bin_index, rate in enumerate(self.occurrence_rates):
            pairs.append((self.min_mag + bin_index * self.bin_width, rate))
        return pairs

    def get_min_max_mag(self):
        """
        Return the magnitudes of the first and the last histogram bins.
        """
        num_bins = len(self.occurrence_rates)
        max_mag = self.min_mag + self.bin_width * (num_bins - 1)
        return self.min_mag, max_mag

    def modify_set_mfd(self, min_mag, bin_width, occurrence_rates):
        """
        Apply the absolute ``set_mfd`` modification: replace all three
        histogram parameters at once and re-validate the result.

        :param min_mag:
            Positive float value representing the middle point of the first
            bin in the histogram.
        :param bin_width:
            A positive float value -- the width of a single histogram bin.
        :param occurrence_rates:
            The list of non-negative float values representing the actual
            annual occurrence rates. The resulting histogram has as many bins
            as this list length.
        """
        self.min_mag = min_mag
        self.bin_width = bin_width
        self.occurrence_rates = occurrence_rates
        self.check_constraints()
| silviacanessa/oq-hazardlib | openquake/hazardlib/mfd/evenly_discretized.py | Python | agpl-3.0 | 3,967 |
"""
Configuration for the ``student`` Django application.
"""
from __future__ import absolute_import
from django.apps import AppConfig
from django.contrib.auth.signals import user_logged_in
class StudentConfig(AppConfig):
    """
    Default configuration for the ``student`` application.
    """
    name = 'student'

    def ready(self):
        # Django updates ``last_login`` on every authentication by default;
        # unhook that receiver so the project can provide its own.
        from django.contrib.auth.models import update_last_login as default_last_login_handler
        user_logged_in.disconnect(default_last_login_handler)
        # Hook up the project-specific replacement.
        from .signals.receivers import update_last_login as custom_last_login_handler
        user_logged_in.connect(custom_last_login_handler)
| procangroup/edx-platform | common/djangoapps/student/apps.py | Python | agpl-3.0 | 603 |
from django.conf.urls import patterns, include, url
from volunteers import views
# All volunteer routes are currently disabled; the commented entries below are
# kept as a reference for the intended URL scheme.
urlpatterns = patterns('',
    #url(r'^$', views.index, name='volunteer_index'),
    #url(r'^(?P<volunteer_id>\d+)/$', views.volunteer_detail, name='volunteer_detail'),
    #url(r'^AddTasks/$', views.add_tasks, name='add_tasks'),
    #url(r'^(?P<volunteer_id>\d+)/edit/$', views.volunteer_edit, name='volunteer_edit'),
)
| cateee/fosdem-volunteers | volunteers/urls.py | Python | agpl-3.0 | 403 |
# Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
import doctest
import unittest
from lp.testing.layers import DatabaseFunctionalLayer
def test_suite():
    """Collect the widget doctest modules into one layered test suite."""
    doctest_modules = (
        'lp.app.widgets.textwidgets',
        'lp.app.widgets.date',
    )
    suite = unittest.TestSuite()
    suite.layer = DatabaseFunctionalLayer
    for module_name in doctest_modules:
        suite.addTest(doctest.DocTestSuite(module_name))
    return suite
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/app/widgets/tests/test_widget_doctests.py | Python | agpl-3.0 | 498 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import re
from openerp import tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
# main mako-like expression pattern: matches the shortest possible '${...}'
# span (non-greedy), so several placeholders on one line are substituted
# independently by render_template() below.
EXPRESSION_PATTERN = re.compile('(\$\{.+?\})')
class mail_compose_message(osv.TransientModel):
    """ Generic message composition wizard. You may inherit from this wizard
        at model and view levels to provide specific features.

        The behavior of the wizard depends on the composition_mode field:
        - 'reply': reply to a previous message. The wizard is pre-populated
            via ``get_message_data``.
        - 'comment': new post on a record. The wizard is pre-populated via
            ``get_record_data``
        - 'mass_mail': wizard in mass mailing mode where the mail details can
            contain template placeholders that will be merged with actual data
            before being sent to each recipient.
    """
    _name = 'mail.compose.message'
    _inherit = 'mail.message'
    _description = 'Email composition wizard'
    _log_access = True

    def default_get(self, cr, uid, fields, context=None):
        """ Handle composition mode. Some details about context keys:
            - comment: default mode, model and ID of a record the user comments
                - default_model or active_model
                - default_res_id or active_id
            - reply: active_id of a message the user replies to
                - default_parent_id or message_id or active_id: ID of the
                    mail.message we reply to
                - message.res_model or default_model
                - message.res_id or default_res_id
            - mass_mail: model and IDs of records the user mass-mails
                - active_ids: record IDs
                - default_model or active_model
        """
        if context is None:
            context = {}
        result = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)

        # get some important values from context; the 'default_*' keys take
        # precedence over the generic 'active_*' keys set by the client.
        composition_mode = context.get('default_composition_mode', context.get('mail.compose.message.mode'))
        model = context.get('default_model', context.get('active_model'))
        res_id = context.get('default_res_id', context.get('active_id'))
        message_id = context.get('default_parent_id', context.get('message_id', context.get('active_id')))
        active_ids = context.get('active_ids')

        # get default values according to the composition mode
        if composition_mode == 'reply':
            vals = self.get_message_data(cr, uid, message_id, context=context)
        elif composition_mode == 'comment' and model and res_id:
            vals = self.get_record_data(cr, uid, model, res_id, context=context)
        elif composition_mode == 'mass_mail' and model and active_ids:
            vals = {'model': model, 'res_id': res_id}
        else:
            vals = {'model': model, 'res_id': res_id}
        if composition_mode:
            vals['composition_mode'] = composition_mode

        # only copy values the caller actually asked for
        for field in vals:
            if field in fields:
                result[field] = vals[field]

        # TDE HACK: as mailboxes used default_model='res.users' and default_res_id=uid
        # (because of lack of an accessible pid), creating a message on its own
        # profile may crash (res_users does not allow writing on it)
        # Posting on its own profile works (res_users redirect to res_partner)
        # but when creating the mail.message to create the mail.compose.message
        # access rights issues may rise
        # We therefore directly change the model and res_id
        if result.get('model') == 'res.users' and result.get('res_id') == uid:
            result['model'] = 'res.partner'
            result['res_id'] = self.pool.get('res.users').browse(cr, uid, uid).partner_id.id

        return result

    def _get_composition_mode_selection(self, cr, uid, context=None):
        """Selection values for the composition_mode field (overridable)."""
        return [('comment', 'Comment a document'), ('reply', 'Reply to a message'), ('mass_mail', 'Mass mailing')]

    _columns = {
        # lambda indirection lets subclasses extend the selection list
        'composition_mode': fields.selection(
            lambda s, *a, **k: s._get_composition_mode_selection(*a, **k),
            string='Composition mode'),
        # extra recipients, on top of the ones inherited from mail.message
        'partner_ids': fields.many2many('res.partner',
            'mail_compose_message_res_partner_rel',
            'wizard_id', 'partner_id', 'Additional contacts'),
        'attachment_ids': fields.many2many('ir.attachment',
            'mail_compose_message_ir_attachments_rel',
            'wizard_id', 'attachment_id', 'Attachments'),
        # optional saved filter used to select mass-mailing targets
        'filter_id': fields.many2one('ir.filters', 'Filters'),
    }

    _defaults = {
        'composition_mode': 'comment',
        'body': lambda self, cr, uid, ctx={}: '',
        'subject': lambda self, cr, uid, ctx={}: False,
        'partner_ids': lambda self, cr, uid, ctx={}: [],
    }

    def _notify(self, cr, uid, newid, context=None):
        """ Override specific notify method of mail.message, because we do
            not want that feature in the wizard. """
        return

    def get_record_data(self, cr, uid, model, res_id, context=None):
        """ Returns a defaults-like dict with initial values for the composition
            wizard when sending an email related to the document record
            identified by ``model`` and ``res_id``.

            :param str model: model name of the document record this mail is
                related to.
            :param int res_id: id of the document record this mail is related to
        """
        doc_name_get = self.pool.get(model).name_get(cr, uid, [res_id], context=context)
        if doc_name_get:
            record_name = doc_name_get[0][1]
        else:
            record_name = False
        return {'model': model, 'res_id': res_id, 'record_name': record_name}

    def get_message_data(self, cr, uid, message_id, context=None):
        """ Returns a defaults-like dict with initial values for the composition
            wizard when replying to the given message (e.g. including the quote
            of the initial message, and the correct recipients).

            :param int message_id: id of the mail.message to which the user
                is replying.
        """
        if not message_id:
            return {}
        if context is None:
            context = {}
        message_data = self.pool.get('mail.message').browse(cr, uid, message_id, context=context)

        # create subject: prepend the translated 'Re:' prefix unless the
        # original subject already carries one (translated or literal)
        re_prefix = _('Re:')
        reply_subject = tools.ustr(message_data.subject or '')
        if not (reply_subject.startswith('Re:') or reply_subject.startswith(re_prefix)) and message_data.subject:
            reply_subject = "%s %s" % (re_prefix, reply_subject)
        # get partner_ids from original message
        partner_ids = [partner.id for partner in message_data.partner_ids] if message_data.partner_ids else []
        partner_ids += context.get('default_partner_ids', [])

        # update the result
        result = {
            'record_name': message_data.record_name,
            'model': message_data.model,
            'res_id': message_data.res_id,
            'parent_id': message_data.id,
            'subject': reply_subject,
            'partner_ids': partner_ids,
        }
        return result

    #------------------------------------------------------
    # Wizard validation and send
    #------------------------------------------------------

    def send_mail(self, cr, uid, ids, context=None):
        """ Process the wizard content and proceed with sending the related
            email(s), rendering any template patterns on the fly if needed. """
        if context is None:
            context = {}
        active_ids = context.get('active_ids')
        is_log = context.get('mail_compose_log', False)

        for wizard in self.browse(cr, uid, ids, context=context):
            mass_mail_mode = wizard.composition_mode == 'mass_mail'
            active_model_pool = self.pool.get(wizard.model if wizard.model else 'mail.thread')

            # wizard works in batch mode: [res_id] or active_ids
            res_ids = active_ids if mass_mail_mode and wizard.model and active_ids else [wizard.res_id]
            for res_id in res_ids:
                # default values, according to the wizard options
                post_values = {
                    'subject': wizard.subject,
                    'body': wizard.body,
                    'parent_id': wizard.parent_id and wizard.parent_id.id,
                    'partner_ids': [partner.id for partner in wizard.partner_ids],
                    'attachments': [(attach.datas_fname or attach.name, base64.b64decode(attach.datas)) for attach in wizard.attachment_ids],
                }
                # mass mailing: render and override default values
                if mass_mail_mode and wizard.model:
                    email_dict = self.render_message(cr, uid, wizard, res_id, context=context)
                    new_partner_ids = email_dict.pop('partner_ids', [])
                    post_values['partner_ids'] += new_partner_ids
                    new_attachments = email_dict.pop('attachments', [])
                    post_values['attachments'] += new_attachments
                    post_values.update(email_dict)
                # post the message; a False subtype logs an internal note
                # instead of notifying followers
                subtype = 'mail.mt_comment'
                if is_log:
                    subtype = False
                active_model_pool.message_post(cr, uid, [res_id], type='comment', subtype=subtype, context=context, **post_values)

        return {'type': 'ir.actions.act_window_close'}

    def render_message(self, cr, uid, wizard, res_id, context=None):
        """ Generate an email from the template for given (wizard.model, res_id)
            pair. This method is meant to be inherited by email_template that
            will produce a more complete dictionary. """
        return {
            'subject': self.render_template(cr, uid, wizard.subject, wizard.model, res_id, context),
            'body': self.render_template(cr, uid, wizard.body, wizard.model, res_id, context),
        }

    def render_template(self, cr, uid, template, model, res_id, context=None):
        """ Render the given template text, replace mako-like expressions ``${expr}``
            with the result of evaluating these expressions with an evaluation context
            containing:

                * ``user``: browse_record of the current user
                * ``object``: browse_record of the document record this mail is
                              related to
                * ``context``: the context passed to the mail composition wizard

            :param str template: the template text to render
            :param str model: model name of the document record this mail is related to.
            :param int res_id: id of the document record this mail is related to.
        """
        if context is None:
            context = {}

        def merge(match):
            # strip the '${' ... '}' wrapper and evaluate the inner expression
            exp = str(match.group()[2:-1]).strip()
            result = eval(exp, {
                'user': self.pool.get('res.users').browse(cr, uid, uid, context=context),
                'object': self.pool.get(model).browse(cr, uid, res_id, context=context),
                'context': dict(context), # copy context to prevent side-effects of eval
            })
            return result and tools.ustr(result) or ''

        return template and EXPRESSION_PATTERN.sub(merge, template)
| jss-emr/openerp-7-src | openerp/addons/mail/wizard/mail_compose_message.py | Python | agpl-3.0 | 12,527 |
from django.contrib.auth.decorators import login_required
import logging
from certificates.models import GeneratedCertificate
from certificates.models import CertificateStatuses as status
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
import json
#@begin:View certificate page use
#@date:2013-11-28
from django.shortcuts import redirect
from django_future.csrf import ensure_csrf_cookie
from mitxmako.shortcuts import render_to_response, render_to_string
from mitxmako.shortcuts import marketing_link
from util.cache import cache_if_anonymous
#@end
from courseware.courses import (get_courses, get_course_with_access,
get_courses_by_university, sort_by_announcement)
from courseware.model_data import FieldDataCache
import datetime
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics,ttfonts
import os
from io import BytesIO
import urllib
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.views.decorators.cache import cache_control
from administration.models import Author,CertificateAssociationType,Certificate
import cStringIO as StringIO
from xhtml2pdf import pisa
from django.http import HttpResponse
from cgi import escape
#@begin:get course-time for certificate
#@date:2016-05-04
from student.models import (Registration, UserProfile, TestCenterUser, TestCenterUserForm,
TestCenterRegistration, TestCenterRegistrationForm,
PendingNameChange, PendingEmailChange,
CourseEnrollment, unique_id_for_user,
get_testcenter_registration, CourseEnrollmentAllowed)
from study_time.models import record_time_store
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from django.contrib.auth.models import User
#@end
#@begin:change the Total Course Time
#@date:2016-06-21
from reporting.models import reporting_store
#@end
#logger = logging.getLogger(__name__)
log = logging.getLogger("tracking")
@csrf_exempt
def update_certificate(request):
    """
    Will update GeneratedCertificate for a new certificate or
    modify an existing certificate entry.

    See models.py for a state diagram of certificate states

    This view should only ever be accessed by the xqueue server.
    Responses are JSON with 'return_code' 0 on success, 1 on failure.
    """

    if request.method == "POST":
        xqueue_body = json.loads(request.POST.get('xqueue_body'))
        xqueue_header = json.loads(request.POST.get('xqueue_header'))

        try:
            cert = GeneratedCertificate.objects.get(
                user__username=xqueue_body['username'],
                course_id=xqueue_body['course_id'],
                key=xqueue_header['lms_key'])
        except GeneratedCertificate.DoesNotExist:
            # BUGFIX: this module defines ``log`` (the commented-out ``logger``
            # binding was removed), so the previous ``logger.critical`` calls
            # raised NameError exactly when an error needed to be reported.
            log.critical('Unable to lookup certificate\n'
                         'xqueue_body: {0}\n'
                         'xqueue_header: {1}'.format(
                             xqueue_body, xqueue_header))

            return HttpResponse(json.dumps({
                'return_code': 1,
                'content': 'unable to lookup key'}),
                mimetype='application/json')

        if 'error' in xqueue_body:
            cert.status = status.error
            if 'error_reason' in xqueue_body:
                # Hopefully we will record a meaningful error
                # here if something bad happened during the
                # certificate generation process
                #
                # example:
                #  (aamorm BerkeleyX/CS169.1x/2012_Fall)
                #  <class 'simples3.bucket.S3Error'>:
                #  HTTP error (reason=error(32, 'Broken pipe'), filename=None) :
                #  certificate_agent.py:175
                cert.error_reason = xqueue_body['error_reason']
        else:
            if cert.status in [status.generating, status.regenerating]:
                # xqueue delivered a freshly generated certificate
                cert.download_uuid = xqueue_body['download_uuid']
                cert.verify_uuid = xqueue_body['verify_uuid']
                cert.download_url = xqueue_body['url']
                cert.status = status.downloadable
            elif cert.status in [status.deleting]:
                cert.status = status.deleted
            else:
                log.critical('Invalid state for cert update: {0}'.format(
                    cert.status))
                return HttpResponse(json.dumps({
                    'return_code': 1,
                    'content': 'invalid cert status'}),
                    mimetype='application/json')

        cert.save()
        return HttpResponse(json.dumps({'return_code': 0}),
                            mimetype='application/json')

    # Non-POST requests are not part of the xqueue callback protocol.
    # Previously the view fell through and returned None, which makes
    # Django raise; answer with an explicit error payload instead.
    return HttpResponse(json.dumps({
        'return_code': 1,
        'content': 'must use POST'}),
        mimetype='application/json')
#@begin:View certificate page
#@date:2013-11-28
def course_from_id(course_id):
    """Look up and return the CourseDescriptor for ``course_id``."""
    location = CourseDescriptor.id_to_location(course_id)
    store = modulestore()
    return store.get_instance(course_id, location)
'''
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def download_certificate(request,course_id,completed_time):
certificate_type=[['WestEd','PCG Education','A.L.L.','Understanding Language Initiative at Stanford'],['CT Core Standards']]
user_id = request.user.id
user_course = get_course_with_access(user_id, course_id, 'load')
t_time = ""
t_y = ""
t_m = ""
t_d = ""
changdu = len(completed_time)
if changdu > 9:
t_time = completed_time.split('-')
t_y = int(t_time[0])
t_m = int(t_time[1])
t_d = int(t_time[2])
c_completed_time = datetime.date(t_y,t_m,t_d).strftime("%B %d, %Y ")
####### Ancestor
user_id_temp = user_id + 15
temp1 = '821bf6753e09qx4'
temp2 = '103md94e157wf62a9'
user_id_string = '%d' %user_id_temp
pdf_filename = temp1 + user_id_string + temp2 + '.pdf'
# Create the HttpResponse object with the appropriate PDF headers.
response = HttpResponse(content_type='application/pdf')
#response['Content-Disposition'] = 'attachment; filename="certificate.pdf"' #directly download pdf
buffer = BytesIO()
# Create the PDF object, using the BytesIO object as its "file." Paper size = 'A4'
c = canvas.Canvas(buffer,pagesize=(841.89,595.27))
for i in range(len(certificate_type)):
if user_course.display_organization in certificate_type[i]:
if i==0:
pdf=draw_certificate_default(request,user_course,c_completed_time,buffer,c)
else:
pdf=draw_certificate_CT(buffer,c)
response.write(pdf)
return response
'''
@login_required
def course_credits(request):
    """Render the course-credits information page for logged-in users."""
    template_context = {}
    return render_to_response('course_credits.html', template_context)
@login_required
def course_credits_tcsj(request):
    """Render the TCSJ course-credits page for logged-in users."""
    return render_to_response('course_credits_tcsj.html', dict())
@login_required
def course_credits_baker_univesity(request):
    """Render the Baker University course-credits page.

    NOTE(review): the 'univesity' spelling is kept intentionally — the
    function name and template name are referenced elsewhere (URLconf).
    """
    template_context = {}
    return render_to_response('course_credits_baker_univesity.html', template_context)
@ensure_csrf_cookie
@cache_if_anonymous
def download_certificate_demo(request):
    """Render the demo certificate page (cached for anonymous visitors)."""
    return render_to_response('d_certificate_demo.html', dict())
#@end
'''
def draw_certificate_default(request, user_course, c_completed_time, buffer, c):
first_name = request.user.first_name
last_name = request.user.last_name
c_course_name = user_course.display_name_with_default
c_user_name = first_name + ' ' +last_name
c_organization = ''
c_estimated_effort = ''
c_course_full_name = user_course.display_number_with_default + " " + c_course_name #course name
fontpath = '/home/tahoe/edx_all/edx-platform/lms/static/fonts'
imagepath = '/home/tahoe/edx_all/edx-platform/lms/static/images/certificate'
pdfmetrics.registerFont(ttfonts.TTFont('Open Sans',os.path.join(fontpath, 'OpenSans-Regular-webfont.ttf')))
pdfmetrics.registerFont(ttfonts.TTFont('OpenSans_i',os.path.join(fontpath, 'OpenSans-Italic-webfont.ttf')))
pdfmetrics.registerFont(ttfonts.TTFont('OpenSans_b',os.path.join(fontpath, 'OpenSans-Bold-webfont.ttf')))
pdfmetrics.registerFont(ttfonts.TTFont('Nunito',os.path.join(fontpath, 'Nunito-Regular.ttf')))
fontsize_completedtime = 15
fontsize_maincontent = 20
fontsize_username = 45
fontsize_coursename = 25
fontsize_effort = 21
if user_course.display_number_with_default == "PEP101x":
c_organization = 'PCG Education'
c.drawImage(imagepath+"/certificate_pcg.jpg",0,0, width=841.89,height=595.27,mask=None)
c_estimated_effort = user_course.certificates_estimated_effort
else:
c_organization = 'WestEd'
c.drawImage(imagepath+"/certificate_wested.jpg",0,0, width=841.89,height=595.27,mask=None)
c_estimated_effort = user_course.certificates_estimated_effort
c.drawImage(imagepath+"/qianzi.jpg",360,50, width=None,height=None,mask=None)
#c.drawImage(imagepath+"/pcgeducationdown_logo.jpg",590,75, width=None,height=None,mask=None)
c.setFillColorRGB(0.5,0.5,0.5)
c.setFont("Open Sans", 25)
c.drawString(642,490,"CERTIFICATE")
c.setFont("OpenSans_i", fontsize_completedtime)
c.drawString(652,468,c_completed_time)
c.setFont("Open Sans", fontsize_maincontent)
c.drawString(50,400,'This is to certify that')
c.drawString(50,313,'Successfully completed')
c.drawString(50,230,'a course of study offered by ')
c.setFont("OpenSans_b", fontsize_maincontent)
c.drawString(315,230,c_organization)
if user_course.display_number_with_default == "PEP101x":
c.setFont("Open Sans", fontsize_maincontent)
c.drawString(460,230,', a partner in ')
c.setFont("OpenSans_b", fontsize_maincontent)
c.drawString(584,230,'Pepper')
c.setFont("Open Sans", fontsize_maincontent)
c.drawString(655,230,', an online')
c.setFont("Open Sans", fontsize_maincontent)
c.drawString(50,205,'learning initiative for ')
c.setFont("OpenSans_b", fontsize_maincontent)
c.drawString(247,205,'Common Core Specialists')
else:
c.setFont("Open Sans", fontsize_maincontent)
c.drawString(389,230,', a partner in ')
c.setFont("OpenSans_b", fontsize_maincontent)
c.drawString(514,230,'Pepper')
c.setFont("Open Sans", fontsize_maincontent)
c.drawString(585,230,', an online learning')
c.setFont("Open Sans", fontsize_maincontent)
c.drawString(50,205,'initiative for ')
c.setFont("OpenSans_b", fontsize_maincontent)
c.drawString(165,205,'Common Core Specialists')
c.setFont("Open Sans", fontsize_effort)
c.drawString(50,50,'Estimated Effort: ' + c_estimated_effort)
c.setFillColorRGB(0,0.5,0.85)
if len(c_user_name)<=25:
fontsize_username = 45
c.setFont("Nunito", fontsize_username)
c.drawString(50,348,c_user_name)
elif len(c_user_name)<=40:
fontsize_username = 37
c.setFont("Nunito", fontsize_username)
c.drawString(50,352,c_user_name)
else:
fontsize_username = 24
c.setFont("Nunito", fontsize_username)
c.drawString(50,356,c_user_name)
c.setFont("Nunito", fontsize_coursename)
c.drawString(50,270,c_course_full_name)
c.showPage()
c.save()
# Get the value of the BytesIO buffer and write it to the response.
pdf = buffer.getvalue()
buffer.close()
return pdf
'''
def draw_certificate_default(request, user_course, c_completed_time, buffer, c):
    """Draw the standard landscape (A4, 841.89x595.27pt) completion
    certificate onto ReportLab canvas ``c`` and return the PDF bytes.

    The background image and wording vary with
    ``user_course.display_organization`` (PCG Education, WestEd, A.L.L.,
    Understanding Language Initiative at Stanford).

    :param request: Django request; supplies the learner's first/last name.
    :param user_course: course descriptor providing display names and
        ``certificates_estimated_effort``.
    :param c_completed_time: pre-formatted completion date string.
    :param buffer: BytesIO the canvas writes into; closed before returning.
    :param c: reportlab.pdfgen.canvas.Canvas bound to ``buffer``.
    :returns: the rendered PDF as a byte string.
    """
    first_name = request.user.first_name
    last_name = request.user.last_name
    c_course_name = user_course.display_name_with_default
    c_user_name = first_name + ' ' +last_name
    c_organization = ''
    c_estimated_effort = ''
    c_course_full_name = user_course.display_number_with_default + " " + c_course_name #course name
    # hard-coded deployment paths to font/image assets
    # NOTE(review): these absolute paths only exist on the production host —
    # consider deriving them from settings; verify before reuse.
    fontpath = '/home/tahoe/edx_all/edx-platform/lms/static/fonts'
    imagepath = '/home/tahoe/edx_all/edx-platform/lms/static/images/certificate'
    pdfmetrics.registerFont(ttfonts.TTFont('Open Sans',os.path.join(fontpath, 'OpenSans-Regular-webfont.ttf')))
    pdfmetrics.registerFont(ttfonts.TTFont('OpenSans_i',os.path.join(fontpath, 'OpenSans-Italic-webfont.ttf')))
    pdfmetrics.registerFont(ttfonts.TTFont('OpenSans_b',os.path.join(fontpath, 'OpenSans-Bold-webfont.ttf')))
    pdfmetrics.registerFont(ttfonts.TTFont('Nunito',os.path.join(fontpath, 'Nunito-Regular.ttf')))
    # point sizes used below; username size is adjusted later to fit
    fontsize_completedtime = 15
    fontsize_maincontent = 20
    fontsize_username = 45
    fontsize_coursename = 25
    fontsize_effort = 21
    # pick the background artwork per issuing organization
    if user_course.display_organization == "PCG Education":
        c_organization = 'PCG Education'
        c.drawImage(imagepath+"/certificate_pcg.jpg",0,0, width=841.89,height=595.27,mask=None)
        c_estimated_effort = user_course.certificates_estimated_effort
    elif user_course.display_organization=='WestEd':
        c_organization = 'WestEd'
        c.drawImage(imagepath+"/certificate_wested.jpg",0,0, width=841.89,height=595.27,mask=None)
        c_estimated_effort = user_course.certificates_estimated_effort
    elif user_course.display_organization=='A.L.L.':
        c_organization = 'Accelerated Literacy Learning'
        c.drawImage(imagepath+"/certificate_A.L.L.jpg",0,0, width=841.89,height=595.27,mask=None)
        c_estimated_effort = user_course.certificates_estimated_effort
    elif user_course.display_organization=='Understanding Language Initiative at Stanford':
        c_organization = 'Understanding Language Initiative at Stanford'
        c.drawImage(imagepath+"/certificate_ULStanford.jpg",0,0, width=841.89,height=595.27,mask=None)
        c_estimated_effort = user_course.certificates_estimated_effort
    # signature overlay
    c.drawImage(imagepath+"/qianzi.jpg",360,50, width=None,height=None,mask=None)
    #c.drawImage(imagepath+"/pcgeducationdown_logo.jpg",590,75, width=None,height=None,mask=None)
    # grey header: title and completion date
    c.setFillColorRGB(0.5,0.5,0.5)
    c.setFont("Open Sans", 25)
    c.drawString(642,490,"CERTIFICATE")
    c.setFont("OpenSans_i", fontsize_completedtime)
    c.drawString(652,468,c_completed_time)
    c.setFont("Open Sans", fontsize_maincontent)
    c.drawString(50,400,'This is to certify that')
    c.drawString(50,313,'Successfully completed')
    # body wording differs per organization; x-offsets are hand-tuned so the
    # bold organization / 'Pepper' spans line up with the surrounding text
    if user_course.display_organization == "PCG Education":
        c.drawString(50,230,'a course of study offered by ')
        c.setFont("OpenSans_b", fontsize_maincontent)
        c.drawString(315,230,c_organization)
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(460,230,', a partner in ')
        c.setFont("OpenSans_b", fontsize_maincontent)
        c.drawString(584,230,'Pepper')
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(655,230,', an online')
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(50,205,'learning community.')
    elif user_course.display_organization=='WestEd':
        c.drawString(50,230,'a course of study offered by ')
        c.setFont("OpenSans_b", fontsize_maincontent)
        c.drawString(315,230,c_organization)
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(389,230,', a partner in ')
        c.setFont("OpenSans_b", fontsize_maincontent)
        c.drawString(514,230,'Pepper')
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(585,230,', an online learning')
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(50,205,'initiative for ')
        c.setFont("OpenSans_b", fontsize_maincontent)
        c.drawString(165,205,'Common Core Specialists')
    elif user_course.display_organization=='A.L.L.':
        c.drawString(50,230,'a course of study offered by ')
        c.setFont("OpenSans_b", fontsize_maincontent)
        c.drawString(315,230,c_organization)
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(613,230,', a partner in ')
        c.setFont("OpenSans_b", fontsize_maincontent)
        c.drawString(737,230,'Pepper')
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(807,230,',')
        c.drawString(50,205,'an online')
        c.setFont("Open Sans", fontsize_maincontent)
        c.drawString(144,205,'learning community.')
    elif user_course.display_organization=='Understanding Language Initiative at Stanford':
        c.drawString(50,230,'a course which is part of Pepper, an online learning community. Content for this ')
        c.drawString(50,205,'course was created by the ')
        c.setFont("OpenSans_b", fontsize_maincontent)
        c.drawString(302,205,c_organization)
        c.drawString(50,180,"University.")
    c.setFont("Open Sans", fontsize_effort)
    c.drawString(50,50,'Estimated Effort: ' + c_estimated_effort)
    # blue body text: learner name sized down for longer names so it fits
    c.setFillColorRGB(0,0.5,0.85)
    if len(c_user_name)<=25:
        fontsize_username = 45
        c.setFont("Nunito", fontsize_username)
        c.drawString(50,348,c_user_name)
    elif len(c_user_name)<=40:
        fontsize_username = 37
        c.setFont("Nunito", fontsize_username)
        c.drawString(50,352,c_user_name)
    else:
        fontsize_username = 24
        c.setFont("Nunito", fontsize_username)
        c.drawString(50,356,c_user_name)
    c.setFont("Nunito", fontsize_coursename)
    c.drawString(50,270,c_course_full_name)
    c.showPage()
    c.save()
    # Get the value of the BytesIO buffer and write it to the response.
    pdf = buffer.getvalue()
    buffer.close()
    return pdf
def draw_certificate_CT(buffer, c):
    """Draw the CT Core Standards completion page onto canvas ``c`` and
    return the finished PDF bytes (``buffer`` is closed before returning).
    """
    font_dir = '/home/tahoe/edx_all/edx-platform/lms/static/fonts'
    image_dir = '/home/tahoe/edx_all/edx-platform/lms/static/images/certificate'
    nunito_font = ttfonts.TTFont('Nunito', os.path.join(font_dir, 'Nunito-Regular.ttf'))
    pdfmetrics.registerFont(nunito_font)
    # full-page background, then a single blue thank-you line
    c.drawImage(image_dir + "/blank_certificate.jpg", 0, 0, width=841.89, height=595.27, mask=None)
    c.setFillColorRGB(0, 0.5, 0.85)
    c.setFont("Nunito", 40)
    c.drawString(50, 328, 'Thank you for completing this module!')
    c.showPage()
    c.save()
    rendered_pdf = buffer.getvalue()
    buffer.close()
    return rendered_pdf
# new certificate------------------------------------------------
@ensure_csrf_cookie
@cache_if_anonymous
def certificate_preview(request):
    """Render the certificate preview page."""
    return render_to_response('certificate_preview.html', dict())
def getCertificateBlob(request, organization):
    """Return the raw certificate template blob for this user.

    Resolution order: a certificate bound to the user's school, then one
    bound to the user's district, then one bound to the ``organization``
    author.  Returns '' when nothing matches.
    """
    school_certs = Certificate.objects.filter(
        association_type__name='School',
        association=request.user.profile.school_id)
    if len(school_certs) > 0:
        return school_certs[0].certificate_blob
    district_certs = Certificate.objects.filter(
        association_type__name='District',
        association=request.user.profile.district.id)
    if len(district_certs) > 0:
        return district_certs[0].certificate_blob
    matching_authors = Author.objects.filter(name=organization)
    if len(matching_authors) > 0:
        author_certs = Certificate.objects.filter(
            association_type__name='Author',
            association=matching_authors[0].id)
        if len(author_certs) > 0:
            return author_certs[0].certificate_blob
    return ''
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def download_certificate(request, course_id, completed_time):
    """Render the user's course certificate as a downloadable PDF.

    The organization/school/district-specific HTML template ("blob") is
    fetched via getCertificateBlob(), filled in with str.format
    placeholders ({firstname}, {lastname}, {coursename}, {coursenumber},
    {date}, {hours}, {recordedhours}), rendered through the
    'download_certificate.html' template and converted to PDF with pisa.

    :param completed_time: completion date as a 'YYYY-MM-DD' string.
    :returns: HttpResponse with 'application/pdf' content, or an HTML
        error page if PDF generation fails.
    """
    user_id = request.user.id
    user_course = get_course_with_access(user_id, course_id, 'load')
    first_name = request.user.first_name
    last_name = request.user.last_name
    estimated_effort = user_course.certificates_estimated_effort
    #@begin:get course-time for certificate
    #@date:2016-05-04
    #all_course_time = get_allcoursetime(user_id, course_id)
    all_course_time = get_total_course_time(user_id, course_id) #2016-06-21 change the Total Course Time
    log.debug("all_course_time------------------")
    log.debug(all_course_time)
    # title-case each word of the estimated effort string for display
    estimated_effort_list = estimated_effort.split()
    estimated_effort_new_list = []
    for el in estimated_effort_list:
        estimated_effort_new_list.append(el.capitalize())
    estimated_effort_Upper = ' '.join(estimated_effort_new_list)
    #@end
    course_name = user_course.display_name_with_default
    # user_name = first_name + ' ' + last_name
    course_full_name = user_course.display_number_with_default + " " + course_name
    completed_time = datetime.datetime.strptime(completed_time, '%Y-%m-%d').strftime('%B %d, %Y ')
    # un-quote the stored template; decode/encode round-trip normalizes it
    # to a UTF-8 byte string before formatting
    blob = urllib.unquote(getCertificateBlob(request, user_course.display_organization).decode('utf8').encode('utf8'))
    output_error = ''
    try:
        blob = blob.format(
            firstname=first_name,
            lastname=last_name,
            coursename=course_name,
            coursenumber=course_full_name,
            date=completed_time,
            hours=estimated_effort_Upper,
            recordedhours=all_course_time)
    except KeyError, e:
        # the stored template used a placeholder we do not supply;
        # surface the name to the rendered page instead of crashing
        output_error = 'The placeholder {0} does not exist.'.format(str(e))
    # return render_to_response('download_certificate.html', {'content': blob, 'outputError': output_error})
    context_dict = {
        'content': blob,
        'outputError': output_error,
    }
    html = render_to_string('download_certificate.html', context_dict)
    result = StringIO.StringIO()
    pdf = pisa.CreatePDF(StringIO.StringIO(html.encode("UTF-8")), result, encoding="UTF-8")
    if not pdf.err:
        return HttpResponse(result.getvalue(), content_type='application/pdf')
    return HttpResponse('There was an error when generating your certificate: <pre>%s</pre>' % escape(html))
#@begin:change the Total Course Time
#@date:2016-06-21
def get_total_course_time(user_id, course_id):
    """Sum the user's recorded time for a course (from the reporting store's
    UserCourseView collection) and return it as a formatted string."""
    store = reporting_store()
    store.set_collection('UserCourseView')
    rows = store.collection.find(
        {"user_id": user_id, "course_id": course_id},
        {"_id": 0, "total_time": 1})
    total_seconds = sum(row['total_time'] for row in rows)
    return recorded_time_format(total_seconds)
#@end
#@begin:get course-time for certificate
#@date:2016-05-04
def get_allcoursetime(user_id, course_id):
    """Return the user's combined courseware + external time for a course,
    formatted via recorded_time_format().

    A missing course is logged and treated as zero external time.
    """
    user = User.objects.get(id=str(user_id))
    time_store = record_time_store()
    external_seconds = 0
    try:
        course = course_from_id(course_id)
        external_seconds = time_store.get_external_time(str(user.id), course.id)
    except ItemNotFoundError:
        log.error("User {0} enrolled in non-existent course {1}"
                  .format(user.username, course_id))
    courseware_seconds = time_store.get_course_time(str(user.id), course_id, 'courseware')
    return recorded_time_format(courseware_seconds + external_seconds)
def study_time_format(t, is_sign=False):
    """Render ``t`` seconds as '<H> Hour(s), <M> Minute(s)'.

    When ``is_sign`` is true and ``t`` is negative, a leading '-' is
    emitted and the magnitude is formatted.  The exact (slightly quirky)
    spacing of the historical implementation is preserved on purpose.
    """
    prefix = ''
    if t < 0 and is_sign:
        prefix = '-'
        t = abs(t)
    hours = int(t / 60 / 60)
    minutes = int(t / 60 % 60)
    hour_suffix = ' Hour, ' if hours == 1 else ' Hours, '
    minute_suffix = ' Minute' if minutes == 1 else 'Minutes'
    hour_part = '%d%s' % (hours, hour_suffix) if hours > 0 else ''
    return '{0}{1} {2} {3}'.format(prefix, hour_part, minutes, minute_suffix)
def recorded_time_format(t, is_sign=False):
    """Render ``t`` seconds as '<H> Hour(s) <M> Minute(s)' (no comma).

    Same behaviour as study_time_format() except for the hour unit text;
    the historical spacing (including the double space before a singular
    'Minute') is preserved on purpose.
    """
    prefix = ''
    if t < 0 and is_sign:
        prefix = '-'
        t = abs(t)
    hours = int(t / 60 / 60)
    minutes = int(t / 60 % 60)
    hour_suffix = ' Hour ' if hours == 1 else ' Hours '
    minute_suffix = ' Minute' if minutes == 1 else 'Minutes'
    hour_part = '%d%s' % (hours, hour_suffix) if hours > 0 else ''
    return '{0}{1} {2} {3}'.format(prefix, hour_part, minutes, minute_suffix)
#@end
| EduPepperPDTesting/pepper2013-testing | lms/djangoapps/certificates/views.py | Python | agpl-3.0 | 23,890 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-06 13:24
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefine BadgeDesign.bg_color as a 7-char
    # CharField defaulting to '#FFFFFF' and validated against the
    # '#RRGGBB' hex-color pattern.

    dependencies = [
        ('badges', '0003_badgedesign_bg_color'),
    ]

    operations = [
        migrations.AlterField(
            model_name='badgedesign',
            name='bg_color',
            field=models.CharField(default='#FFFFFF', help_text='E.g. #00ff00', max_length=7, validators=[django.core.validators.RegexValidator('^#[a-fA-F0-9]{6}$')], verbose_name='Background color'),
        ),
    ]
| helfertool/helfertool | src/badges/migrations/0004_auto_20160306_1424.py | Python | agpl-3.0 | 638 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Search Architecture:
- Have a list of accounts
- Create an "overseer" thread
- Search Overseer:
- Tracks incoming new location values
- Tracks "paused state"
- During pause or new location will clears current search queue
- Starts search_worker threads
- Search Worker Threads each:
- Have a unique API login
- Listens to the same Queue for areas to scan
- Can re-login as needed
- Pushes finds to db queue and webhook queue
'''
import logging
import math
import os
import sys
import traceback
import random
import time
import copy
import requests
import schedulers
import terminalsize
import timeit
from datetime import datetime
from threading import Thread, Lock
from queue import Queue, Empty
from sets import Set
from collections import deque
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from distutils.version import StrictVersion
from pgoapi.utilities import f2i
from pgoapi import utilities as util
from pgoapi.hash_server import (HashServer, BadHashRequestException,
HashingOfflineException)
from .models import (parse_map, GymDetails, parse_gyms, MainWorker,
WorkerStatus, HashKeys)
from .utils import now, clear_dict_response
from .transform import get_new_coords, jitter_location
from .account import (setup_api, check_login, get_tutorial_state,
complete_tutorial, AccountSet, parse_new_timestamp_ms)
from .captcha import captcha_overseer_thread, handle_captcha
from .proxy import get_new_proxy
log = logging.getLogger(__name__)
loginDelayLock = Lock()
# Thread to handle user input.
def switch_status_printer(display_type, current_page, mainlog,
loglevel, logmode):
# Disable logging of the first handler - the stream handler, and disable
# it's output.
if (logmode != 'logs'):
mainlog.handlers[0].setLevel(logging.CRITICAL)
while True:
# Wait for the user to press a key.
command = raw_input()
if command == '':
# Switch between logging and display.
if display_type[0] != 'logs':
# Disable display, enable on screen logging.
mainlog.handlers[0].setLevel(loglevel)
display_type[0] = 'logs'
# If logs are going slowly, sometimes it's hard to tell you
# switched. Make it clear.
print 'Showing logs...'
elif display_type[0] == 'logs':
# Enable display, disable on screen logging (except for
# critical messages).
mainlog.handlers[0].setLevel(logging.CRITICAL)
display_type[0] = 'workers'
elif command.isdigit():
current_page[0] = int(command)
mainlog.handlers[0].setLevel(logging.CRITICAL)
display_type[0] = 'workers'
elif command.lower() == 'f':
mainlog.handlers[0].setLevel(logging.CRITICAL)
display_type[0] = 'failedaccounts'
elif command.lower() == 'h':
mainlog.handlers[0].setLevel(logging.CRITICAL)
display_type[0] = 'hashstatus'
# Thread to print out the status of each worker.
def status_printer(threadStatus, search_items_queue_array, db_updates_queue,
                   wh_queue, account_queue, account_failures, account_captchas,
                   logmode, hash_key, key_scheduler):
    """Render the live console status screen once per second.

    Draws one of several views ('workers', 'failedaccounts', 'hashstatus')
    or stays quiet in 'logs' mode. Spawns switch_status_printer() as a
    daemon thread to read keyboard commands; the two threads communicate
    through the mutable single-element lists display_type/current_page.
    Runs forever; intended to be a daemon thread.
    """
    if (logmode == 'logs'):
        display_type = ['logs']
    else:
        display_type = ['workers']
    current_page = [1]
    # Grab current log / level.
    mainlog = logging.getLogger()
    loglevel = mainlog.getEffectiveLevel()
    # Start another thread to get user input.
    t = Thread(target=switch_status_printer,
               name='switch_status_printer',
               args=(display_type, current_page, mainlog, loglevel, logmode))
    t.daemon = True
    t.start()
    while True:
        time.sleep(1)
        if display_type[0] == 'logs':
            # In log display mode, we don't want to show anything.
            continue
        # Create a list to hold all the status lines, so they can be printed
        # all at once to reduce flicker.
        status_text = []
        if display_type[0] == 'workers':
            # Get the terminal size.
            width, height = terminalsize.get_terminal_size()
            # Queue and overseer take 2 lines. Switch message takes up 2
            # lines. Remove an extra 2 for things like screen status lines.
            usable_height = height - 6
            # Prevent people running terminals only 6 lines high from getting a
            # divide by zero.
            if usable_height < 1:
                usable_height = 1
            # Print the queue length.
            search_items_queue_size = 0
            for i in range(0, len(search_items_queue_array)):
                search_items_queue_size += search_items_queue_array[i].qsize()
            skip_total = threadStatus['Overseer']['skip_total']
            status_text.append((
                'Queues: {} search items, {} db updates, {} webhook. ' +
                'Total skipped items: {}. Spare accounts available: {}. ' +
                'Accounts on hold: {}. Accounts with captcha: {}').format(
                    search_items_queue_size, db_updates_queue.qsize(),
                    wh_queue.qsize(), skip_total, account_queue.qsize(),
                    len(account_failures), len(account_captchas)))
            # Print status of overseer.
            status_text.append('{} Overseer: {}'.format(
                threadStatus['Overseer']['scheduler'],
                threadStatus['Overseer']['message']))
            # Calculate the total number of pages. Subtracting for the
            # overseer.
            overseer_line_count = (
                threadStatus['Overseer']['message'].count('\n'))
            total_pages = math.ceil(
                (len(threadStatus) - 1 - overseer_line_count) /
                float(usable_height))
            # Prevent moving outside the valid range of pages.
            if current_page[0] > total_pages:
                current_page[0] = total_pages
            if current_page[0] < 1:
                current_page[0] = 1
            # Calculate which lines to print.
            start_line = usable_height * (current_page[0] - 1)
            end_line = start_line + usable_height
            current_line = 1
            # Find the longest username and proxy.
            userlen = 4
            proxylen = 5
            for item in threadStatus:
                if threadStatus[item]['type'] == 'Worker':
                    userlen = max(userlen, len(threadStatus[item]['username']))
                    if 'proxy_display' in threadStatus[item]:
                        proxylen = max(proxylen, len(
                            str(threadStatus[item]['proxy_display'])))
            # How pretty.
            status = '{:10} | {:5} | {:' + str(userlen) + '} | {:' + str(
                proxylen) + '} | {:7} | {:6} | {:5} | {:7} | {:8} | {:10}'
            # Print the worker status.
            status_text.append(status.format('Worker ID', 'Start', 'User',
                                             'Proxy', 'Success', 'Failed',
                                             'Empty', 'Skipped', 'Captchas',
                                             'Message'))
            for item in sorted(threadStatus):
                if(threadStatus[item]['type'] == 'Worker'):
                    current_line += 1
                    # Skip over items that don't belong on this page.
                    if current_line < start_line:
                        continue
                    if current_line > end_line:
                        break
                    status_text.append(status.format(
                        item,
                        time.strftime('%H:%M',
                                      time.localtime(
                                          threadStatus[item]['starttime'])),
                        threadStatus[item]['username'],
                        threadStatus[item]['proxy_display'],
                        threadStatus[item]['success'],
                        threadStatus[item]['fail'],
                        threadStatus[item]['noitems'],
                        threadStatus[item]['skip'],
                        threadStatus[item]['captcha'],
                        threadStatus[item]['message']))
        elif display_type[0] == 'failedaccounts':
            status_text.append('-----------------------------------------')
            status_text.append('Accounts on hold:')
            status_text.append('-----------------------------------------')
            # Find the longest account name.
            userlen = 4
            for account in account_failures:
                userlen = max(userlen, len(account['account']['username']))
            status = '{:' + str(userlen) + '} | {:10} | {:20}'
            status_text.append(status.format('User', 'Hold Time', 'Reason'))
            for account in account_failures:
                status_text.append(status.format(
                    account['account']['username'],
                    time.strftime('%H:%M:%S',
                                  time.localtime(account['last_fail_time'])),
                    account['reason']))
        elif display_type[0] == 'hashstatus':
            status_text.append(
                '----------------------------------------------------------')
            status_text.append('Hash key status:')
            status_text.append(
                '----------------------------------------------------------')
            status = '{:21} | {:9} | {:9} | {:9}'
            status_text.append(status.format('Key', 'Remaining', 'Maximum',
                                             'Peak'))
            if hash_key is not None:
                for key in hash_key:
                    key_instance = key_scheduler.keys[key]
                    key_text = key
                    # Mark the key currently in rotation with an asterisk.
                    if key_scheduler.current() == key:
                        key_text += '*'
                    status_text.append(status.format(
                        key_text,
                        key_instance['remaining'],
                        key_instance['maximum'],
                        key_instance['peak']))
        # NOTE(review): total_pages is assigned only inside the 'workers'
        # branch above; reaching this footer first via the 'f'/'h' views
        # (before a 'workers' render) would raise NameError — confirm.
        # Print the status_text for the current screen.
        status_text.append((
            'Page {}/{}. Page number to switch pages. F to show on hold ' +
            'accounts. H to show hash status. <ENTER> alone to switch ' +
            'between status and log view').format(current_page[0],
                                                  total_pages))
        # Clear the screen.
        os.system('cls' if os.name == 'nt' else 'clear')
        # Print status.
        print '\n'.join(status_text)
# The account recycler monitors failed accounts and places them back in the
# account queue 2 hours after they failed.
# This allows accounts that were soft banned to be retried after giving
# them a chance to cool down.
def account_recycler(args, accounts_queue, account_failures):
    """Return rested accounts from the failure list back to active duty.

    Once a minute, any entry whose last failure is older than
    -ari/--account-rest-interval seconds is removed from
    ``account_failures`` and its account put back on ``accounts_queue``.
    Runs forever; intended to be a daemon thread.
    """
    while True:
        # One pass per minute is plenty.
        time.sleep(60)
        log.info('Account recycler running. Checking status of %d accounts.',
                 len(account_failures))
        # Iterate a snapshot so in-place removals don't break iteration.
        snapshot = list(account_failures)
        # Entries that failed at or before this timestamp have rested enough.
        rested_cutoff = now() - args.account_rest_interval
        for entry in snapshot:
            if entry['last_fail_time'] > rested_cutoff:
                # Still cooling down; log the remaining time only once.
                if 'notified' not in entry:
                    log.info(
                        'Account {} needs to cool off for {} minutes due '
                        'to {}.'.format(
                            entry['account']['username'],
                            round((entry['last_fail_time'] - rested_cutoff)
                                  / 60, 0),
                            entry['reason']))
                    entry['notified'] = True
                continue
            # Rested long enough: rejoin the active rotation.
            log.info('Account {} returning to active duty.'.format(
                entry['account']['username']))
            account_failures.remove(entry)
            accounts_queue.put(entry['account'])
def worker_status_db_thread(threads_status, name, db_updates_queue):
    """Push overseer and worker status snapshots to the DB queue every 3s.

    Runs forever; intended to be a daemon thread.
    """
    while True:
        worker_rows = {}
        overseer_row = None
        for status in threads_status.values():
            status_type = status['type']
            if status_type == 'Overseer':
                # Flatten the overseer status into a MainWorker row.
                overseer_row = {
                    'worker_name': name,
                    'message': status['message'],
                    'method': status['scheduler'],
                    'last_modified': datetime.utcnow(),
                    'accounts_working': status['active_accounts'],
                    'accounts_captcha': status['accounts_captcha'],
                    'accounts_failed': status['accounts_failed']
                }
            elif status_type == 'Worker':
                worker_rows[status['username']] = WorkerStatus.db_format(
                    status, name)
        if overseer_row is not None:
            db_updates_queue.put((MainWorker, {0: overseer_row}))
        db_updates_queue.put((WorkerStatus, worker_rows))
        time.sleep(3)
# The main search loop that keeps an eye on the over all process.
def search_overseer_thread(args, new_location_queue, pause_bit, heartb,
                           db_updates_queue, wh_queue):
    """Main search coordinator: spin up all helper threads, then loop.

    Builds the account queue/sets, the per-hive schedulers and search
    worker threads, plus the status printer, account recycler, captcha
    overseer and status-DB threads. The trailing loop handles pausing,
    location changes, queue refills, aggregate stats and the API-version
    watchdog. Runs forever.
    """
    log.info('Search overseer starting...')
    search_items_queue_array = []
    scheduler_array = []
    account_queue = Queue()
    account_sets = AccountSet(args.hlvl_kph)
    threadStatus = {}
    key_scheduler = None
    api_check_time = 0
    hashkeys_last_upsert = timeit.default_timer()
    # Minimum seconds between hash-key stat upserts to the DB.
    hashkeys_upsert_min_delay = 5.0
    '''
    Create a queue of accounts for workers to pull from. When a worker has
    failed too many times, it can get a new account from the queue and
    reinitialize the API. Workers should return accounts to the queue so
    they can be tried again later, but must wait a bit before doing do so
    to prevent accounts from being cycled through too quickly.
    '''
    for i, account in enumerate(args.accounts):
        account_queue.put(account)
    '''
    Create sets of special case accounts.
    Currently limited to L30+ IV/CP scanning.
    '''
    account_sets.create_set('30', args.accounts_L30)
    # Debug.
    log.info('Added %s accounts to the L30 pool.', len(args.accounts_L30))
    # Create a list for failed accounts.
    account_failures = []
    # Create a double-ended queue for captcha'd accounts
    account_captchas = deque()
    # Shared status dict read by the status printer and the DB thread.
    threadStatus['Overseer'] = {
        'message': 'Initializing',
        'type': 'Overseer',
        'starttime': now(),
        'accounts_captcha': 0,
        'accounts_failed': 0,
        'active_accounts': 0,
        'skip_total': 0,
        'captcha_total': 0,
        'success_total': 0,
        'fail_total': 0,
        'empty_total': 0,
        'scheduler': args.scheduler,
        'scheduler_status': {'tth_found': 0}
    }
    # Create the key scheduler.
    if args.hash_key:
        log.info('Enabling hashing key scheduler...')
        key_scheduler = schedulers.KeyScheduler(args.hash_key,
                                                db_updates_queue)
    if(args.print_status):
        log.info('Starting status printer thread...')
        t = Thread(target=status_printer,
                   name='status_printer',
                   args=(threadStatus, search_items_queue_array,
                         db_updates_queue, wh_queue, account_queue,
                         account_failures, account_captchas,
                         args.print_status, args.hash_key,
                         key_scheduler))
        t.daemon = True
        t.start()
    # Create account recycler thread.
    log.info('Starting account recycler thread...')
    t = Thread(target=account_recycler, name='account-recycler',
               args=(args, account_queue, account_failures))
    t.daemon = True
    t.start()
    # Create captcha overseer thread.
    if args.captcha_solving:
        log.info('Starting captcha overseer thread...')
        t = Thread(target=captcha_overseer_thread, name='captcha-overseer',
                   args=(args, account_queue, account_captchas, key_scheduler,
                         wh_queue))
        t.daemon = True
        t.start()
    if args.status_name is not None:
        log.info('Starting status database thread...')
        t = Thread(target=worker_status_db_thread,
                   name='status_worker_db',
                   args=(threadStatus, args.status_name, db_updates_queue))
        t.daemon = True
        t.start()
    # Create specified number of search_worker_thread.
    log.info('Starting search worker threads...')
    log.info('Configured scheduler is %s.', args.scheduler)
    for i in range(0, args.workers):
        log.debug('Starting search worker thread %d...', i)
        # In beehive mode, every workers_per_hive-th worker starts a new
        # hive with its own queue and scheduler.
        if i == 0 or (args.beehive and i % args.workers_per_hive == 0):
            search_items_queue = Queue()
            # Create the appropriate type of scheduler to handle the search
            # queue.
            scheduler = schedulers.SchedulerFactory.get_scheduler(
                args.scheduler, [search_items_queue], threadStatus, args)
            scheduler_array.append(scheduler)
            search_items_queue_array.append(search_items_queue)
        # Set proxy for each worker, using round robin.
        proxy_display = 'No'
        proxy_url = False  # Will be assigned inside a search thread.
        workerId = 'Worker {:03}'.format(i)
        threadStatus[workerId] = {
            'type': 'Worker',
            'message': 'Creating thread...',
            'success': 0,
            'fail': 0,
            'noitems': 0,
            'skip': 0,
            'captcha': 0,
            'username': '',
            'proxy_display': proxy_display,
            'proxy_url': proxy_url,
        }
        t = Thread(target=search_worker_thread,
                   name='search-worker-{}'.format(i),
                   args=(args, account_queue, account_sets,
                         account_failures, account_captchas,
                         search_items_queue, pause_bit,
                         threadStatus[workerId], db_updates_queue,
                         wh_queue, scheduler, key_scheduler))
        t.daemon = True
        t.start()
    if not args.no_version_check:
        log.info('Enabling new API force Watchdog.')
    # A place to track the current location.
    current_location = False
    # Keep track of the last status for accounts so we can calculate
    # what have changed since the last check
    last_account_status = {}
    stats_timer = 0
    # The real work starts here but will halt on pause_bit.set().
    while True:
        # Periodically persist hash-key stats, rate-limited to once per
        # hashkeys_upsert_min_delay seconds.
        if (args.hash_key is not None and
                (hashkeys_last_upsert + hashkeys_upsert_min_delay)
                <= timeit.default_timer()):
            upsertKeys(args.hash_key, key_scheduler, db_updates_queue)
            hashkeys_last_upsert = timeit.default_timer()
        # On-demand mode: pause scanning if no frontend heartbeat arrived
        # within the configured timeout.
        odt_triggered = (args.on_demand_timeout > 0 and
                         (now() - args.on_demand_timeout) > heartb[0])
        if odt_triggered:
            pause_bit.set()
            log.info('Searching paused due to inactivity...')
        # Wait here while scanning is paused.
        while pause_bit.is_set():
            for i in range(0, len(scheduler_array)):
                scheduler_array[i].scanning_paused()
            # API Watchdog - Continue to check API version.
            if not args.no_version_check and not odt_triggered:
                api_check_time = check_forced_version(
                    args, api_check_time, pause_bit)
            time.sleep(1)
        # If a new location has been passed to us, get the most recent one.
        if not new_location_queue.empty():
            log.info('New location caught, moving search grid.')
            try:
                while True:
                    current_location = new_location_queue.get_nowait()
            except Empty:
                pass
            step_distance = 0.45 if args.no_pokemon else 0.07
            locations = generate_hive_locations(
                current_location, step_distance,
                args.step_limit, len(scheduler_array))
            for i in range(0, len(scheduler_array)):
                scheduler_array[i].location_changed(locations[i],
                                                    db_updates_queue)
        # If there are no search_items_queue either the loop has finished or
        # it's been cleared above. Either way, time to fill it back up.
        for i in range(0, len(scheduler_array)):
            if scheduler_array[i].time_to_refresh_queue():
                threadStatus['Overseer']['message'] = (
                    'Search queue {} empty, scheduling ' +
                    'more items to scan.').format(i)
                log.debug(
                    'Search queue %d empty, scheduling more items to scan.', i)
                try:  # Can't have the scheduler die because of a DB deadlock.
                    scheduler_array[i].schedule()
                except Exception as e:
                    log.error(
                        'Schedule creation had an Exception: {}.'.format(
                            repr(e)))
                    traceback.print_exc(file=sys.stdout)
                    time.sleep(10)
            else:
                threadStatus['Overseer']['message'] = scheduler_array[
                    i].get_overseer_message()
        # Let's update the total stats and add that info to message
        # Added exception handler as dict items change
        try:
            update_total_stats(threadStatus, last_account_status)
        except Exception as e:
            log.error(
                'Update total stats had an Exception: {}.'.format(
                    repr(e)))
            traceback.print_exc(file=sys.stdout)
            time.sleep(10)
        threadStatus['Overseer']['message'] += '\n' + get_stats_message(
            threadStatus)
        # If enabled, display statistics information into logs on a
        # periodic basis.
        if args.stats_log_timer:
            stats_timer += 1
            if stats_timer == args.stats_log_timer:
                log.info(get_stats_message(threadStatus))
                stats_timer = 0
        # Update Overseer statistics
        threadStatus['Overseer']['accounts_failed'] = len(account_failures)
        threadStatus['Overseer']['accounts_captcha'] = len(account_captchas)
        # Send webhook updates when scheduler status changes.
        if args.webhook_scheduler_updates:
            wh_status_update(args, threadStatus['Overseer'], wh_queue,
                             scheduler_array[0])
        # API Watchdog - Check if Niantic forces a new API.
        if not args.no_version_check and not odt_triggered:
            api_check_time = check_forced_version(
                args, api_check_time, pause_bit)
        # Now we just give a little pause here.
        time.sleep(1)
def get_scheduler_tth_found_pct(scheduler):
    """Return the scheduler's TTH-found rate as a percentage (0 if none)."""
    found = getattr(scheduler, 'tth_found', 0)
    if found <= 0:
        # Nothing found yet (or attribute missing): report the raw value.
        return found
    # Clamp the active spawnpoint count to >= 1 to avoid dividing by zero.
    active_spawnpoints = max(getattr(scheduler, 'active_sp', 0.0), 1.0)
    return found * 100.0 / float(active_spawnpoints)
def wh_status_update(args, status, wh_queue, scheduler):
    """Queue a webhook message when speed-scan scheduler progress changes."""
    scheduler_name = status['scheduler']
    # Only the speed scheduler reports progress this way.
    if not args.speed_scan:
        return
    tth_found = get_scheduler_tth_found_pct(scheduler)
    spawns_found = getattr(scheduler, 'spawns_found', 0)
    # Skip the notification unless TTH progress moved by more than 0.01%.
    if (tth_found - status['scheduler_status']['tth_found']) <= 0.01:
        return
    log.debug('Scheduler update is due, sending webhook message.')
    wh_queue.put(('scheduler', {'name': scheduler_name,
                                'instance': args.status_name,
                                'tth_found': tth_found,
                                'spawns_found': spawns_found}))
    status['scheduler_status']['tth_found'] = tth_found
def get_stats_message(threadStatus):
    """Build the one-line overseer statistics summary string."""
    overseer = threadStatus['Overseer']
    # Seconds since the overseer started; clamp to 1 so the per-hour
    # rates below never divide by zero on the very first call.
    elapsed = now() - overseer['starttime']
    if not elapsed:
        elapsed = 1
    sph = overseer['success_total'] * 3600.0 / elapsed
    fph = overseer['fail_total'] * 3600.0 / elapsed
    eph = overseer['empty_total'] * 3600.0 / elapsed
    skph = overseer['skip_total'] * 3600.0 / elapsed
    cph = overseer['captcha_total'] * 3600.0 / elapsed
    # Captcha cost estimate: $0.00299 per solve, ~730 hours per month.
    ccost = cph * 0.00299
    cmonth = ccost * 730
    template = ('Total active: {} | Success: {} ({:.1f}/hr) | '
                'Fails: {} ({:.1f}/hr) | Empties: {} ({:.1f}/hr) | '
                'Skips {} ({:.1f}/hr) | '
                'Captchas: {} ({:.1f}/hr)|${:.5f}/hr|${:.3f}/mo')
    return template.format(
        overseer['active_accounts'],
        overseer['success_total'], sph,
        overseer['fail_total'], fph,
        overseer['empty_total'], eph,
        overseer['skip_total'], skph,
        overseer['captcha_total'], cph,
        ccost, cmonth)
def update_total_stats(threadStatus, last_account_status):
    """Fold per-worker stat deltas into the overseer's running totals.

    Only the change since the previous snapshot is added, so totals keep
    growing correctly when workers restart or swap accounts. Snapshots
    for accounts no longer in use are dropped.
    """
    overseer = threadStatus['Overseer']
    worker_count = 0
    active_usernames = set()
    for tstatus in threadStatus.values():
        if tstatus.get('type', '') != 'Worker':
            continue
        worker_count += 1
        username = tstatus.get('username', '')
        active_usernames.add(username)
        previous = last_account_status.get(username, {})
        # Accumulate only the delta since the last snapshot.
        for total_key, stat_key in (('skip_total', 'skip'),
                                    ('captcha_total', 'captcha'),
                                    ('empty_total', 'noitems'),
                                    ('fail_total', 'fail'),
                                    ('success_total', 'success')):
            overseer[total_key] += stat_delta(tstatus, previous, stat_key)
        last_account_status[username] = copy.deepcopy(tstatus)
    overseer['active_accounts'] = worker_count
    # Remove last status for accounts that workers
    # are not using anymore
    for username in list(last_account_status.keys()):
        if username not in active_usernames:
            del last_account_status[username]
# Generates the list of locations to scan.
def generate_hive_locations(current_location, step_distance,
                            step_limit, hive_count):
    """Generate center coordinates for ``hive_count`` hexagonal scan hives.

    Starting at ``current_location``, additional hive centers are laid
    out in concentric hexagonal rings sized so adjacent beehive grids
    (each of radius ``step_limit`` steps of ``step_distance`` km) tile
    without overlapping. Returns a list of (lat, lng, 0) tuples.
    """
    # Compass bearings in degrees, as expected by get_new_coords().
    NORTH = 0
    EAST = 90
    SOUTH = 180
    WEST = 270
    xdist = math.sqrt(3) * step_distance  # Distance between column centers.
    ydist = 3 * (step_distance / 2)  # Distance between row centers.
    results = []
    results.append((current_location[0], current_location[1], 0))
    loc = current_location
    ring = 1
    while len(results) < hive_count:
        # Move from the current ring onto the first hive of the next ring.
        loc = get_new_coords(loc, ydist * (step_limit - 1), NORTH)
        loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), EAST)
        results.append((loc[0], loc[1], 0))
        # Walk the six sides of the hexagonal ring, placing one hive per
        # step; the final partial side returns to the starting corner.
        for i in range(ring):
            loc = get_new_coords(loc, ydist * step_limit, NORTH)
            loc = get_new_coords(loc, xdist * (1.5 * step_limit - 1), WEST)
            results.append((loc[0], loc[1], 0))
        for i in range(ring):
            loc = get_new_coords(loc, ydist * (step_limit - 1), SOUTH)
            loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), WEST)
            results.append((loc[0], loc[1], 0))
        for i in range(ring):
            loc = get_new_coords(loc, ydist * (2 * step_limit - 1), SOUTH)
            loc = get_new_coords(loc, xdist * 0.5, WEST)
            results.append((loc[0], loc[1], 0))
        for i in range(ring):
            loc = get_new_coords(loc, ydist * (step_limit), SOUTH)
            loc = get_new_coords(loc, xdist * (1.5 * step_limit - 1), EAST)
            results.append((loc[0], loc[1], 0))
        for i in range(ring):
            loc = get_new_coords(loc, ydist * (step_limit - 1), NORTH)
            loc = get_new_coords(loc, xdist * (1.5 * step_limit - 0.5), EAST)
            results.append((loc[0], loc[1], 0))
        # Back to start.
        for i in range(ring - 1):
            loc = get_new_coords(loc, ydist * (2 * step_limit - 1), NORTH)
            loc = get_new_coords(loc, xdist * 0.5, EAST)
            results.append((loc[0], loc[1], 0))
        loc = get_new_coords(loc, ydist * (2 * step_limit - 1), NORTH)
        loc = get_new_coords(loc, xdist * 0.5, EAST)
        ring += 1
    return results
def search_worker_thread(args, account_queue, account_sets,
                         account_failures, account_captchas,
                         search_items_queue, pause_bit, status, dbq, whq,
                         scheduler, key_scheduler):
    """Main per-worker scan loop: log in, scan steps, parse, report.

    Pulls an account from ``account_queue``, then scans locations handed
    out by ``scheduler`` forever; captcha'd, banned, rested or failing
    accounts are pushed to ``account_failures``/``account_captchas`` and
    a fresh account is taken. Parsed results go to the db (``dbq``) and
    webhook (``whq``) queues. Runs forever as a daemon thread.
    """
    log.debug('Search worker thread starting...')
    # The outer forever loop restarts only when the inner one is
    # intentionally exited - which should only be done when the worker
    # is failing too often, and probably banned.
    # This reinitializes the API and grabs a new account from the queue.
    while True:
        try:
            # Force storing of previous worker info to keep consistency.
            if 'starttime' in status:
                dbq.put((WorkerStatus, {0: WorkerStatus.db_format(status)}))
            status['starttime'] = now()
            # Track per loop.
            first_login = True
            # Make sure the scheduler is done for valid locations.
            while not scheduler.ready:
                time.sleep(1)
            status['message'] = ('Waiting to get new account from the'
                                 + ' queue...')
            log.info(status['message'])
            # Get an account.
            account = account_queue.get()
            status.update(WorkerStatus.get_worker(
                account['username'], scheduler.scan_location))
            status['message'] = 'Switching to account {}.'.format(
                account['username'])
            log.info(status['message'])
            # New lease of life right here.
            status['fail'] = 0
            status['success'] = 0
            status['noitems'] = 0
            status['skip'] = 0
            status['captcha'] = 0
            stagger_thread(args)
            # Sleep when consecutive_fails reaches max_failures, overall fails
            # for stat purposes.
            consecutive_fails = 0
            # Sleep when consecutive_noitems reaches max_empty, overall noitems
            # for stat purposes.
            consecutive_noitems = 0
            api = setup_api(args, status, account)
            # The forever loop for the searches.
            while True:
                while pause_bit.is_set():
                    status['message'] = 'Scanning paused.'
                    time.sleep(2)
                # If this account has been messing up too hard, let it rest.
                if ((args.max_failures > 0) and
                        (consecutive_fails >= args.max_failures)):
                    status['message'] = (
                        'Account {} failed more than {} scans; possibly bad ' +
                        'account. Switching accounts...').format(
                            account['username'],
                            args.max_failures)
                    log.warning(status['message'])
                    account_failures.append({'account': account,
                                             'last_fail_time': now(),
                                             'reason': 'failures'})
                    # Exit this loop to get a new account and have the API
                    # recreated.
                    break
                # If this account has not found anything for too long, let it
                # rest.
                if ((args.max_empty > 0) and
                        (consecutive_noitems >= args.max_empty)):
                    status['message'] = (
                        'Account {} returned empty scan for more than {} ' +
                        'scans; possibly ip is banned. Switching ' +
                        'accounts...').format(account['username'],
                                              args.max_empty)
                    log.warning(status['message'])
                    account_failures.append({'account': account,
                                             'last_fail_time': now(),
                                             'reason': 'empty scans'})
                    # Exit this loop to get a new account and have the API
                    # recreated.
                    break
                # If used proxy disappears from "live list" after background
                # checking - switch account but do not freeze it (it's not an
                # account failure).
                if args.proxy and status['proxy_url'] not in args.proxy:
                    status['message'] = (
                        'Account {} proxy {} is not in a live list any ' +
                        'more. Switching accounts...').format(
                            account['username'], status['proxy_url'])
                    log.warning(status['message'])
                    # Experimental, nobody did this before.
                    account_queue.put(account)
                    # Exit this loop to get a new account and have the API
                    # recreated.
                    break
                # If this account has been running too long, let it rest.
                if (args.account_search_interval is not None):
                    if (status['starttime'] <=
                            (now() - args.account_search_interval)):
                        status['message'] = (
                            'Account {} is being rotated out to rest.'.format(
                                account['username']))
                        log.info(status['message'])
                        account_failures.append({'account': account,
                                                 'last_fail_time': now(),
                                                 'reason': 'rest interval'})
                        break
                # Grab the next thing to search (when available).
                step, step_location, appears, leaves, messages, wait = (
                    scheduler.next_item(status))
                status['message'] = messages['wait']
                # The next_item will return the value telling us how long
                # to sleep. This way the status can be updated
                time.sleep(wait)
                # Using step as a flag for no valid next location returned.
                if step == -1:
                    time.sleep(scheduler.delay(status['last_scan_date']))
                    continue
                # Too soon?
                # Adding a 10 second grace period.
                if appears and now() < appears + 10:
                    first_loop = True
                    paused = False
                    while now() < appears + 10:
                        if pause_bit.is_set():
                            paused = True
                            break  # Why can't python just have `break 2`...
                        status['message'] = messages['early']
                        if first_loop:
                            log.info(status['message'])
                            first_loop = False
                        time.sleep(1)
                    if paused:
                        scheduler.task_done(status)
                        continue
                # Too late?
                if leaves and now() > (leaves - args.min_seconds_left):
                    scheduler.task_done(status)
                    status['skip'] += 1
                    status['message'] = messages['late']
                    log.info(status['message'])
                    # No sleep here; we've not done anything worth sleeping
                    # for. Plus we clearly need to catch up!
                    continue
                status['message'] = messages['search']
                log.debug(status['message'])
                # Let the api know where we intend to be for this loop.
                # Doing this before check_login so it does not also have
                # to be done when the auth token is refreshed.
                api.set_position(*step_location)
                if args.hash_key:
                    key = key_scheduler.next()
                    log.debug('Using key {} for this scan.'.format(key))
                    api.activate_hash_server(key)
                # Ok, let's get started -- check our login status.
                status['message'] = 'Logging in...'
                check_login(args, account, api, step_location,
                            status['proxy_url'])
                # Only run this when it's the account's first login, after
                # check_login().
                if first_login:
                    first_login = False
                    # Check tutorial completion.
                    if args.complete_tutorial:
                        tutorial_state = get_tutorial_state(args, api, account)
                        if not all(x in tutorial_state
                                   for x in (0, 1, 3, 4, 7)):
                            log.info('Completing tutorial steps for %s.',
                                     account['username'])
                            complete_tutorial(args, api, account,
                                              tutorial_state)
                        else:
                            log.info('Account %s already completed tutorial.',
                                     account['username'])
                # Putting this message after the check_login so the messages
                # aren't out of order.
                status['message'] = messages['search']
                log.info(status['message'])
                # Make the actual request.
                scan_date = datetime.utcnow()
                response_dict = map_request(api, account, step_location,
                                            args.no_jitter)
                status['last_scan_date'] = datetime.utcnow()
                # Record the time and the place that the worker made the
                # request.
                status['latitude'] = step_location[0]
                status['longitude'] = step_location[1]
                dbq.put((WorkerStatus, {0: WorkerStatus.db_format(status)}))
                # Nothing back. Mark it up, sleep, carry on.
                if not response_dict:
                    status['fail'] += 1
                    consecutive_fails += 1
                    status['message'] = messages['invalid']
                    log.error(status['message'])
                    time.sleep(scheduler.delay(status['last_scan_date']))
                    continue
                # Got the response, check for captcha, parse it out, then send
                # todo's to db/wh queues.
                try:
                    captcha = handle_captcha(args, status, api, account,
                                             account_failures,
                                             account_captchas, whq,
                                             response_dict, step_location)
                    if captcha is not None and captcha:
                        # Make another request for the same location
                        # since the previous one was captcha'd.
                        scan_date = datetime.utcnow()
                        response_dict = map_request(api, account,
                                                    step_location,
                                                    args.no_jitter)
                    elif captcha is not None:
                        # Captcha detected but not solved: drop the account
                        # and move on to a fresh one.
                        account_queue.task_done()
                        time.sleep(3)
                        break
                    parsed = parse_map(args, response_dict, step_location,
                                       dbq, whq, key_scheduler, api, status,
                                       scan_date, account, account_sets)
                    del response_dict
                    scheduler.task_done(status, parsed)
                    if parsed['count'] > 0:
                        status['success'] += 1
                        consecutive_noitems = 0
                    else:
                        status['noitems'] += 1
                        consecutive_noitems += 1
                    consecutive_fails = 0
                    status['message'] = ('Search at {:6f},{:6f} completed ' +
                                         'with {} finds.').format(
                        step_location[0], step_location[1],
                        parsed['count'])
                    log.debug(status['message'])
                except Exception as e:
                    parsed = False
                    status['fail'] += 1
                    consecutive_fails += 1
                    # consecutive_noitems = 0 - I propose to leave noitems
                    # counter in case of error.
                    status['message'] = ('Map parse failed at {:6f},{:6f}, ' +
                                         'abandoning location. {} may be ' +
                                         'banned.').format(step_location[0],
                                                           step_location[1],
                                                           account['username'])
                    log.exception('{}. Exception message: {}'.format(
                        status['message'], repr(e)))
                    if response_dict is not None:
                        del response_dict
                # Get detailed information about gyms.
                if args.gym_info and parsed:
                    # Build a list of gyms to update.
                    gyms_to_update = {}
                    for gym in parsed['gyms'].values():
                        # Can only get gym details within 1km of our position.
                        distance = calc_distance(
                            step_location, [gym['latitude'], gym['longitude']])
                        if distance < 1.0:
                            # Check if we already have details on this gym.
                            # Get them if not.
                            try:
                                record = GymDetails.get(gym_id=gym['gym_id'])
                            except GymDetails.DoesNotExist as e:
                                gyms_to_update[gym['gym_id']] = gym
                                continue
                            # If we have a record of this gym already, check if
                            # the gym has been updated since our last update.
                            if record.last_scanned < gym['last_modified']:
                                gyms_to_update[gym['gym_id']] = gym
                                continue
                            else:
                                log.debug(
                                    ('Skipping update of gym @ %f/%f, ' +
                                     'up to date.'),
                                    gym['latitude'], gym['longitude'])
                                continue
                        else:
                            log.debug(
                                ('Skipping update of gym @ %f/%f, too far ' +
                                 'away from our location at %f/%f (%fkm).'),
                                gym['latitude'], gym['longitude'],
                                step_location[0], step_location[1], distance)
                    if len(gyms_to_update):
                        gym_responses = {}
                        current_gym = 1
                        status['message'] = (
                            'Updating {} gyms for location {},{}...').format(
                                len(gyms_to_update), step_location[0],
                                step_location[1])
                        log.debug(status['message'])
                        for gym in gyms_to_update.values():
                            status['message'] = (
                                'Getting details for gym {} of {} for ' +
                                'location {:6f},{:6f}...').format(
                                    current_gym, len(gyms_to_update),
                                    step_location[0], step_location[1])
                            time.sleep(random.random() + 2)
                            response = gym_request(api, account, step_location,
                                                   gym, args.api_version)
                            # Make sure the gym was in range. (Sometimes the
                            # API gets cranky about gyms that are ALMOST 1km
                            # away.)
                            if response:
                                if response['responses'][
                                        'GYM_GET_INFO']['result'] == 2:
                                    log.warning(
                                        ('Gym @ %f/%f is out of range (%dkm),'
                                         + ' skipping.'),
                                        gym['latitude'], gym['longitude'],
                                        distance)
                                else:
                                    gym_responses[gym['gym_id']] = response[
                                        'responses']['GYM_GET_INFO']
                                del response
                            # Increment which gym we're on for status messages.
                            current_gym += 1
                        status['message'] = (
                            'Processing details of {} gyms for location ' +
                            '{:6f},{:6f}...').format(len(gyms_to_update),
                                                     step_location[0],
                                                     step_location[1])
                        log.debug(status['message'])
                        if gym_responses:
                            parse_gyms(args, gym_responses,
                                       whq, dbq)
                            del gym_responses
                # Update hashing key stats in the database based on the values
                # reported back by the hashing server.
                if args.hash_key:
                    key = HashServer.status.get('token', None)
                    key_instance = key_scheduler.keys[key]
                    key_instance['remaining'] = HashServer.status.get(
                        'remaining', 0)
                    key_instance['maximum'] = (
                        HashServer.status.get('maximum', 0))
                    usage = (
                        key_instance['maximum'] -
                        key_instance['remaining'])
                    if key_instance['peak'] < usage:
                        key_instance['peak'] = usage
                    if key_instance['expires'] is None:
                        expires = HashServer.status.get(
                            'expiration', None)
                        if expires is not None:
                            expires = datetime.utcfromtimestamp(expires)
                            key_instance['expires'] = expires
                    key_instance['last_updated'] = datetime.utcnow()
                    log.debug('Hash key %s has %s/%s RPM left.', key,
                              key_instance['remaining'],
                              key_instance['maximum'])
                # Delay the desired amount after "scan" completion.
                delay = scheduler.delay(status['last_scan_date'])
                status['message'] += ' Sleeping {}s until {}.'.format(
                    delay,
                    time.strftime(
                        '%H:%M:%S',
                        time.localtime(time.time() + args.scan_delay)))
                log.info(status['message'])
                time.sleep(delay)
        # Catch any process exceptions, log them, and continue the thread.
        except Exception as e:
            log.error((
                'Exception in search_worker under account {} Exception ' +
                'message: {}.').format(account['username'], repr(e)))
            status['message'] = (
                'Exception in search_worker using account {}. Restarting ' +
                'with fresh account. See logs for details.').format(
                    account['username'])
            traceback.print_exc(file=sys.stdout)
            account_failures.append({'account': account,
                                     'last_fail_time': now(),
                                     'reason': 'exception'})
            time.sleep(args.scan_delay)
def upsertKeys(keys, key_scheduler, db_updates_queue):
    """Queue the current per-key hashing stats for a database upsert.

    Each key's record is annotated with its own key string, and its 'peak'
    is raised to at least the value already stored in the DB so a
    previously recorded peak is never regressed.
    """
    hashkeys = {}
    for hash_key in keys:
        record = key_scheduler.keys[hash_key]
        record['key'] = hash_key
        record['peak'] = max(record['peak'],
                             HashKeys.getStoredPeak(hash_key))
        hashkeys[hash_key] = record
    db_updates_queue.put((HashKeys, hashkeys))
def map_request(api, account, position, no_jitter=False):
    """Perform a get_map_objects scan at `position` and return the cleaned
    response dict, or False on any failure.

    The sequence of req.* calls builds one batched RPC; the order is part
    of the request shape and should not be changed casually.
    """
    # Create scan_location to send to the api based off of position, because
    # tuples aren't mutable.
    if no_jitter:
        # Just use the original coordinates.
        scan_location = position
    else:
        # Jitter it, just a little bit.
        scan_location = jitter_location(position)
        log.debug('Jittered to: %f/%f/%f',
                  scan_location[0], scan_location[1], scan_location[2])
    try:
        # S2 cells covering the scan point; zeroed timestamps request
        # full (non-incremental) data for every cell.
        cell_ids = util.get_cell_ids(scan_location[0], scan_location[1])
        timestamps = [0, ] * len(cell_ids)
        req = api.create_request()
        req.get_map_objects(latitude=f2i(scan_location[0]),
                            longitude=f2i(scan_location[1]),
                            since_timestamp_ms=timestamps,
                            cell_id=cell_ids)
        req.check_challenge()
        req.get_hatched_eggs()
        req.get_inventory(last_timestamp_ms=account['last_timestamp_ms'])
        req.check_awarded_badges()
        req.get_buddy_walked()
        req.get_inbox(is_history=True)
        response = req.call()
        # Trim the response (second arg True presumably keeps the parts
        # parse_new_timestamp_ms needs -- see clear_dict_response).
        response = clear_dict_response(response, True)
        # Update the account's last inventory timestamp from this response.
        parse_new_timestamp_ms(account, response)
        return response
    except HashingOfflineException as e:
        log.error('Hashing server is unreachable, it might be offline.')
    except BadHashRequestException as e:
        log.error('Invalid or expired hashing key: %s.',
                  api._hash_server_token)
    except Exception as e:
        log.exception('Exception while downloading map: %s', repr(e))
    # All failure paths fall through to a False return.
    return False
def gym_request(api, account, position, gym, api_version):
    """Fetch detailed info for one gym and return the cleaned response
    dict, or False on failure.

    NOTE(review): `api_version` is accepted but never used in this body.
    """
    try:
        log.info('Getting details for gym @ %f/%f (%fkm away)',
                 gym['latitude'], gym['longitude'],
                 calc_distance(position, [gym['latitude'], gym['longitude']]))
        req = api.create_request()
        req.gym_get_info(
            gym_id=gym['gym_id'],
            player_lat_degrees=f2i(position[0]),
            player_lng_degrees=f2i(position[1]),
            gym_lat_degrees=gym['latitude'],
            gym_lng_degrees=gym['longitude'])
        req.check_challenge()
        req.get_hatched_eggs()
        req.get_inventory(last_timestamp_ms=account['last_timestamp_ms'])
        req.check_awarded_badges()
        req.get_buddy_walked()
        req.get_inbox(is_history=True)
        response = req.call()
        # Update account timestamp state before the response is trimmed.
        parse_new_timestamp_ms(account, response)
        response = clear_dict_response(response)
        return response
    except Exception as e:
        log.exception('Exception while downloading gym details: %s.', repr(e))
        return False
def calc_distance(pos1, pos2):
    """Great-circle (haversine) distance in km between two (lat, lon) pairs."""
    earth_radius_km = 6378.1
    d_lat = math.radians(pos1[0] - pos2[0])
    d_lon = math.radians(pos1[1] - pos2[1])
    lat1 = math.radians(pos1[0])
    lat2 = math.radians(pos2[0])
    a = (math.sin(d_lat / 2) ** 2 +
         math.cos(lat1) * math.cos(lat2) * math.sin(d_lon / 2) ** 2)
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_km * central_angle
# Delay each thread start time so that logins occur after delay.
def stagger_thread(args):
    """Sleep roughly args.login_delay seconds while holding the shared
    login lock, so worker logins are serialized and spaced apart.

    Uses a `with` block so the lock is released even if the sleep is
    interrupted -- the previous acquire()/release() pair leaked the lock
    on exception, which would deadlock every other worker thread.
    """
    with loginDelayLock:
        # +/- 0.25s of jitter so logins don't land on exact multiples.
        delay = args.login_delay + ((random.random() - .5) / 2)
        log.debug('Delaying thread startup for %.2f seconds', delay)
        time.sleep(delay)
# The delta from last stat to current stat
def stat_delta(current_status, last_status, stat_name):
    """Return the change in `stat_name` between two status dicts.

    A key missing from either dict counts as 0.
    """
    newest = current_status.get(stat_name, 0)
    previous = last_status.get(stat_name, 0)
    return newest - previous
def check_forced_version(args, api_check_time, pause_bit):
    """Periodically compare our API version against the version Niantic
    forces, setting `pause_bit` (pausing the scanner) on mismatch or on
    any check failure, and clearing it on success.

    Returns the timestamp after which the next check should run.
    """
    if int(time.time()) > api_check_time:
        log.debug("Checking forced API version.")
        # Schedule the next check regardless of this check's outcome.
        api_check_time = int(time.time()) + args.version_check_interval
        forced_api = get_api_version(args)
        if not forced_api:
            # Couldn't retrieve API version. Pause scanning.
            pause_bit.set()
            log.warning('Forced API check got no or invalid response. ' +
                        'Possible bad proxy.')
            log.warning('Scanner paused due to failed API check.')
            return api_check_time
        # Got a response let's compare version numbers.
        try:
            if StrictVersion(args.api_version) < StrictVersion(forced_api):
                # Installed API version is lower. Pause scanning.
                pause_bit.set()
                log.warning('Started with API: %s, ' +
                            'Niantic forced to API: %s',
                            args.api_version,
                            forced_api)
                log.warning('Scanner paused due to forced Niantic API update.')
            else:
                # API check was successful and
                # installed API version is newer or equal forced API.
                # Continue scanning.
                log.debug("API check was successful. Continue scanning.")
                pause_bit.clear()
        except ValueError as e:
            # Unknown version format. Pause scanning as well.
            pause_bit.set()
            log.warning('Niantic forced unknown API version format: %s.',
                        forced_api)
            log.warning('Scanner paused due to unknown API version format.')
        except Exception as e:
            # Something else happened. Pause scanning as well.
            pause_bit.set()
            log.warning('Unknown error on API version comparison: %s.',
                        repr(e))
            log.warning('Scanner paused due to unknown API check error.')
    return api_check_time
def get_api_version(args):
    """Retrieve the API version currently forced by Niantic.

    Args:
        args: Command line arguments

    Returns:
        API version string. False if request failed.
    """
    proxies = {}
    if args.proxy:
        num, proxy = get_new_proxy(args)
        proxies = {'http': proxy, 'https': proxy}
    try:
        retry_policy = Retry(total=3,
                             backoff_factor=0.5,
                             status_forcelist=[500, 502, 503, 504])
        session = requests.Session()
        session.mount('https://', HTTPAdapter(max_retries=retry_policy))
        response = session.get(
            'https://pgorelease.nianticlabs.com/plfe/version',
            proxies=proxies,
            verify=False,
            timeout=5)
        if response.status_code != requests.codes.ok:
            return False
        # Body starts with two framing characters before the version string.
        return response.text[2:]
    except Exception as e:
        log.warning('error on API check: %s', repr(e))
        return False
| mpw1337/RocketMap | pogom/search.py | Python | agpl-3.0 | 56,889 |
from django.utils.translation import ugettext_lazy as _

# Metadata for the 'comments' addon: name, author, dependencies and version.
# Human-readable strings are wrapped in ugettext_lazy so they are translated
# at render time, not at import time.
app_info = {
    'name': 'comments',
    'author': 'Katrid',
    'website': 'http://katrid.com',
    'short_description': 'Enterprise Social Network',
    'description': _('Comments, Discussions, Mailing List, News, Document Followers'),
    'dependencies': ['keops.modules.contact'],
    'category': _('Communication'),
    'version': '0.2',
}
| mrmuxl/keops | keops/modules/comments/apps.py | Python | agpl-3.0 | 403 |
# -*- coding: utf-8 -*-
import os.path
from btb.log_filter import skip_unreadable_post
# Project root: two directories above this settings module.
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
TEST_RUNNER = 'btb.test_runner.BtbTestRunner'
#
# Locale
#
TIME_ZONE = 'US/Eastern'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
#
# Filesystem
#
# Uploaded files.
# We want .url for vanilla storage to point to a public MEDIA_URL, but the
# MEDIA_ROOT to by default be private.
# These refer to private media only.
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/private_media/'
UPLOAD_TO = "uploads"
# NOTE(review): Python 2 octal literal; under Python 3 this must be 0o664.
FILE_UPLOAD_PERMISSIONS = 0664
# Serve PUBLIC_MEDIA_ROOT with webserver, at PUBLIC_MEDIA_URL. Symlinks for
# anything public will be added to this directory, so it should be on the same
# filesystem as PRIVATE_MEDIA_ROOT.
PUBLIC_MEDIA_ROOT = os.path.join(MEDIA_ROOT, "public")
PUBLIC_MEDIA_URL = '/media/'
# Static files (js, css, etc). Before deployment, these live in 'static' (or
# individual apps' static dirs). When we deply to production, we use django's
# staticfiles to collect them into 'site_static'.
STATIC_ROOT = os.path.join(BASE_DIR, "site_static")
STATIC_URL = '/static/'
#ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
# django-compressor precompiler commands, run as shell pipelines.
COMPRESS_PRECOMPILERS = (
    ('text/javascript', 'cat'), # this shouldn't be necessary, but is
    ('text/css', 'cat'), # this shouldn't be necessary, but is
    ('text/coffeescript', 'coffee --compile --stdio'),
    ('text/x-sass', 'sass --compass -I "%s"' % (os.path.join(BASE_DIR, "static", "css"))),
)
#
# Other stuff
#
# Make this unique, and don't share it with anybody. Override it in
# settings.py.
SECRET_KEY = 'W^X~>p2j+u9XmNfNyt<9;ffaIVo{2vsfI-?_o8z893V8t$<[7\\'
# NOTE(review): pre-Django-1.10 settings style (MIDDLEWARE_CLASSES and the
# TEMPLATE_* triplet below instead of the TEMPLATES dict).
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'pagination.middleware.PaginationMiddleware',
)
ROOT_URLCONF = 'scanblog.urls'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, "templates"),
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.request",
    "django.contrib.messages.context_processors.messages",
    "btb.context_processors.site",
    "notification.context_processors.notification",
)
INSTALLED_APPS = (
    # django internal apps (list first)
    # 'contenttypes' must come before 'auth'
    # http://stackoverflow.com/a/18292090/85461
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.humanize',
    'django.contrib.admin',
    'django.contrib.flatpages',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # btb includes
    'about',
    'accounts',
    'annotations',
    'blogs',
    'btb',
    'comments',
    'correspondence',
    'moderation',
    'profiles',
    'subscriptions',
    'scanning',
    'campaigns',
    # 3rd party dependencies
    'registration',
    'djcelery',
    'compressor',
    'sorl.thumbnail',
    'notification',
    'pagination',
    'urlcrypt',
)
# dictConfig logging: mail unhandled request errors to the admins, but only
# outside DEBUG and never for unreadable (client-aborted) POSTs.
LOGGING = {
    "version": 1,
    # Fixed typo: this key was spelled "disable_existing_lggers", which
    # dictConfig silently ignored, leaving the default (True) in effect
    # and disabling all pre-existing loggers contrary to the intent here.
    "disable_existing_loggers": False,
    "handlers": {
        "mail_admins": {
            "level": "ERROR",
            "class": "django.utils.log.AdminEmailHandler",
            # The dict literal previously defined 'filters' twice; only the
            # second entry ever took effect. Keep that effective value.
            'filters': ['require_debug_false', 'skip_unreadable_posts'],
        }
    },
    "loggers": {
        "django.request": {
            "handlers": ["mail_admins"],
            "level": "ERROR",
            "propagate": True,
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        'skip_unreadable_posts': {
            '()': 'django.utils.log.CallbackFilter',
            # Imported at the top of this module from btb.log_filter.
            'callback': skip_unreadable_post,
        }
    }
}
# NOTE(review): this assignment is dead -- AUTHENTICATION_BACKENDS is
# re-assigned further down (urlcrypt + ModelBackend), and that later value
# is the one Django sees. The case-insensitive backend listed here is
# therefore never active; merge or remove one of the two assignments.
AUTHENTICATION_BACKENDS = (
    'scanblog.accounts.backends.CaseInsensitiveAuthenticationBackend',
)
AUTH_USER_MODEL = "auth.User"
CONTACT_EMAIL = "contact@betweenthebars.org"
MAIL_DROP_ID = 1
LOGIN_URL = "/accounts/login"
#TODO: Go to something else after login.
LOGIN_REDIRECT_URL = "/people/show"
EMAIL_HOST = "localhost"
EMAIL_PORT = 25
EMAIL_SUBJECT_PREFIX = "[BetweenTheBars.org] "
REGISTRATION_OPEN = True
COMMENTS_OPEN = True
TRANSCRIPTION_OPEN = True
CACHE_MIDDLEWARE_SECONDS = 60 * 10
# django-registration
ACCOUNT_ACTIVATION_DAYS = 2
# Allows sending of very large files in low memory; doesn't work with
# devserver.
X_SENDFILE_ENABLED = False # Apache only
X_ACCEL_REDIRECT_ENABLED = False # Nginx only
UPLOAD_LIMIT = 20971520 # 20MB
SCAN_PAGES_PER_PAGE = 6
# Celery async processing
import djcelery
djcelery.setup_loader()
BROKER_URL = "amqp://guest:guest@localhost:5672/"
CELERY_TRACK_STARTED = True
CELERY_IGNORE_RESULT = False
#CELERY_RESULT_BACKEND = 'amqp'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
TEXT_IMAGE_FONT = "/usr/share/fonts/truetype/gentium/GenR102.ttf"
INTERNAL_IPS = ('127.0.0.1',)
# External Commands
NICE_CMD = "/usr/bin/nice"
PDFTK_CMD = "/usr/bin/pdftk"
PDFIMAGES_CMD = "/usr/bin/pdfimages"
PDFTOTEXT_CMD = "/usr/bin/pdftotext"
CONVERT_CMD = "/usr/bin/convert"
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
THUMBNAIL_CACHE = 'default'
# This later assignment is the effective AUTHENTICATION_BACKENDS value;
# it replaces the one defined near the top of this section.
AUTHENTICATION_BACKENDS = (
    'urlcrypt.auth_backends.UrlCryptBackend',
    'django.contrib.auth.backends.ModelBackend',
)
DISABLE_NOTIFICATIONS = False
DISABLE_ADMIN_NOTIFICATIONS = False
THUMBNAIL_BACKEND = "btb.utils.SameDirThumbnailBackend"
THUMBNAIL_PREFIX = "cache/"
MAX_READY_TO_PUBLISH_DAYS = 6
PUBLISHING_HOURS = (7, 23)
SELENIUM_FIREFOX_BIN = "/usr/bin/firefox"
# Handle apps that don't play nice with migrations.
MIGRATION_MODULES = {
    'djcelery': 'btb.djcelery_migrations',
    'registration': 'btb.registration_migrations',
    'sorl.thumbnail': 'btb.sorl_migrations'
}
| yourcelf/btb | scanblog/scanblog/default_settings.py | Python | agpl-3.0 | 6,676 |
import sdms

# Connection parameters for the scheduling server.
# NOTE(review): hard-coded credentials -- acceptable for an example only.
server = { 'HOST' : 'localhost',
 'PORT' : '2506',
 'USER' : 'SYSTEM',
 'PASSWORD' : 'VerySecret' }
conn = sdms.SDMSConnectionOpenV2(server, server['USER'], server['PASSWORD'], "Simple Access Example")
# The membership test is wrapped because the connection object may not
# support 'in'; any failure of the test itself is deliberately ignored and
# only an explicit ERROR reply aborts the script.
try:
    if 'ERROR' in conn:
        print(str(conn))
        exit(1)
except:
    pass
stmt = "LIST SESSIONS;"
result = sdms.SDMSCommandWithSoc(conn, stmt)
if 'ERROR' in result:
    print(str(result['ERROR']))
else:
    # Print one fixed-width row per session from the result table.
    for row in result['DATA']['TABLE']:
        print("{0:3} {1:8} {2:32} {3:9} {4:15} {5:>15} {6}".format(\
            str(row['THIS']), \
            str(row['UID']), \
            str(row['USER']), \
            str(row['TYPE']), \
            str(row['START']), \
            str(row['IP']), \
            str(row['INFORMATION'])))
conn.close()
| schedulix/schedulix | src/examples/SimpleAccess3.py | Python | agpl-3.0 | 704 |
import os
import aiohttp
import random
import string
import asyncio
import shutil
import re
from threading import Thread
from io import BytesIO
from zipfile import ZipFile
from discord.ext import commands
from core import BotError
# Fallback BYOND build (major.minor) used when the caller does not request
# a specific version via the "v:{major}.{minor}" prefix.
DEFAULT_MAJOR = "512"
DEFAULT_MINOR = "1416"
class WindowsProcessThread(Thread):
    """Worker thread that drives an async coroutine to completion on its
    own ProactorEventLoop (a Windows event loop), recording failures for
    the caller to inspect via `errored` / `error_msg` after join().
    """
    def __init__(self, proc, p_args):
        super().__init__()
        # Coroutine function to run; invoked as proc(loop, *p_args).
        self._proc = proc
        self._args = p_args
        # Set by run() when the coroutine raised.
        self.errored = False
        self.error_msg = None
    def run(self):
        winloop = asyncio.ProactorEventLoop()
        future = self._proc(winloop, *self._args)
        try:
            winloop.run_until_complete(future)
        except BotError as err:
            # Preserve the readable message from our own error type.
            self.errored = True
            self.error_msg = err.message
        except Exception:
            self.errored = True
            self.error_msg = "Unknown error caught in worker thread."
        winloop.close()
def validate_byond_build(byond_str):
    """
    A little shit of a failed command argument.

    Return a (major, minor) int tuple when the argument string matches the
    format: v:{major}.{minor} {rest of code here}. Return None when the
    string does not match that shape; raise BotError when the version
    fields are present but not integers.
    """
    if not byond_str.startswith("v:"):
        return None
    pieces = byond_str.split(" ")
    # There must be code after the version token.
    if len(pieces) < 2:
        return None
    version_parts = pieces[0].split(".")
    if len(version_parts) != 2:
        return None
    try:
        return int(version_parts[0][2:]), int(version_parts[1])
    except ValueError:
        raise BotError("Error processing BYOND version request.", "validate_byond_build")
class DmCog(commands.Cog):
    """Discord cog that compiles and runs user-submitted DM (BYOND) code in
    a sandboxed, one-shot DreamDaemon instance and reports world.log back.

    Windows-only: paths use backslashes and subprocesses run on a
    ProactorEventLoop via WindowsProcessThread.
    """
    # Folder (relative to CWD) holding cached BYOND installs and instances.
    WORK_FOLDER = "cogs\\byond_eval"
    # Skeleton .dme for str.format: {0}=global code, {1}=proc body,
    # {2}=world.log dumps. Doubled braces are literal braces.
    DM_BOILERPLATE = "/world/loop_checks = FALSE;\n" + \
                     "\n/world/New() {{ dm_eval(); del(src); }}" + \
                     "\n{0}\n/proc/dm_eval() {{ {1} {2} }}"
    def __init__(self, bot):
        self.bot = bot
        # Instance IDs currently in use (see new_instance/cleanup_instance).
        self._instances = []
        # Patterns rejected anywhere in submitted code (see validate_dm).
        self._safety_patterns = [r'#(\s*)?include', r'include', r'##',
                                 r'```.*```', r'`.*`', r'Reboot']
        self._safety_expressions = []
        # Splits input into "pre_proc;;;proc;;to_out" segments.
        self._arg_expression = re.compile(r'(?:(?P<pre_proc>.*);;;)?(?:(?P<proc>.*);;)?(?P<to_out>.*)?')
        for patt in self._safety_patterns:
            self._safety_expressions.append(re.compile(patt))
    def get_work_dir(self):
        """Returns the folder where BYOND versions and instances should be saved."""
        cwd = os.getcwd()
        return os.path.join(cwd, self.WORK_FOLDER)
    def new_instance(self, length):
        """Generates a unique instance ID, one which is currently not in use."""
        while True:
            rand = "".join([random.choice(string.ascii_letters + string.digits) for _ in range(length)])
            if rand not in self._instances:
                self._instances.append(rand)
                return rand
    def cleanup_instance(self, instance_id, instance_dir):
        """Deletes all files associated with an instance and removes it from the list."""
        if not os.path.isdir(instance_dir):
            return
        self._instances.remove(instance_id)
        shutil.rmtree(instance_dir, ignore_errors=True)
    def process_args(self, code):
        """
        Generates an array of code segments to be placed into the compiled DM code.

        Returned dictionary must have three keys: "pre_proc", "proc", and "to_out".
        If those pieces do not exist, they are to be set as None. As to avoid key
        errors further down the call stack.
        """
        res = self._arg_expression.match(code)
        if not res or not res.groupdict():
            raise BotError("No valid code sent.", "process_args")
        code_segs = {"pre_proc": None, "proc": None, "to_out": None}
        res_dict = res.groupdict()
        for key in code_segs:
            if key in res_dict:
                code_segs[key] = res_dict[key]
        # Ensure each segment is terminated so it splices cleanly into
        # the boilerplate .dme.
        if (code_segs["pre_proc"] and
            not code_segs["pre_proc"].endswith(";") and
            not code_segs["pre_proc"].endswith("}")):
            code_segs["pre_proc"] += ";"
        # NOTE(review): the second endswith(";") below is duplicated and was
        # likely meant to be endswith("}") (cf. the pre_proc check above).
        if (code_segs["proc"] and not code_segs["proc"].endswith(";")
            and not code_segs["proc"].endswith(";")):
            code_segs["proc"] += ";"
        if code_segs["to_out"]:
            code_segs["to_out"] = code_segs["to_out"].split(";")
        return code_segs
    def validate_dm(self, code):
        """Validates the code given for potential exploits."""
        for expr in self._safety_expressions:
            if expr.search(code):
                raise BotError("Disallowed/dangerous code found. Aborting.", "validate_dm")
    def generate_dm(self, segments, instance_dir):
        """Generates the .dme file to be compiled."""
        with open(f"{instance_dir}\\eval.dme", "w+") as f:
            if not segments["pre_proc"]:
                segments["pre_proc"] = ""
            if segments["to_out"]:
                # Turn each requested expression into a world.log dump and
                # safety-check the combined output code.
                var_dump = ""
                for var in segments["to_out"]:
                    var_dump += f"world.log << {var};"
                segments["to_out"] = var_dump
                self.validate_dm(var_dump)
            else:
                segments["to_out"] = ""
            if not segments["proc"]:
                segments["proc"] = ""
            output = self.DM_BOILERPLATE
            output = output.format(segments["pre_proc"], segments["proc"], segments["to_out"])
            f.write(output)
    async def compile_dm(self, loop, instance_dir, major, minor):
        """Executor proc to compile the .dme file provided."""
        dm_path = os.path.join(self.get_work_dir(),
                               f"byond{major}.{minor}\\byond\\bin\\dm.exe")
        if not os.path.isfile(dm_path):
            raise BotError("dm.exe not found.", "compile_dm")
        dme_path = os.path.join(instance_dir, "eval.dme")
        if not os.path.isfile(dme_path):
            raise BotError(".dme under evaluation not found.", "compile_dm")
        process = await asyncio.create_subprocess_exec(*[dm_path, dme_path], loop=loop,
                                                       stderr=asyncio.subprocess.DEVNULL,
                                                       stdout=asyncio.subprocess.DEVNULL)
        try:
            await asyncio.wait_for(process.wait(), timeout=60.0, loop=loop)
        except TimeoutError:
            raise BotError("Compiler timed out.", "compile_dm")
        if process.returncode != 0:
            raise BotError("Error compiling or running DM.", "compile_dm")
    def validate_compile(self, instance_dir):
        """Checks wether or not the compiled end result is safe to run."""
        dmb_found = False
        for fname in os.listdir(instance_dir):
            # A .rsc means the code embedded resources, which is disallowed.
            if fname.endswith(".rsc"):
                raise BotError("Resource file detected. Execution aborted.", "validate_compile")
            elif fname.endswith(".dmb"):
                dmb_found = True
        if not dmb_found:
            raise BotError("Compilation failed and no .dmb was generated.", "validate_compile")
    async def run_dm(self, loop, instance_dir, major, minor):
        """Executor proc to host and run the .dmb file provided."""
        dd_path = os.path.join(self.get_work_dir(),
                               f"byond{major}.{minor}\\byond\\bin\\dreamdaemon.exe")
        if not os.path.isfile(dd_path):
            raise BotError("dreadaemon.exe not found.", "run_dm")
        dmb_path = os.path.join(instance_dir, "eval.dmb")
        if not os.path.isfile(dmb_path):
            raise BotError(".dmb under evaluation not found.", "run_dm")
        # -ultrasafe/-invisible/-once: run sandboxed, headless, exactly once,
        # logging world.log to output.log in the instance folder.
        p_args = [dd_path, dmb_path] + ["-invisible", "-ultrasafe", "-logself", "-log", "output.log", "-once", "-close", "-quiet"]
        process = await asyncio.create_subprocess_exec(*p_args, loop=loop,
                                                       stderr=asyncio.subprocess.DEVNULL,
                                                       stdout=asyncio.subprocess.DEVNULL)
        try:
            await asyncio.wait_for(process.wait(), timeout=60.0, loop=loop)
        except TimeoutError:
            raise BotError("DreamDaemon timed out.", "run_dm")
    async def run_executor(self, proc, p_args):
        """A helper for running Windows subprocesses in a separate thread."""
        thread = WindowsProcessThread(proc, p_args)
        thread.start()
        # Poll for up to ~60s without blocking the bot's event loop.
        cycles = 0
        while cycles < 60:
            if not thread.is_alive():
                break
            cycles += 1
            await asyncio.sleep(1)
        error = thread.errored
        error_msg = thread.error_msg
        thread.join()
        # NOTE(review): if the worker is still alive after the poll window,
        # join() above blocks the event loop until it finishes.
        if error:
            raise BotError(error_msg, "run_executor")
    def get_output(self, instance_dir):
        """Returns up to 10 lines of the instance's world.log (skipping the
        first log line), truncated to roughly 1750 characters."""
        log_path = os.path.join(instance_dir, "output.log")
        if not os.path.isfile(log_path):
            return "Error: no log file found."
        with open(log_path, "r") as file:
            content = file.readlines()
        if len(content) < 2:
            return "No contents found in the log file."
        content = [x.strip() for x in content]
        content = content[1:11]
        content = "\n".join(content)
        if len(content) > 1750:
            content = content[0:1750] + "\n...Cut-off reached..."
        out = "World.log output:\n```\n" + content + "\n```"
        return out
    def byond_found(self, major=DEFAULT_MAJOR, minor=DEFAULT_MINOR):
        """Checks whether or not the specified version is already found in the test folder."""
        path = self.get_work_dir()
        byond_path = os.path.join(path, f"byond{major}.{minor}")
        if os.path.isdir(byond_path) and os.path.isfile(f"{byond_path}\\byond\\bin\\dm.exe"):
            return True
        return False
    async def setup_byond(self, major=DEFAULT_MAJOR, minor=DEFAULT_MINOR):
        """Downloads and unzips the provided BYOND version."""
        path = self.get_work_dir()
        byond_path = os.path.join(path, f"byond{major}.{minor}")
        url = f"http://www.byond.com/download/build/{major}/{major}.{minor}_byond.zip"
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                try:
                    data = await resp.read()
                except Exception:
                    raise BotError("Unable to download the BYOND zip file.", "init_byond")
                if resp.status != 200:
                    raise BotError("Unable to download the specified BYOND version.", "init_byond")
                with ZipFile(BytesIO(data)) as z:
                    z.extractall(byond_path)
    @commands.command(aliases=["dmeval", "dme"])
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def dm_eval(self, ctx, *, code):
        """
        Evaluates given DM code by compiling and running it. Accepts a maximum
        of 4 formatted arguments: v:{byond_major}.{byond.minor} {global_code};;;{eval_code};;{vars;to;log}.

        All arguments other than {vars;to;log} are optional and may simply be omitted.
        So at bare minimum you simply need to write some variables/expressions to be
        evaluated and printed to world.log.
        """
        try:
            version_tuple = validate_byond_build(code)
            if not version_tuple:
                version_tuple = (DEFAULT_MAJOR, DEFAULT_MINOR)
            else:
                # Strip the leading "v:major.minor " token from the code.
                code = code[(code.find(" ") + 1):]
            if not self.byond_found(*version_tuple):
                await ctx.send(f"Version {version_tuple[0]}.{version_tuple[1]} not cached. Downloading. (This may take a bit.)")
                await self.setup_byond(*version_tuple)
        except BotError as err:
            await ctx.send(f"Error while setting up BYOND:\n{err}")
            return
        except Exception:
            await ctx.send(f"Unrecognized exception while setting up BYOND.")
            return
        instance = self.new_instance(32)
        instance_folder = os.path.join(self.get_work_dir(), f"_instances\\{instance}")
        if not os.path.isdir(instance_folder):
            os.makedirs(instance_folder)
        try:
            # Generate, compile, sanity-check, then run the snippet.
            segs = self.process_args(code)
            self.generate_dm(segs, instance_folder)
            executor_args = [instance_folder, version_tuple[0], version_tuple[1]]
            await self.run_executor(self.compile_dm, executor_args)
            self.validate_compile(instance_folder)
            await self.run_executor(self.run_dm, executor_args)
        except BotError as err:
            await ctx.send(f"Error compiling or running code:\n{err}")
        except Exception:
            await ctx.send("Unrecognized error while compiling or running code.")
        else:
            await ctx.send(self.get_output(instance_folder))
        # Always remove the per-request working folder.
        self.cleanup_instance(instance, instance_folder)
    @commands.command(aliases=["dmversion", "dmv"])
    async def dm_version(self, ctx):
        """Reports the default version of BYOND used by dm_eval."""
        await ctx.send(f"The default version of BYOND used for `dm_eval` is: {DEFAULT_MAJOR}.{DEFAULT_MINOR}.")
def setup(bot):
bot.add_cog(DmCog(bot)) | Aurorastation/BOREALISbot2 | cogs/dm_eval.py | Python | agpl-3.0 | 13,511 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-12-28 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add one nullable machine-request counter per status bucket to the
    Statistics model."""
    dependencies = [
        ('accounts', '0020_profile_preferred_proxy'),
    ]
    # Identical AddField operations, one per status suffix, in the same
    # order the autogenerated literal listed them.
    operations = [
        migrations.AddField(
            model_name='statistics',
            name='machine_requests' + suffix,
            field=models.IntegerField(blank=True, null=True),
        )
        for suffix in (
            '',
            '_abandoned',
            '_awaiting_ack',
            '_awaiting_appeal',
            '_awaiting_response',
            '_denied',
            '_draft',
            '_fix_required',
            '_no_docs',
            '_partial',
            '_payment_required',
            '_submitted',
            '_success',
        )
    ]
| MuckRock/muckrock | muckrock/accounts/migrations/0021_auto_20161228_1233.py | Python | agpl-3.0 | 2,686 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
gitolite-manager
Author: Tim Henderson
Contact: tim.tadh@gmail.com, tadh@case.edu
Copyright: 2013 All Rights Reserved, see LICENSE
'''
import urllib, re
import cgi
from logging import getLogger
log = getLogger('gm:view:user')
from pyramid.view import view_config
from pyramid import httpexceptions as httpexc
from pyramid.httpexceptions import HTTPFound, HTTPBadRequest
from pyramid.response import Response
from gitolite_manager import validate as v
from gitolite_manager.models.session import Session
from gitolite_manager.models.user import User
from gitolite_manager.controllers import key_controller, repo_controller
def get_user_name(user):
    """Local part of the user's email address (text before the '@').

    Raises ValueError (via str.index) when the address has no '@'.
    """
    address = user.email
    at = address.index('@')
    return address[:at]
def tvars(request, extras):
    """Build the template-variable dict shared by this module's views,
    with per-view `extras` merged over (and able to override) the defaults."""
    session = request.environ['gm_session']
    defaults = {
        'SITENAME' : 'Key Czar',
        'SITEURL' : request.application_url,
        'request' : request,
        'session' : session,
        # Exposed so templates can render a user's display name.
        'get_user_name' : get_user_name,
    }
    defaults.update(extras)
    return defaults
@view_config(
    route_name='user',
    request_method=['GET'],
    renderer='templates/user.html'
)
def user(request):
    """User landing page with links to the key and partner sub-pages."""
    db = request.environ['db.session']
    session = request.environ['gm_session']
    # Anonymous visitors are bounced to the site root.
    if session.user is None:
        return HTTPFound(request.application_url)
    email = session.user.email
    # NOTE(review): local `user` shadows this view function's name.
    user = email[:email.index('@')]
    return tvars(request, {
        'TITLE' : user + ' user',
        'user_name' : user,
        'keys_url': request.route_url('user/keys'),
        'add_key_url': request.route_url('user/addkey'),
        'partners_url': request.route_url('user/partners'),
    })
@view_config(
    route_name='user/keys',
    request_method=['GET'],
    renderer='templates/keys.html'
)
def keys(request):
    """List the current user's SSH keys."""
    db = request.environ['db.session']
    session = request.environ['gm_session']
    if session.user is None:
        return HTTPFound(request.application_url)
    email = session.user.email
    user = email[:email.index('@')]
    return tvars(request, {
        'TITLE' : 'keys for %s' % user,
        'user_name' : user,
    })
@view_config(
    route_name='user/addkey',
    request_method=['GET'],
    renderer='templates/addkey.html'
)
def addkey(request):
    """Render the add-key upload form (submission handled by addkey_post)."""
    db = request.environ['db.session']
    session = request.environ['gm_session']
    if session.user is None:
        return HTTPFound(request.application_url)
    email = session.user.email
    user = email[:email.index('@')]
    return tvars(request, {
        'TITLE' : 'add key for %s' % user,
        'user_name' : user,
    })
# POST body schema for addkey_post: an uploaded key file plus a CSRF token.
addkey_schema = {
    'key': v.type_checker(cgi.FieldStorage),
    'csrf': v.type_checker(basestring)
}
@view_config(
    route_name='user/addkey',
    request_method=['POST'],
    renderer='templates/addkey.html'
)
def addkey_post(request):
    """Validate the uploaded SSH key and CSRF token, then store the key."""
    db = request.environ['db.session']
    session = request.environ['gm_session']
    if session.user is None:
        return HTTPFound(request.route_url("root"))
    email = session.user.email
    user = email[:email.index('@')]
    err, post = v.validate_dictionary(dict(request.POST), addkey_schema)
    if err:
        # Re-render the form with validation errors.
        return tvars(request, {
            'TITLE' : 'add key for %s' % user,
            'user_name' : user,
            'errors': err,
        })
    elif not session.valid_csrf(post['csrf'], request.route_url('user/addkey')):
        return HTTPFound(request.route_url('root'))
    else:
        try:
            key = post['key'].file.read()
            key_controller.add_key(db, session.user, key)
        except Exception, e:
            log.exception(e)
            return tvars(request, {
                'TITLE' : 'add key for %s' % user,
                'user_name' : user,
                'errors': [e],
            })
    return HTTPFound(request.route_url('user/keys'))
# URL matchdict schema for rmkey: a numeric key id.
rmkey_schema = {
    'keyid': v.type_checker(basestring) &
        v.format_checker(re.compile(r'^[0-9]+$')),
}
@view_config(
    route_name='user/rmkey',
    request_method=['GET'],
    renderer='templates/rmkey.html'
)
def rmkey(request):
    """Remove one of the current user's keys by id.

    NOTE(review): this performs a state-changing delete on a GET request
    with no CSRF check (unlike addkey_post above).
    """
    db = request.environ['db.session']
    session = request.environ['gm_session']
    if session.user is None:
        return HTTPFound(request.application_url)
    email = session.user.email
    user = email[:email.index('@')]
    err, match = v.validate_dictionary(dict(request.matchdict), rmkey_schema)
    if err:
        return tvars(request, {
            'TITLE' : 'remove key for %s' % user,
            'user_name' : user,
            'errors': err,
        })
    try:
        key_controller.rm_key(db, session.user, int(match['keyid']))
    except Exception, e:
        return tvars(request, {
            'TITLE' : 'remove key for %s' % user,
            'user_name' : user,
            'errors': [str(e)],
        })
    return HTTPFound(request.route_url('user/keys'))
@view_config(
    route_name='user/partners',
    request_method=['GET'],
    renderer='templates/add-partner.html'
)
def partners(request):
    """Render the partner-management page for the current user."""
    db = request.environ['db.session']
    session = request.environ['gm_session']
    if session.user is None:
        return HTTPFound(request.application_url)
    email = session.user.email
    user = email[:email.index('@')]
    return tvars(request, {
        'TITLE' : 'partners for %s' % user,
        'user_name' : user,
    })
# POST body schema for add_partners: CSRF token, the partner's case id
# (three lowercase letters + digits), and the repo to share.
add_partner_schema = {
    'csrf': v.type_checker(basestring),
    'case_id': v.type_checker(basestring) &
        v.format_checker(re.compile(r'^[a-z]{3}[0-9]*$')),
    'repo_name': v.type_checker(basestring) &
        v.format_checker(re.compile(r'^[a-zA-Z][a-zA-Z0-9_-]*$')),
}
@view_config(
    route_name='user/add-partner',
    request_method=['POST'],
    renderer='templates/add-partner.html'
)
def add_partners(request):
    """Validate the form then grant a partner access to one of the user's
    repos via repo_controller.add_partner."""
    db = request.environ['db.session']
    session = request.environ['gm_session']
    if session.user is None:
        return HTTPFound(request.application_url)
    email = session.user.email
    user = email[:email.index('@')]
    err, post = v.validate_dictionary(dict(request.POST), add_partner_schema)
    if err:
        return tvars(request, {
            'TITLE' : 'add partner for %s' % user,
            'user_name' : user,
            'errors': err,
        })
    elif not session.valid_csrf(post['csrf'], request.route_url('user/add-partner')):
        return HTTPFound(request.route_url('root'))
    else:
        try:
            repo_controller.add_partner(db, session.user, post['case_id'],
                                        post['repo_name'])
        except Exception, e:
            return tvars(request, {
                'TITLE' : 'add partner for %s' % user,
                'user_name' : user,
                'errors': [e],
            })
    return HTTPFound(request.route_url('user/partners'))
# URL matchdict schema for rm_partner: a numeric repo id.
rm_partner_schema = {
    'repo_id': v.type_checker(basestring) &
        v.format_checker(re.compile(r'^[0-9]+$')),
}
@view_config(
    route_name='user/rm-partner',
    request_method=['GET'],
    renderer='templates/rm-partner.html'
)
def rm_partner(request):
    """Revoke a partner's access by repo id.

    NOTE(review): state-changing removal on GET with no CSRF check,
    mirroring rmkey above.
    """
    db = request.environ['db.session']
    session = request.environ['gm_session']
    if session.user is None:
        return HTTPFound(request.application_url)
    email = session.user.email
    user = email[:email.index('@')]
    err, match = v.validate_dictionary(dict(request.matchdict), rm_partner_schema)
    if err:
        return tvars(request, {
            'TITLE' : 'remove partner for %s' % user,
            'user_name' : user,
            'errors': err,
        })
    try:
        repo_controller.rm_partner(db, session.user, int(match['repo_id']))
    except Exception, e:
        return tvars(request, {
            'TITLE' : 'remove partner for %s' % user,
            'user_name' : user,
            'errors': [str(e)],
        })
    return HTTPFound(request.route_url('user/partners'))
| timtadh/gitolite-manager | gitolite_manager/views/user.py | Python | agpl-3.0 | 7,934 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': ' Customer points based on invoice amounts',
'version': '1.0',
'category': 'Generic Modules',
'author': 'Rajkumar',
'website': 'http://www.openerp.com',
'depends': ['product','base','account'],
'init_xml': [ ],
'update_xml': ['customer_commission.xml','customer_commission_board_view.xml'],
'demo_xml': [ ],
'test': [ ],
'installable': True,
'active': False,
'description': """ Customer points are created as based on invoice amounts
using these points to reduce the invoice amount another payments"""
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ksrajkumar/openerp-6.1 | openerp/addons/itara_customer_commission/__openerp__.py | Python | agpl-3.0 | 1,639 |
"""Implements basics of Capa, including class CapaModule."""
import cgi
import copy
import datetime
import hashlib
import json
import logging
import os
import traceback
import struct
import sys
import re
# We don't want to force a dependency on datadog, so make the import conditional
try:
import dogstats_wrapper as dog_stats_api
except ImportError:
dog_stats_api = None
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.responsetypes import StudentInputError, \
ResponseError, LoncapaProblemError
from capa.util import convert_files_to_filenames, get_inner_html_from_xpath
from .progress import Progress
from xmodule.exceptions import NotFoundError
from xblock.fields import Scope, String, Boolean, Dict, Integer, Float
from .fields import Timedelta, Date
from django.utils.timezone import UTC
from xmodule.capa_base_constants import RANDOMIZATION, SHOWANSWER
from django.conf import settings
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
# Generate this many different variants of problems with rerandomize=per_student
NUM_RANDOMIZATION_BINS = 20
# Never produce more than this many different seeds, no matter what.
MAX_RANDOMIZATION_BINS = 1000
def randomization_bin(seed, problem_id):
    """
    Pick a randomization bin for the problem given the user's seed and a problem id.
    We do this because we only want e.g. 20 randomizations of a problem to make analytics
    interesting. To avoid having sets of students that always get the same problems,
    we'll combine the system's per-student seed with the problem id in picking the bin.
    """
    r_hash = hashlib.sha1()
    # Encode explicitly: on Python 3, sha1.update() requires bytes and
    # str(seed) would raise TypeError. On Python 2 (where str is bytes)
    # this produces exactly the same digest as before.
    for part in (seed, problem_id):
        data = part if isinstance(part, bytes) else str(part).encode('utf-8')
        r_hash.update(data)
    # get the first few digits of the hash, convert to an int, then mod.
    return int(r_hash.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS
class Randomization(String):
    """
    String field that normalizes legacy randomization flag values.

    Historical values "" and "true" map to ALWAYS, "false" maps to
    PER_STUDENT; anything else is passed through unchanged.
    """
    def from_json(self, value):
        if value == "false":
            return RANDOMIZATION.PER_STUDENT
        if value in ("", "true"):
            return RANDOMIZATION.ALWAYS
        return value
    to_json = from_json
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that additionally understands Python complex numbers."""
    def default(self, obj):
        """
        Render complex values as '<real><signed imag>*j' strings; defer
        everything else to the base encoder.
        """
        if not isinstance(obj, complex):
            return json.JSONEncoder.default(self, obj)
        return u"{real:.7g}{imag:+.7g}*j".format(real=obj.real, imag=obj.imag)
class CapaFields(object):
    """
    Define the possible fields for a Capa problem
    """
    display_name = String(
        display_name=_("Display Name"),
        help=_("This name appears in the horizontal navigation at the top of the page."),
        scope=Scope.settings,
        # it'd be nice to have a useful default but it screws up other things; so,
        # use display_name_with_default for those
        default=_("Blank Advanced Problem")
    )
    attempts = Integer(
        help=_("Number of attempts taken by the student on this problem"),
        default=0,
        scope=Scope.user_state)
    max_attempts = Integer(
        display_name=_("Maximum Attempts"),
        help=_("Defines the number of times a student can try to answer this problem. "
               "If the value is not set, infinite attempts are allowed."),
        values={"min": 0}, scope=Scope.settings
    )
    due = Date(help=_("Date that this problem is due by"), scope=Scope.settings)
    graceperiod = Timedelta(
        help=_("Amount of time after the due date that submissions will be accepted"),
        scope=Scope.settings
    )
    showanswer = String(
        display_name=_("Show Answer"),
        help=_("Defines when to show the answer to the problem. "
               "A default value can be set in Advanced Settings."),
        scope=Scope.settings,
        default=SHOWANSWER.FINISHED,
        values=[
            {"display_name": _("Always"), "value": SHOWANSWER.ALWAYS},
            {"display_name": _("Answered"), "value": SHOWANSWER.ANSWERED},
            {"display_name": _("Attempted"), "value": SHOWANSWER.ATTEMPTED},
            {"display_name": _("Closed"), "value": SHOWANSWER.CLOSED},
            {"display_name": _("Finished"), "value": SHOWANSWER.FINISHED},
            {"display_name": _("Correct or Past Due"), "value": SHOWANSWER.CORRECT_OR_PAST_DUE},
            {"display_name": _("Past Due"), "value": SHOWANSWER.PAST_DUE},
            {"display_name": _("Never"), "value": SHOWANSWER.NEVER}]
    )
    force_save_button = Boolean(
        help=_("Whether to force the save button to appear on the page"),
        scope=Scope.settings,
        default=False
    )
    reset_key = "DEFAULT_SHOW_RESET_BUTTON"
    # getattr with an explicit default replaces the original
    # `getattr(...) if hasattr(...) else False` dance (one lookup, same result).
    default_reset_button = getattr(settings, reset_key, False)
    show_reset_button = Boolean(
        display_name=_("Show Reset Button"),
        help=_("Determines whether a 'Reset' button is shown so the user may reset their answer. "
               "A default value can be set in Advanced Settings."),
        scope=Scope.settings,
        default=default_reset_button
    )
    rerandomize = Randomization(
        display_name=_("Randomization"),
        help=_(
            'Defines when to randomize the variables specified in the associated Python script. '
            'For problems that do not randomize values, specify \"Never\". '
        ),
        default=RANDOMIZATION.NEVER,
        scope=Scope.settings,
        values=[
            {"display_name": _("Always"), "value": RANDOMIZATION.ALWAYS},
            {"display_name": _("On Reset"), "value": RANDOMIZATION.ONRESET},
            {"display_name": _("Never"), "value": RANDOMIZATION.NEVER},
            {"display_name": _("Per Student"), "value": RANDOMIZATION.PER_STUDENT}
        ]
    )
    data = String(help=_("XML data for the problem"), scope=Scope.content, default="<problem></problem>")
    correct_map = Dict(help=_("Dictionary with the correctness of current student answers"),
                       scope=Scope.user_state, default={})
    input_state = Dict(help=_("Dictionary for maintaining the state of inputtypes"), scope=Scope.user_state)
    student_answers = Dict(help=_("Dictionary with the current student responses"), scope=Scope.user_state)
    done = Boolean(help=_("Whether the student has answered the problem"), scope=Scope.user_state)
    seed = Integer(help=_("Random seed for this student"), scope=Scope.user_state)
    last_submission_time = Date(help=_("Last submission time"), scope=Scope.user_state)
    submission_wait_seconds = Integer(
        display_name=_("Timer Between Attempts"),
        help=_("Seconds a student must wait between submissions for a problem with multiple attempts."),
        scope=Scope.settings,
        default=0)
    weight = Float(
        display_name=_("Problem Weight"),
        help=_("Defines the number of points each problem is worth. "
               "If the value is not set, each response field in the problem is worth one point."),
        values={"min": 0, "step": .1},
        scope=Scope.settings
    )
    markdown = String(help=_("Markdown source of this module"), default=None, scope=Scope.settings)
    source_code = String(
        help=_("Source code for LaTeX and Word problems. This feature is not well-supported."),
        scope=Scope.settings
    )
    text_customization = Dict(
        help=_("String customization substitutions for particular locations"),
        scope=Scope.settings
        # TODO: someday it should be possible to not duplicate this definition here
        # and in inheritance.py
    )
    use_latex_compiler = Boolean(
        help=_("Enable LaTeX templates?"),
        default=False,
        scope=Scope.settings
    )
    matlab_api_key = String(
        display_name=_("Matlab API key"),
        help=_("Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
               "This key is granted for exclusive use by this course for the specified duration. "
               "Please do not share the API key with other courses and notify MathWorks immediately "
               "if you believe the key is exposed or compromised. To obtain a key for your course, "
               "or to report an issue, please contact moocsupport@mathworks.com"),
        scope=Scope.settings
    )
class CapaMixin(CapaFields):
"""
Core logic for Capa Problem, which can be used by XModules or XBlocks.
"""
    def __init__(self, *args, **kwargs):
        """
        Build the module and eagerly construct its LoncapaProblem (self.lcp).

        On failure to build the problem: in DEBUG mode a dummy error problem
        is substituted; otherwise the exception is re-raised with extra info.
        """
        super(CapaMixin, self).__init__(*args, **kwargs)
        # close_date = due date plus grace period (if both are set).
        due_date = self.due
        if self.graceperiod is not None and due_date:
            self.close_date = due_date + self.graceperiod
        else:
            self.close_date = due_date
        if self.seed is None:
            self.choose_new_seed()
        # Need the problem location in openendedresponse to send out. Adding
        # it to the system here seems like the least clunky way to get it
        # there.
        self.runtime.set('location', self.location.to_deprecated_string())
        try:
            # TODO (vshnayder): move as much as possible of this work and error
            # checking to descriptor load time
            self.lcp = self.new_lcp(self.get_state_for_lcp())
            # At this point, we need to persist the randomization seed
            # so that when the problem is re-loaded (to check/view/save)
            # it stays the same.
            # However, we do not want to write to the database
            # every time the module is loaded.
            # So we set the seed ONLY when there is not one set already
            if self.seed is None:
                self.seed = self.lcp.seed
        except Exception as err:  # pylint: disable=broad-except
            msg = u'cannot create LoncapaProblem {loc}: {err}'.format(
                loc=self.location.to_deprecated_string(), err=err)
            # TODO (vshnayder): do modules need error handlers too?
            # We shouldn't be switching on DEBUG.
            if self.runtime.DEBUG:
                log.warning(msg)
                # TODO (vshnayder): This logic should be general, not here--and may
                # want to preserve the data instead of replacing it.
                # e.g. in the CMS
                msg = u'<p>{msg}</p>'.format(msg=cgi.escape(msg))
                msg += u'<p><pre>{tb}</pre></p>'.format(
                    # just the traceback, no message - it is already present above
                    tb=cgi.escape(
                        u''.join(
                            ['Traceback (most recent call last):\n'] +
                            traceback.format_tb(sys.exc_info()[2])
                        )
                    )
                )
                # create a dummy problem with error message instead of failing
                problem_text = (u'<problem><text><span class="inline-error">'
                                u'Problem {url} has an error:</span>{msg}</text></problem>'.format(
                                    url=self.location.to_deprecated_string(),
                                    msg=msg)
                                )
                self.lcp = self.new_lcp(self.get_state_for_lcp(), text=problem_text)
            else:
                # add extra info and raise
                # NOTE(review): three-expression raise is Python 2-only syntax.
                raise Exception(msg), None, sys.exc_info()[2]
        self.set_state_from_lcp()
        assert self.seed is not None
def choose_new_seed(self):
"""
Choose a new seed.
"""
if self.rerandomize == RANDOMIZATION.NEVER:
self.seed = 1
elif self.rerandomize == RANDOMIZATION.PER_STUDENT and hasattr(self.runtime, 'seed'):
# see comment on randomization_bin
self.seed = randomization_bin(self.runtime.seed, unicode(self.location).encode('utf-8'))
else:
self.seed = struct.unpack('i', os.urandom(4))[0]
# So that sandboxed code execution can be cached, but still have an interesting
# number of possibilities, cap the number of different random seeds.
self.seed %= MAX_RANDOMIZATION_BINS
    def new_lcp(self, state, text=None):
        """
        Generate a new Loncapa Problem

        state: persisted problem-state dict (shape as in get_state_for_lcp).
        text: problem XML source; defaults to this module's `data` field.
        """
        if text is None:
            text = self.data
        # LoncapaProblem talks to the platform only through this facade,
        # which is assembled from runtime services.
        capa_system = LoncapaSystem(
            ajax_url=self.runtime.ajax_url,
            anonymous_student_id=self.runtime.anonymous_student_id,
            cache=self.runtime.cache,
            can_execute_unsafe_code=self.runtime.can_execute_unsafe_code,
            get_python_lib_zip=self.runtime.get_python_lib_zip,
            DEBUG=self.runtime.DEBUG,
            filestore=self.runtime.filestore,
            i18n=self.runtime.service(self, "i18n"),
            node_path=self.runtime.node_path,
            render_template=self.runtime.render_template,
            seed=self.runtime.seed,  # Why do we do this if we have self.seed?
            STATIC_URL=self.runtime.STATIC_URL,
            xqueue=self.runtime.xqueue,
            matlab_api_key=self.matlab_api_key
        )
        return LoncapaProblem(
            problem_text=text,
            id=self.location.html_id(),
            state=state,
            seed=self.seed,
            capa_system=capa_system,
            capa_module=self,  # njp
        )
def get_state_for_lcp(self):
"""
Give a dictionary holding the state of the module
"""
return {
'done': self.done,
'correct_map': self.correct_map,
'student_answers': self.student_answers,
'input_state': self.input_state,
'seed': self.seed,
}
def set_state_from_lcp(self):
"""
Set the module's state from the settings in `self.lcp`
"""
lcp_state = self.lcp.get_state()
self.done = lcp_state['done']
self.correct_map = lcp_state['correct_map']
self.input_state = lcp_state['input_state']
self.student_answers = lcp_state['student_answers']
self.seed = lcp_state['seed']
def set_last_submission_time(self):
"""
Set the module's last submission time (when the problem was checked)
"""
self.last_submission_time = datetime.datetime.now(UTC())
def get_score(self):
"""
Access the problem's score
"""
return self.lcp.get_score()
def max_score(self):
"""
Access the problem's max score
"""
return self.lcp.get_max_score()
def get_progress(self):
"""
For now, just return score / max_score
"""
score_dict = self.get_score()
score = score_dict['score']
total = score_dict['total']
if total > 0:
if self.weight is not None:
# Progress objects expect total > 0
if self.weight == 0:
return None
# scale score and total by weight/total:
score = score * self.weight / total
total = self.weight
try:
return Progress(score, total)
except (TypeError, ValueError):
log.exception("Got bad progress")
return None
return None
def get_html(self):
"""
Return some html with data about the module
"""
progress = self.get_progress()
return self.runtime.render_template('problem_ajax.html', {
'element_id': self.location.html_id(),
'id': self.location.to_deprecated_string(),
'ajax_url': self.runtime.ajax_url,
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
'content': self.get_problem_html(encapsulate=False),
})
def check_button_name(self):
"""
Determine the name for the "check" button.
Usually it is just "Check", but if this is the student's
final attempt, change the name to "Final Check".
The text can be customized by the text_customization setting.
"""
# The logic flow is a little odd so that _('xxx') strings can be found for
# translation while also running _() just once for each string.
_ = self.runtime.service(self, "i18n").ugettext
check = _('Check')
final_check = _('Final Check')
# Apply customizations if present
if 'custom_check' in self.text_customization:
check = _(self.text_customization.get('custom_check')) # pylint: disable=translation-of-non-string
if 'custom_final_check' in self.text_customization:
final_check = _(self.text_customization.get('custom_final_check')) # pylint: disable=translation-of-non-string
# TODO: need a way to get the customized words into the list of
# words to be translated
if self.max_attempts is not None and self.attempts >= self.max_attempts - 1:
return final_check
else:
return check
def check_button_checking_name(self):
"""
Return the "checking..." text for the "check" button.
After the user presses the "check" button, the button will briefly
display the value returned by this function until a response is
received by the server.
The text can be customized by the text_customization setting.
"""
# Apply customizations if present
if 'custom_checking' in self.text_customization:
return self.text_customization.get('custom_checking')
_ = self.runtime.service(self, "i18n").ugettext
return _('Checking...')
def should_show_check_button(self):
"""
Return True/False to indicate whether to show the "Check" button.
"""
submitted_without_reset = (self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS)
# If the problem is closed (past due / too many attempts)
# then we do NOT show the "check" button
# Also, do not show the "check" button if we're waiting
# for the user to reset a randomized problem
if self.closed() or submitted_without_reset:
return False
else:
return True
def should_show_reset_button(self):
"""
Return True/False to indicate whether to show the "Reset" button.
"""
is_survey_question = (self.max_attempts == 0)
# If the problem is closed (and not a survey question with max_attempts==0),
# then do NOT show the reset button.
if self.closed() and not is_survey_question:
return False
# Button only shows up for randomized problems if the question has been submitted
if self.rerandomize in [RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET] and self.is_submitted():
return True
else:
# Do NOT show the button if the problem is correct
if self.is_correct():
return False
else:
return self.show_reset_button
def should_show_save_button(self):
"""
Return True/False to indicate whether to show the "Save" button.
"""
# If the user has forced the save button to display,
# then show it as long as the problem is not closed
# (past due / too many attempts)
if self.force_save_button:
return not self.closed()
else:
is_survey_question = (self.max_attempts == 0)
needs_reset = self.is_submitted() and self.rerandomize == RANDOMIZATION.ALWAYS
# If the student has unlimited attempts, and their answers
# are not randomized, then we do not need a save button
# because they can use the "Check" button without consequences.
#
# The consequences we want to avoid are:
# * Using up an attempt (if max_attempts is set)
# * Changing the current problem, and no longer being
# able to view it (if rerandomize is "always")
#
# In those cases. the if statement below is false,
# and the save button can still be displayed.
#
if self.max_attempts is None and self.rerandomize != RANDOMIZATION.ALWAYS:
return False
# If the problem is closed (and not a survey question with max_attempts==0),
# then do NOT show the save button
# If we're waiting for the user to reset a randomized problem
# then do NOT show the save button
elif (self.closed() and not is_survey_question) or needs_reset:
return False
else:
return True
    def handle_problem_html_error(self, err):
        """
        Create a dummy problem to represent any errors.
        Change our problem to a dummy problem containing a warning message to
        display to users. Returns the HTML to show to users.
        `err` is the Exception encountered while rendering the problem HTML.

        In DEBUG mode the returned HTML is just the error + traceback; in
        production the problem state is reset and a warning is prepended.
        """
        log.exception(err.message)
        # TODO (vshnayder): another switch on DEBUG.
        if self.runtime.DEBUG:
            msg = (
                u'[courseware.capa.capa_module] <font size="+1" color="red">'
                u'Failed to generate HTML for problem {url}</font>'.format(
                    url=cgi.escape(self.location.to_deprecated_string()))
            )
            msg += u'<p>Error:</p><p><pre>{msg}</pre></p>'.format(msg=cgi.escape(err.message))
            msg += u'<p><pre>{tb}</pre></p>'.format(tb=cgi.escape(traceback.format_exc()))
            html = msg
        else:
            # We're in non-debug mode, and possibly even in production. We want
            # to avoid bricking of problem as much as possible
            # Presumably, student submission has corrupted LoncapaProblem HTML.
            # First, pull down all student answers
            student_answers = self.lcp.student_answers
            answer_ids = student_answers.keys()
            # Some inputtypes, such as dynamath, have additional "hidden" state that
            # is not exposed to the student. Keep those hidden
            # TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id
            # NOTE(review): popping while iterating over .keys() relies on
            # Python 2 returning a list here; would break on a py3 view.
            hidden_state_keywords = ['dynamath']
            for answer_id in answer_ids:
                for hidden_state_keyword in hidden_state_keywords:
                    if answer_id.find(hidden_state_keyword) >= 0:
                        student_answers.pop(answer_id)
            # Next, generate a fresh LoncapaProblem
            self.lcp = self.new_lcp(None)
            self.set_state_from_lcp()
            # Prepend a scary warning to the student
            _ = self.runtime.service(self, "i18n").ugettext
            warning_msg = _("Warning: The problem has been reset to its initial state!")
            warning = '<div class="capa_reset"> <h2> ' + warning_msg + '</h2>'
            # Translators: Following this message, there will be a bulleted list of items.
            warning_msg = _("The problem's state was corrupted by an invalid submission. The submission consisted of:")
            warning += warning_msg + '<ul>'
            for student_answer in student_answers.values():
                if student_answer != '':
                    warning += '<li>' + cgi.escape(student_answer) + '</li>'
            warning_msg = _('If this error persists, please contact the course staff.')
            warning += '</ul>' + warning_msg + '</div>'
            html = warning
            try:
                html += self.lcp.get_html()
            except Exception:
                # Couldn't do it. Give up.
                log.exception("Unable to generate html from LoncapaProblem")
                raise
        return html
    def get_demand_hint(self, hint_index):
        """
        Return the demand hint at ``hint_index`` as an ajax response dict.

        (The previous docstring here was copy-pasted from get_problem_html
        and described the wrong method.)

        hint_index: index of the hint to show; wraps modulo the number of
        <demandhint>/<hint> elements in the problem XML.

        Returns {'success': True, 'contents': <prefixed hint html>,
        'hint_index': <index actually shown>}; the client derives the next
        index to request from 'hint_index'.
        """
        demand_hints = self.lcp.tree.xpath("//problem/demandhint/hint")
        hint_index = hint_index % len(demand_hints)
        _ = self.runtime.service(self, "i18n").ugettext
        hint_element = demand_hints[hint_index]
        hint_text = get_inner_html_from_xpath(hint_element)
        if len(demand_hints) == 1:
            prefix = _('Hint: ')
        else:
            # Translators: e.g. "Hint 1 of 3" meaning we are showing the first of three hints.
            prefix = _('Hint ({hint_num} of {hints_count}): ').format(hint_num=hint_index + 1,
                                                                      hints_count=len(demand_hints))
        # Log this demand-hint request
        event_info = dict()
        event_info['module_id'] = self.location.to_deprecated_string()
        event_info['hint_index'] = hint_index
        event_info['hint_len'] = len(demand_hints)
        event_info['hint_text'] = hint_text
        self.runtime.publish(self, 'edx.problem.hint.demandhint_displayed', event_info)
        # We report the index of this hint, the client works out what index to use to get the next hint
        return {
            'success': True,
            'contents': prefix + hint_text,
            'hint_index': hint_index
        }
    def get_problem_html(self, encapsulate=True):
        """
        Return html for the problem.
        Adds check, reset, save, and hint buttons as necessary based on the problem config
        and state.
        encapsulate: if True (the default) embed the html in a problem <div>

        Returns the fully rendered problem HTML string, with platform URL
        substitutions already applied.
        """
        try:
            html = self.lcp.get_html()
        # If we cannot construct the problem HTML,
        # then generate an error message instead.
        except Exception as err:  # pylint: disable=broad-except
            html = self.handle_problem_html_error(err)
        # Authoring-only tags (hints etc.) must never reach the client.
        html = self.remove_tags_from_html(html)
        # The convention is to pass the name of the check button if we want
        # to show a check button, and False otherwise This works because
        # non-empty strings evaluate to True. We use the same convention
        # for the "checking" state text.
        if self.should_show_check_button():
            check_button = self.check_button_name()
            check_button_checking = self.check_button_checking_name()
        else:
            check_button = False
            check_button_checking = False
        content = {
            'name': self.display_name_with_default,
            'html': html,
            'weight': self.weight,
        }
        # If demand hints are available, emit hint button and div.
        demand_hints = self.lcp.tree.xpath("//problem/demandhint/hint")
        demand_hint_possible = len(demand_hints) > 0
        context = {
            'problem': content,
            'id': self.location.to_deprecated_string(),
            'check_button': check_button,
            'check_button_checking': check_button_checking,
            'reset_button': self.should_show_reset_button(),
            'save_button': self.should_show_save_button(),
            'answer_available': self.answer_available(),
            'attempts_used': self.attempts,
            'attempts_allowed': self.max_attempts,
            'demand_hint_possible': demand_hint_possible
        }
        html = self.runtime.render_template('problem.html', context)
        if encapsulate:
            html = u'<div id="problem_{id}" class="problem" data-url="{ajax_url}">'.format(
                id=self.location.html_id(), ajax_url=self.runtime.ajax_url
            ) + html + "</div>"
        # Now do all the substitutions which the LMS module_render normally does, but
        # we need to do here explicitly since we can get called for our HTML via AJAX
        html = self.runtime.replace_urls(html)
        if self.runtime.replace_course_urls:
            html = self.runtime.replace_course_urls(html)
        if self.runtime.replace_jump_to_id_urls:
            html = self.runtime.replace_jump_to_id_urls(html)
        return html
def remove_tags_from_html(self, html):
"""
The capa xml includes many tags such as <additional_answer> or <demandhint> which are not
meant to be part of the client html. We strip them all and return the resulting html.
"""
tags = ['demandhint', 'choicehint', 'optionhint', 'stringhint', 'numerichint', 'optionhint',
'correcthint', 'regexphint', 'additional_answer', 'stringequalhint', 'compoundhint',
'stringequalhint']
for tag in tags:
html = re.sub(r'<%s.*?>.*?</%s>' % (tag, tag), '', html, flags=re.DOTALL)
# Some of these tags span multiple lines
# Note: could probably speed this up by calling sub() once with a big regex
# vs. simply calling sub() many times as we have here.
return html
def hint_button(self, data):
"""
Hint button handler, returns new html using hint_index from the client.
"""
hint_index = int(data['hint_index'])
return self.get_demand_hint(hint_index)
def is_past_due(self):
"""
Is it now past this problem's due date, including grace period?
"""
return (self.close_date is not None and
datetime.datetime.now(UTC()) > self.close_date)
def closed(self):
"""
Is the student still allowed to submit answers?
"""
if self.max_attempts is not None and self.attempts >= self.max_attempts:
return True
if self.is_past_due():
return True
return False
def is_submitted(self):
"""
Used to decide to show or hide RESET or CHECK buttons.
Means that student submitted problem and nothing more.
Problem can be completely wrong.
Pressing RESET button makes this function to return False.
"""
# used by conditional module
return self.lcp.done
def is_attempted(self):
"""
Has the problem been attempted?
used by conditional module
"""
return self.attempts > 0
def is_correct(self):
"""
True iff full points
"""
score_dict = self.get_score()
return score_dict['score'] == score_dict['total']
def answer_available(self):
"""
Is the user allowed to see an answer?
"""
if self.showanswer == '':
return False
elif self.showanswer == SHOWANSWER.NEVER:
return False
elif self.runtime.user_is_staff:
# This is after the 'never' check because admins can see the answer
# unless the problem explicitly prevents it
return True
elif self.showanswer == SHOWANSWER.ATTEMPTED:
return self.attempts > 0
elif self.showanswer == SHOWANSWER.ANSWERED:
# NOTE: this is slightly different from 'attempted' -- resetting the problems
# makes lcp.done False, but leaves attempts unchanged.
return self.lcp.done
elif self.showanswer == SHOWANSWER.CLOSED:
return self.closed()
elif self.showanswer == SHOWANSWER.FINISHED:
return self.closed() or self.is_correct()
elif self.showanswer == SHOWANSWER.CORRECT_OR_PAST_DUE:
return self.is_correct() or self.is_past_due()
elif self.showanswer == SHOWANSWER.PAST_DUE:
return self.is_past_due()
elif self.showanswer == SHOWANSWER.ALWAYS:
return True
return False
def update_score(self, data):
"""
Delivers grading response (e.g. from asynchronous code checking) to
the capa problem, so its score can be updated
'data' must have a key 'response' which is a string that contains the
grader's response
No ajax return is needed. Return empty dict.
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
self.lcp.update_score(score_msg, queuekey)
self.set_state_from_lcp()
self.publish_grade()
return dict() # No AJAX return is needed
def handle_ungraded_response(self, data):
"""
Delivers a response from the XQueue to the capa problem
The score of the problem will not be updated
Args:
- data (dict) must contain keys:
queuekey - a key specific to this response
xqueue_body - the body of the response
Returns:
empty dictionary
No ajax return is needed, so an empty dict is returned
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
# pass along the xqueue message to the problem
self.lcp.ungraded_response(score_msg, queuekey)
self.set_state_from_lcp()
return dict()
def handle_input_ajax(self, data):
"""
Handle ajax calls meant for a particular input in the problem
Args:
- data (dict) - data that should be passed to the input
Returns:
- dict containing the response from the input
"""
response = self.lcp.handle_input_ajax(data)
# save any state changes that may occur
self.set_state_from_lcp()
return response
def get_answer(self, _data):
"""
For the "show answer" button.
Returns the answers: {'answers' : answers}
"""
event_info = dict()
event_info['problem_id'] = self.location.to_deprecated_string()
self.track_function_unmask('showanswer', event_info)
if not self.answer_available():
raise NotFoundError('Answer is not available')
else:
answers = self.lcp.get_question_answers()
self.set_state_from_lcp()
# answers (eg <solution>) may have embedded images
# but be careful, some problems are using non-string answer dicts
new_answers = dict()
for answer_id in answers:
try:
answer_content = self.runtime.replace_urls(answers[answer_id])
if self.runtime.replace_jump_to_id_urls:
answer_content = self.runtime.replace_jump_to_id_urls(answer_content)
new_answer = {answer_id: answer_content}
except TypeError:
log.debug(u'Unable to perform URL substitution on answers[%s]: %s',
answer_id, answers[answer_id])
new_answer = {answer_id: answers[answer_id]}
new_answers.update(new_answer)
return {'answers': new_answers}
# Figure out if we should move these to capa_problem?
def get_problem(self, _data):
"""
Return results of get_problem_html, as a simple dict for json-ing.
{ 'html': <the-html> }
Used if we want to reconfirm we have the right thing e.g. after
several AJAX calls.
"""
return {'html': self.get_problem_html(encapsulate=False)}
@staticmethod
def make_dict_of_responses(data):
    """
    Convert a POST MultiDict of student responses into a plain answer dict.

    Keys of the form 'x_y' are mapped to 'y' (everything after the first
    underscore); e.g. 'input_1_2_3' becomes '1_2_3'.  Two suffixes mark
    structured values:

    * a key ending in '[]' yields a list (all submitted values kept), e.g.
      {'input_1[]': 'test'} -> {'1': ['test']};
    * a key ending in '{}' yields a dict (the value is JSON-decoded), e.g.
      {'input_1{}': '{"1_2_1": 1}'} -> {'1': {"1_2_1": 1}} -- used by
      inputs such as ChoiceTextInput.

    Raises:
        ValueError: if a key contains no underscore, if a '{}' value is not
            valid JSON, or if two keys collapse to the same output name
            (e.g. 'input_1' and 'input_1[]').
    """
    answers = {}
    # A MultiDict yields a multi-valued key once per value; de-duplicate so
    # each key is processed exactly once.
    for raw_key in set(data.keys()):
        # e.g. input_resistor_1 ==> resistor_1
        _, _, name = raw_key.partition('_')
        # partition() returns (raw_key, '', '') when no underscore exists.
        if not name:
            raise ValueError(u"{key} must contain at least one underscore".format(key=raw_key))

        wants_list = name.endswith('[]')
        wants_dict = name.endswith('{}')
        if wants_list or wants_dict:
            name = name[:-2]

        if wants_list:
            # Multi-valued inputs (e.g. checkboxes) keep every value.
            value = data.getall(raw_key)
        elif wants_dict:
            # The submission is a JSON-encoded dict; decode it.
            try:
                value = json.loads(data[raw_key])
            except (KeyError, ValueError):
                raise ValueError(
                    u"Invalid submission: {val} for {key}".format(val=data[raw_key], key=raw_key)
                )
        else:
            value = data[raw_key]

        # Never silently overwrite an earlier answer with the same name.
        if name in answers:
            raise ValueError(u"Key {name} already exists in answers dict".format(name=name))
        answers[name] = value
    return answers
def publish_grade(self):
    """
    Publish the student's current grade to the runtime as a 'grade' event.

    Returns:
        dict: {'grade': <earned score>, 'max_grade': <total possible>}
    """
    current = self.lcp.get_score()
    event = {
        'value': current['score'],
        'max_value': current['total'],
    }
    self.runtime.publish(self, 'grade', event)
    return {'grade': current['score'], 'max_grade': current['total']}
# pylint: disable=too-many-statements
def check_problem(self, data, override_time=False):
    """
    Check whether the student's answers to this problem are correct.

    Arguments:
        data: POST dict of student responses (see make_dict_of_responses).
        override_time: if not False, a datetime used in place of "now" when
            enforcing submission wait times.

    Returns a map of correct/incorrect answers:
      {'success' : 'correct' | 'incorrect' | AJAX alert msg string,
       'contents' : html}

    Raises:
        NotFoundError: if the problem is closed, or must be reset before it
            can be checked again.
    """
    event_info = dict()
    event_info['state'] = self.lcp.get_state()
    event_info['problem_id'] = self.location.to_deprecated_string()

    answers = self.make_dict_of_responses(data)
    # Events carry file names only, never file contents.
    answers_without_files = convert_files_to_filenames(answers)
    event_info['answers'] = answers_without_files

    # Bound .format: metric_name('checks') -> u'capa.check_problem.checks'
    metric_name = u'capa.check_problem.{}'.format

    # Can override current time
    current_time = datetime.datetime.now(UTC())
    if override_time is not False:
        current_time = override_time

    _ = self.runtime.service(self, "i18n").ugettext

    # Too late. Cannot submit
    if self.closed():
        event_info['failure'] = 'closed'
        self.track_function_unmask('problem_check_fail', event_info)
        if dog_stats_api:
            dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:closed'])
        raise NotFoundError(_("Problem is closed."))

    # Problem submitted. Student should reset before checking again
    if self.done and self.rerandomize == RANDOMIZATION.ALWAYS:
        event_info['failure'] = 'unreset'
        self.track_function_unmask('problem_check_fail', event_info)
        if dog_stats_api:
            dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:unreset'])
        raise NotFoundError(_("Problem must be reset before it can be checked again."))

    # Problem queued. Students must wait a specified waittime before they are allowed to submit
    # IDEA: consider stealing code from below: pretty-print of seconds, cueing of time remaining
    if self.lcp.is_queued():
        prev_submit_time = self.lcp.get_recentmost_queuetime()
        waittime_between_requests = self.runtime.xqueue['waittime']
        if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
            msg = _(u"You must wait at least {wait} seconds between submissions.").format(
                wait=waittime_between_requests)
            return {'success': msg, 'html': ''}

    # Wait time between resets: check if is too soon for submission.
    if self.last_submission_time is not None and self.submission_wait_seconds != 0:
        if (current_time - self.last_submission_time).total_seconds() < self.submission_wait_seconds:
            remaining_secs = int(self.submission_wait_seconds - (current_time - self.last_submission_time).total_seconds())
            msg = _(u'You must wait at least {wait_secs} between submissions. {remaining_secs} remaining.').format(
                wait_secs=self.pretty_print_seconds(self.submission_wait_seconds),
                remaining_secs=self.pretty_print_seconds(remaining_secs))
            return {
                'success': msg,
                'html': ''
            }

    try:
        # Grade first; only on success do we count the attempt and persist.
        correct_map = self.lcp.grade_answers(answers)
        self.attempts = self.attempts + 1
        self.lcp.done = True
        self.set_state_from_lcp()
        self.set_last_submission_time()

    except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
        if self.runtime.DEBUG:
            log.warning(
                "StudentInputError in capa_module:problem_check",
                exc_info=True
            )

        # Save the user's state before failing
        self.set_state_from_lcp()

        # If the user is a staff member, include
        # the full exception, including traceback,
        # in the response
        if self.runtime.user_is_staff:
            msg = u"Staff debug info: {tb}".format(tb=cgi.escape(traceback.format_exc()))

        # Otherwise, display just an error message,
        # without a stack trace
        else:
            # Translators: {msg} will be replaced with a problem's error message.
            msg = _(u"Error: {msg}").format(msg=inst.message)

        return {'success': msg}

    except Exception as err:
        # Save the user's state before failing
        self.set_state_from_lcp()

        # Unexpected errors are surfaced in DEBUG mode, re-raised otherwise.
        if self.runtime.DEBUG:
            msg = u"Error checking problem: {}".format(err.message)
            msg += u'\nTraceback:\n{}'.format(traceback.format_exc())
            return {'success': msg}
        raise

    published_grade = self.publish_grade()

    # success = correct if ALL questions in this problem are correct
    success = 'correct'
    for answer_id in correct_map:
        if not correct_map.is_correct(answer_id):
            success = 'incorrect'

    # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
    #       'success' will always be incorrect
    event_info['grade'] = published_grade['grade']
    event_info['max_grade'] = published_grade['max_grade']
    event_info['correct_map'] = correct_map.get_dict()
    event_info['success'] = success
    event_info['attempts'] = self.attempts
    event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map)
    self.track_function_unmask('problem_check', event_info)

    if dog_stats_api:
        dog_stats_api.increment(metric_name('checks'), tags=[u'result:success'])
        if published_grade['max_grade'] != 0:
            dog_stats_api.histogram(
                metric_name('correct_pct'),
                float(published_grade['grade']) / published_grade['max_grade'],
            )
        dog_stats_api.histogram(
            metric_name('attempts'),
            self.attempts,
        )

    # render problem into HTML
    html = self.get_problem_html(encapsulate=False)

    return {
        'success': success,
        'contents': html
    }
# pylint: enable=too-many-statements
def track_function_unmask(self, title, event_info):
    """
    Publish a tracking event, first un-masking any masked choice names.

    All runtime.track_function calls route through here so that events
    never leak masked names.
    """
    # Unmask a deep copy so the caller's event_info is never mutated (and
    # therefore can never be unmasked twice).
    unmasked = copy.deepcopy(event_info)
    self.unmask_event(unmasked)
    self.runtime.publish(self, title, unmasked)
def unmask_event(self, event_info):
    """
    Translate event_info in place to account for masking, and add a record
    of any permutation option (shuffle/answerpool) in force.
    """
    # answers is like:
    # {u'i4x-Stanford-CS99-problem-dada976e76f34c24bc8415039dee1300_2_1': u'mask_0'}
    # Each responder has an answer_id matching a key in those dicts.
    for response in self.lcp.responders.values():
        if response.has_mask():
            # Make no assumptions about event_info's structure; unmask only
            # the nested spots that can hold a masked name.
            for path in (('answers',),
                         ('state', 'student_answers'),
                         # parallel to the above; appears on reset events
                         ('old_state', 'student_answers')):
                holder = event_info
                for key in path:
                    holder = holder.get(key, {})
                masked = holder.get(response.answer_id)
                if masked is not None:
                    holder[response.answer_id] = response.unmask_name(masked)

        # Record how the as-displayed ordering was permuted, if at all.
        if response.has_shuffle():
            option = 'shuffle'
        elif response.has_answerpool():
            option = 'answerpool'
        else:
            option = None
        if option is not None:
            # Record tuple: (one of 'shuffle'/'answerpool', [as-displayed list])
            event_info.setdefault('permutation', {})[response.answer_id] = (
                option, response.unmask_order()
            )
def pretty_print_seconds(self, num_seconds):
    """
    Render a duration in seconds as text, e.g. "3 minutes 4 seconds".

    Zero-valued units are omitted, except that a zero-length duration is
    rendered as "0 seconds" rather than as an empty string.
    """
    # The i18n service's ungettext pluralizes via a 3-arg call.
    ungettext = self.runtime.service(self, "i18n").ungettext
    hours, remainder = divmod(num_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)

    parts = []
    if hours > 0:
        parts.append(ungettext("{num_hour} hour", "{num_hour} hours", hours).format(num_hour=hours))
    if minutes > 0:
        # translators: "minute" refers to a minute of time
        parts.append(ungettext("{num_minute} minute", "{num_minute} minutes", minutes).format(num_minute=minutes))
    # Emit "0 seconds" instead of "" for a zero-length duration.
    if seconds > 0 or (hours == 0 and minutes == 0):
        # translators: "second" refers to a second of time
        parts.append(ungettext("{num_second} second", "{num_second} seconds", seconds).format(num_second=seconds))
    return " ".join(parts)
def get_submission_metadata_safe(self, answers, correct_map):
    """
    Best-effort wrapper around get_submission_metadata().

    Returns the metadata summary when it can be built, otherwise an empty
    dict -- metadata generation must never block answer submission.
    """
    try:
        metadata = self.get_submission_metadata(answers, correct_map)
    except Exception:  # pylint: disable=broad-except
        # Building the summary requires deep inspection of capa structures
        # that can break for uncommon problem types; log and move on so the
        # submission itself still succeeds.  Any occurrences of errors in
        # this block should be investigated and resolved.
        log.exception('Unable to gather submission metadata, it will not be included in the event.')
        metadata = {}
    return metadata
def get_submission_metadata(self, answers, correct_map):
    """
    Return a map of inputs to their corresponding summarized metadata.

    Returns:
        A map whose keys are a unique identifier for the input (in this case a capa input_id) and
        whose values are:

            question (str): Is the prompt that was presented to the student.  It corresponds to the
                label of the input.
            answer (mixed): Is the answer the student provided.  This may be a rich structure,
                however it must be json serializable.
            response_type (str): The XML tag of the capa response type.
            input_type (str): The XML tag of the capa input type.
            correct (bool): Whether or not the provided answer is correct.  Will be an empty
                string if correctness could not be determined.
            variant (str): In some cases the same question can have several different variants.
                This string should uniquely identify the variant of the question that was answered.
                In the capa context this corresponds to the `seed`.

    This function attempts to be very conservative and make very few assumptions about the structure
    of the problem.  If problem related metadata cannot be located it should be replaced with empty
    strings ''.
    """
    input_metadata = {}
    for input_id, internal_answer in answers.iteritems():
        # The input instance that rendered this answer; may be missing for
        # uncommon problem types.
        answer_input = self.lcp.inputs.get(input_id)

        if answer_input is None:
            # NOTE(review): if answer_input stays None, the response_data
            # access below raises AttributeError, which callers going through
            # get_submission_metadata_safe swallow -- confirm intended.
            log.warning('Input id %s is not mapped to an input type.', input_id)

        # Find the responder that owns this input id.
        answer_response = None
        for response, responder in self.lcp.responders.iteritems():
            if input_id in responder.answer_ids:
                answer_response = responder

        if answer_response is None:
            log.warning('Answer responder could not be found for input_id %s.', input_id)

        # Prefer the input's human-readable rendering of the answer, if any.
        user_visible_answer = internal_answer
        if hasattr(answer_input, 'get_user_visible_answer'):
            user_visible_answer = answer_input.get_user_visible_answer(internal_answer)

        # If this problem has rerandomize enabled, then it will generate N variants of the
        # question, one per unique seed value. In this case we would like to know which
        # variant was selected. Ideally it would be nice to have the exact question that
        # was presented to the user, with values interpolated etc, but that can be done
        # later if necessary.
        variant = ''
        if self.rerandomize != RANDOMIZATION.NEVER:
            variant = self.seed

        is_correct = correct_map.is_correct(input_id)
        if is_correct is None:
            is_correct = ''

        input_metadata[input_id] = {
            'question': answer_input.response_data.get('label', ''),
            'answer': user_visible_answer,
            'response_type': getattr(getattr(answer_response, 'xml', None), 'tag', ''),
            'input_type': getattr(answer_input, 'tag', ''),
            'correct': is_correct,
            'variant': variant,
        }
        # Add group_label in event data only if the responsetype contains multiple inputtypes
        if answer_input.response_data.get('group_label'):
            input_metadata[input_id]['group_label'] = answer_input.response_data.get('group_label')

    return input_metadata
def rescore_problem(self):
    """
    Checks whether the existing answers to a problem are correct.

    This is called when the correct answer to a problem has been changed,
    and the grade should be re-evaluated.

    Returns a dict with one key:
        {'success' : 'correct' | 'incorrect' | AJAX alert msg string }

    Raises NotFoundError if called on a problem that has not yet been
    answered, or NotImplementedError if it's a problem that cannot be rescored.

    Returns the error messages for exceptions occurring while performing
    the rescoring, rather than throwing them.
    """
    event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.to_deprecated_string()}

    _ = self.runtime.service(self, "i18n").ugettext

    # Not every responder type can be rescored.
    if not self.lcp.supports_rescoring():
        event_info['failure'] = 'unsupported'
        self.track_function_unmask('problem_rescore_fail', event_info)
        # Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
        raise NotImplementedError(_("Problem's definition does not support rescoring."))

    if not self.done:
        event_info['failure'] = 'unanswered'
        self.track_function_unmask('problem_rescore_fail', event_info)
        raise NotFoundError(_("Problem must be answered before it can be graded again."))

    # get old score, for comparison:
    orig_score = self.lcp.get_score()
    event_info['orig_score'] = orig_score['score']
    event_info['orig_total'] = orig_score['total']

    try:
        correct_map = self.lcp.rescore_existing_answers()

    except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
        # Expected capa-level failures: report the message to the caller.
        log.warning("Input error in capa_module:problem_rescore", exc_info=True)
        event_info['failure'] = 'input_error'
        self.track_function_unmask('problem_rescore_fail', event_info)
        return {'success': u"Error: {0}".format(inst.message)}

    except Exception as err:
        # Unexpected failures are surfaced in DEBUG mode, re-raised otherwise.
        event_info['failure'] = 'unexpected'
        self.track_function_unmask('problem_rescore_fail', event_info)
        if self.runtime.DEBUG:
            msg = u"Error checking problem: {0}".format(err.message)
            msg += u'\nTraceback:\n' + traceback.format_exc()
            return {'success': msg}
        raise

    # rescoring should have no effect on attempts, so don't
    # need to increment here, or mark done.  Just save.
    self.set_state_from_lcp()
    self.publish_grade()

    new_score = self.lcp.get_score()
    event_info['new_score'] = new_score['score']
    event_info['new_total'] = new_score['total']

    # success = correct if ALL questions in this problem are correct
    success = 'correct'
    for answer_id in correct_map:
        if not correct_map.is_correct(answer_id):
            success = 'incorrect'

    # NOTE: We are logging both full grading and queued-grading submissions. In the latter,
    #       'success' will always be incorrect
    event_info['correct_map'] = correct_map.get_dict()
    event_info['success'] = success
    event_info['attempts'] = self.attempts
    self.track_function_unmask('problem_rescore', event_info)

    return {'success': success}
def save_problem(self, data):
    """
    Save the student's answers without grading them.

    Returns a dict { 'success' : bool, 'msg' : message } -- informative on
    success, an error message on failure.  On success, the rendered problem
    html is also included under 'html'.
    """
    event_info = {
        'state': self.lcp.get_state(),
        'problem_id': self.location.to_deprecated_string(),
    }
    answers = self.make_dict_of_responses(data)
    event_info['answers'] = answers
    _ = self.runtime.service(self, "i18n").ugettext

    # Too late to submit (unless attempts are unlimited).
    if self.closed() and not self.max_attempts == 0:
        event_info['failure'] = 'closed'
        self.track_function_unmask('save_problem_fail', event_info)
        return {
            'success': False,
            # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
            'msg': _("Problem is closed.")
        }

    # Already submitted with per-attempt randomization: must reset first.
    if self.done and self.rerandomize == RANDOMIZATION.ALWAYS:
        event_info['failure'] = 'done'
        self.track_function_unmask('save_problem_fail', event_info)
        return {
            'success': False,
            'msg': _("Problem needs to be reset prior to save.")
        }

    self.lcp.student_answers = answers
    self.set_state_from_lcp()
    self.track_function_unmask('save_problem_success', event_info)

    if self.max_attempts == 0:
        msg = _("Your answers have been saved.")
    else:
        msg = _(
            "Your answers have been saved but not graded. Click '{button_name}' to grade them."
        ).format(button_name=self.check_button_name())
    return {
        'success': True,
        'msg': msg,
        'html': self.get_problem_html(encapsulate=False),
    }
def reset_problem(self, _data):
    """
    Change the problem state to unfinished: remove student answers and, if
    randomization is enabled, cause the problem to rerender itself.

    Returns a dictionary of the form:
        {'success': True/False,
         'html': Problem HTML string}
    and, on failure, an `error` key containing an error message.
    """
    event_info = {
        'old_state': self.lcp.get_state(),
        'problem_id': self.location.to_deprecated_string(),
    }
    _ = self.runtime.service(self, "i18n").ugettext

    if self.closed():
        event_info['failure'] = 'closed'
        self.track_function_unmask('reset_problem_fail', event_info)
        return {
            'success': False,
            # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
            'error': _("Problem is closed."),
        }

    if not self.is_submitted():
        event_info['failure'] = 'not_done'
        self.track_function_unmask('reset_problem_fail', event_info)
        return {
            'success': False,
            # Translators: A student must "make an attempt" to solve the problem on the page before they can reset it.
            'error': _("Refresh the page and make an attempt before resetting."),
        }

    if self.is_submitted() and self.rerandomize in [RANDOMIZATION.ALWAYS, RANDOMIZATION.ONRESET]:
        # Reset random number generator seed.
        self.choose_new_seed()

    # Generate a new problem with either the previous seed or a new one,
    # then pull the new problem seed into module state.
    self.lcp = self.new_lcp(None)
    self.set_state_from_lcp()

    # Grade may have changed, so publish new value
    self.publish_grade()

    event_info['new_state'] = self.lcp.get_state()
    self.track_function_unmask('reset_problem', event_info)

    return {
        'success': True,
        'html': self.get_problem_html(encapsulate=False),
    }
| chrisndodge/edx-platform | common/lib/xmodule/xmodule/capa_base.py | Python | agpl-3.0 | 62,222 |
from django.core.management.base import BaseCommand
from django.utils import timezone
import logging
import time
from borme.models import Config
from borme.parser.importer import import_borme_download
# from borme.parser.postgres import psql_update_documents
import borme.parser.importer
from libreborme.utils import get_git_revision_short_hash
class Command(BaseCommand):
    """Management command that imports BORMEs for a date range."""
    # args = '<ISO formatted date (ex. 2015-01-01 or --init)> [--local]'
    help = 'Import BORMEs from date'

    def add_arguments(self, parser):
        """Register the date-range and download-behaviour options."""
        parser.add_argument(
            '-f', '--from',
            nargs=1, required=True,
            help='ISO formatted date (ex. 2015-01-01) or "init"')
        parser.add_argument(
            '-t', '--to',
            nargs=1, required=True,
            help='ISO formatted date (ex. 2016-01-01) or "today"')
        parser.add_argument(
            '--local-only',
            action='store_true',
            default=False,
            help='Do not download any file')
        parser.add_argument(
            '--no-missing',
            action='store_true',
            default=False,
            help='Abort if local file is not found')

    # json only, pdf only...
    def handle(self, *args, **options):
        """Run the import for the requested range and record run metadata."""
        self.set_verbosity(int(options['verbosity']))
        start_time = time.time()

        import_borme_download(options['from'][0],
                              options['to'][0],
                              local_only=options['local_only'],
                              no_missing=options['no_missing'])

        # Remember when (and at which code revision) the last import ran.
        config = Config.objects.first()
        if config:
            config.last_modified = timezone.now()
        else:
            config = Config(last_modified=timezone.now())
        config.version = get_git_revision_short_hash()
        config.save()

        # Update Full Text Search
        # psql_update_documents()

        # Elapsed time
        elapsed_time = time.time() - start_time
        print('\nElapsed time: %.2f seconds' % elapsed_time)

    def set_verbosity(self, verbosity):
        """Map Django's --verbosity level onto the importer's logger."""
        importer_logger = borme.parser.importer.logger
        if verbosity == 0:
            importer_logger.setLevel(logging.ERROR)
        elif verbosity in (1, 2):  # 1 is Django's default
            importer_logger.setLevel(logging.INFO)
        elif verbosity > 2:
            importer_logger.setLevel(logging.DEBUG)
            logging.getLogger().setLevel(logging.DEBUG)
| PabloCastellano/libreborme | borme/management/commands/importborme.py | Python | agpl-3.0 | 2,559 |
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the ``InfoPage`` model, including one
    nullable column per supported language (de/en/es/fr/it/lt/pl/ru/uk)
    for the title and for each content column.
    """

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='InfoPage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # main_page doubles as the primary ordering key (see Meta ordering below).
                ('main_page', models.IntegerField(null=True, verbose_name='main page priority', blank=True)),
                ('slug', models.SlugField(unique=True, max_length=120, verbose_name='Slug')),
                # Base field plus per-language variants -- presumably generated
                # by django-modeltranslation; verify before editing by hand.
                ('title', models.CharField(max_length=120, verbose_name='Title', blank=True)),
                ('title_de', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('title_en', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('title_es', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('title_fr', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('title_it', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('title_lt', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('title_pl', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('title_ru', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('title_uk', models.CharField(max_length=120, null=True, verbose_name='Title', blank=True)),
                ('left_column', models.TextField(verbose_name='left column', blank=True)),
                ('left_column_de', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('left_column_en', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('left_column_es', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('left_column_fr', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('left_column_it', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('left_column_lt', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('left_column_pl', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('left_column_ru', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('left_column_uk', models.TextField(null=True, verbose_name='left column', blank=True)),
                ('right_column', models.TextField(verbose_name='right column', blank=True)),
                ('right_column_de', models.TextField(null=True, verbose_name='right column', blank=True)),
                ('right_column_en', models.TextField(null=True, verbose_name='right column', blank=True)),
                ('right_column_es', models.TextField(null=True, verbose_name='right column', blank=True)),
                ('right_column_fr', models.TextField(null=True, verbose_name='right column', blank=True)),
                ('right_column_it', models.TextField(null=True, verbose_name='right column', blank=True)),
                ('right_column_lt', models.TextField(null=True, verbose_name='right column', blank=True)),
                ('right_column_pl', models.TextField(null=True, verbose_name='right column', blank=True)),
                ('right_column_ru', models.TextField(null=True, verbose_name='right column', blank=True)),
                ('right_column_uk', models.TextField(null=True, verbose_name='right column', blank=True)),
            ],
            options={
                'ordering': ('main_page', 'slug'),
                'verbose_name': 'info page',
                'verbose_name_plural': 'info pages',
            },
            bases=(models.Model,),
        ),
    ]
| fnp/wolnelektury | src/infopages/migrations/0001_initial.py | Python | agpl-3.0 | 4,109 |
"""
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from courseware.access import has_access
from courseware.entrance_exams import user_must_complete_entrance_exam
from student.models import UserProfile
from openedx.core.lib.course_tabs import CourseTabPluginManager
from student.models import CourseEnrollment
from xmodule.tabs import CourseTab, CourseTabList, key_checker
from xmodule.tabs import StaticTab
class EnrolledTab(CourseTab):
    """
    A base class for any view types that require a user to be enrolled.
    """
    @classmethod
    def is_enabled(cls, course, user=None):
        """Anonymous lookups pass; otherwise require enrollment or staff access."""
        if user is None:
            return True
        is_enrolled = CourseEnrollment.is_enrolled(user, course.id)
        return bool(is_enrolled or has_access(user, 'staff', course, course.id))
class CoursewareTab(EnrolledTab):
    """
    The main courseware view.

    Enablement (enrollment or staff access) is inherited from EnrolledTab.
    """
    type = 'courseware'
    title = ugettext_noop('Courseware')
    priority = 10
    view_name = 'courseware'
    is_movable = False
    is_default = False
    is_visible_to_sneak_peek = True
class CourseInfoTab(CourseTab):
    """
    The course info view.
    """
    type = 'course_info'
    title = ugettext_noop('Course Info')
    priority = 20
    view_name = 'info'
    tab_id = 'info'
    is_movable = False
    is_default = False
    is_visible_to_sneak_peek = True

    @classmethod
    def is_enabled(cls, course, user=None):
        # Course Info is always shown, regardless of enrollment.
        return True
class SyllabusTab(EnrolledTab):
    """
    A tab for the course syllabus.
    """
    type = 'syllabus'
    title = ugettext_noop('Syllabus')
    priority = 30
    view_name = 'syllabus'
    allow_multiple = True
    is_default = False
    is_visible_to_sneak_peek = True

    @classmethod
    def is_enabled(cls, course, user=None):
        """Enabled for enrolled users, but only when the course has a syllabus."""
        parent_enabled = super(SyllabusTab, cls).is_enabled(course, user=user)
        return parent_enabled and getattr(course, 'syllabus_present', False)
class ProgressTab(EnrolledTab):
    """
    The course progress view.
    """
    type = 'progress'
    title = ugettext_noop('Progress')
    priority = 40
    view_name = 'progress'
    is_hideable = True
    is_default = False

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument
        """Enabled for enrolled users, unless the course hides its progress tab."""
        parent_enabled = super(ProgressTab, cls).is_enabled(course, user=user)
        return parent_enabled and not course.hide_progress_tab
class TextbookTabsBase(CourseTab):
    """
    Abstract base class for tabs that represent a collection of textbooks.
    """
    # Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
    title = ugettext_noop("Textbooks")
    is_collection = True
    is_default = False

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument
        """Hidden only from user objects that are not authenticated."""
        if user is None:
            return True
        return user.is_authenticated()

    @classmethod
    def items(cls, course):
        """
        A generator yielding the SingleTextbookTab objects belonging to this
        collection of textbooks.  Subclasses must implement it.
        """
        raise NotImplementedError()
class TextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all textbook tabs.
    """
    type = 'textbooks'
    priority = None
    view_name = 'book'

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument
        """Requires the ENABLE_TEXTBOOK feature flag in addition to the base check."""
        flag = settings.FEATURES.get('ENABLE_TEXTBOOK')
        return flag and super(TextbookTabs, cls).is_enabled(course, user)

    @classmethod
    def items(cls, course):
        """Yield one SingleTextbookTab per entry in course.textbooks."""
        for position, book in enumerate(course.textbooks):
            yield SingleTextbookTab(
                name=book.title,
                tab_id='textbook/{0}'.format(position),
                view_name=cls.view_name,
                index=position,
            )
class PDFTextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all PDF textbook tabs.
    """
    type = 'pdf_textbooks'
    priority = None
    view_name = 'pdf_book'

    @classmethod
    def items(cls, course):
        """Yield one SingleTextbookTab per entry in course.pdf_textbooks."""
        for position, book in enumerate(course.pdf_textbooks):
            yield SingleTextbookTab(
                name=book['tab_title'],
                tab_id='pdftextbook/{0}'.format(position),
                view_name=cls.view_name,
                index=position,
            )
class HtmlTextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all Html textbook tabs.
    """
    type = 'html_textbooks'
    priority = None
    view_name = 'html_book'

    @classmethod
    def items(cls, course):
        """Yield one SingleTextbookTab per entry in course.html_textbooks."""
        for position, book in enumerate(course.html_textbooks):
            yield SingleTextbookTab(
                name=book['tab_title'],
                tab_id='htmltextbook/{0}'.format(position),
                view_name=cls.view_name,
                index=position,
            )
class LinkTab(CourseTab):
    """
    Abstract base class for tabs whose content is an external link.

    Instances can be built either from a serialized ``tab_dict`` (which must
    contain 'type' and 'link') or programmatically via the ``name``/``link``
    keyword arguments.
    """
    link_value = ''

    def __init__(self, tab_dict=None, name=None, link=None):
        """
        Args:
            tab_dict (dict): serialized tab, with at least 'type' and 'link'.
                If omitted, `name` and `link` are used instead.
            name (str): display name, used only when tab_dict is not given.
            link (str): external URL, used only when tab_dict is not given.
        """
        self.link_value = tab_dict['link'] if tab_dict else link

        def link_value_func(_course, _reverse_func):
            """ Returns the link_value as the link. """
            return self.link_value

        if tab_dict is None:
            # Programmatic construction (name/link kwargs): synthesize the
            # minimal tab_dict the base class needs.  The previous code
            # unconditionally read tab_dict['type'] on this path and crashed
            # with a TypeError.
            tab_dict = {'type': self.type, 'name': name}
        else:
            self.type = tab_dict['type']
        tab_dict['link_func'] = link_value_func
        super(LinkTab, self).__init__(tab_dict)

    def __getitem__(self, key):
        # Expose 'link' alongside the keys the base class knows about.
        if key == 'link':
            return self.link_value
        else:
            return super(LinkTab, self).__getitem__(key)

    def __setitem__(self, key, value):
        if key == 'link':
            self.link_value = value
        else:
            super(LinkTab, self).__setitem__(key, value)

    def to_json(self):
        # Serialize as the base tab plus our link value.
        to_json_val = super(LinkTab, self).to_json()
        to_json_val.update({'link': self.link_value})
        return to_json_val

    def __eq__(self, other):
        if not super(LinkTab, self).__eq__(other):
            return False
        return self.link_value == other.get('link')

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument
        # Link tabs carry no enrollment or feature-flag requirements.
        return True
class ExternalDiscussionCourseTab(LinkTab):
    """
    A course tab that links to an external discussion service.
    """
    type = 'external_discussion'
    # Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
    title = ugettext_noop('Discussion')
    priority = None
    is_default = False

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """ Validate that the tab_dict for this course tab has the necessary information to render. """
        parent_ok = super(ExternalDiscussionCourseTab, cls).validate(tab_dict, raise_error)
        return parent_ok and key_checker(['link'])(tab_dict, raise_error)

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument
        """Enabled only when the course has an external discussion link configured."""
        parent_enabled = super(ExternalDiscussionCourseTab, cls).is_enabled(course, user=user)
        return parent_enabled and course.discussion_link
class ExternalLinkCourseTab(LinkTab):
    """
    A course tab containing an external link.
    """
    type = 'external_link'
    priority = None
    is_default = False  # An external link tab is not added to a course by default
    allow_multiple = True
    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """ Validate that the tab_dict for this course tab has the necessary information to render. """
        checks_pass = super(ExternalLinkCourseTab, cls).validate(tab_dict, raise_error)
        return checks_pass and key_checker(['link', 'name'])(tab_dict, raise_error)
class SingleTextbookTab(CourseTab):
    """
    A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
    Textbook collection tab. It should not be serialized or persisted.
    """
    type = 'single_textbook'
    is_movable = False
    is_collection_item = True
    priority = None
    def __init__(self, name, tab_id, view_name, index):
        def link_func(course, reverse_func, index=index):
            """ Constructs a link for textbooks from a view name, a course, and an index. """
            return reverse_func(view_name, args=[unicode(course.id), index])
        super(SingleTextbookTab, self).__init__({
            'name': name,
            'tab_id': tab_id,
            'link_func': link_func,
        })
    def to_json(self):
        """Serialization is unsupported: these tabs exist only transiently."""
        raise NotImplementedError('SingleTextbookTab should not be serialized.')
def get_course_tab_list(request, course):
    """
    Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
    """
    user = request.user
    is_user_enrolled = user.is_authenticated() and CourseEnrollment.is_enrolled(user, course.id)
    xmodule_tab_list = CourseTabList.iterate_displayable(
        course,
        user=user,
        settings=settings,
        is_user_authenticated=user.is_authenticated(),
        is_user_staff=has_access(user, 'staff', course, course.id),
        is_user_enrolled=is_user_enrolled,
        is_user_sneakpeek=not UserProfile.has_registered(user),
    )
    # Now that we've loaded the tabs for this course, perform the Entrance Exam work.
    # If the user has to take an entrance exam, we'll need to hide away all but the
    # "Courseware" tab. The tab is then renamed as "Entrance Exam".
    course_tab_list = []
    for tab in xmodule_tab_list:
        if user_must_complete_entrance_exam(request, user, course):
            # Hide all of the tabs except for 'Courseware'
            # Rename 'Courseware' tab to 'Entrance Exam'
            # Bug fix: use equality, not identity. `tab.type is not 'courseware'`
            # only worked via CPython string interning and raises a
            # SyntaxWarning on Python 3.8+.
            if tab.type != 'courseware':
                continue
            tab.name = _("Entrance Exam")
        course_tab_list.append(tab)
    # Add in any dynamic tabs, i.e. those that are not persisted
    course_tab_list += _get_dynamic_tabs(course, user)
    return course_tab_list
def _get_dynamic_tabs(course, user):
    """
    Returns the dynamic tab types for the current user.
    Note: dynamic tabs are those that are not persisted in the course, but are
    instead added dynamically based upon the user's role.
    """
    # Instantiate every plugin tab type flagged as dynamic...
    candidates = [
        tab_type(dict())
        for tab_type in CourseTabPluginManager.get_tab_types()
        if getattr(tab_type, "is_dynamic", False)
    ]
    # ...then keep only those enabled for this user/course, sorted by name.
    dynamic_tabs = [tab for tab in candidates if tab.is_enabled(course, user=user)]
    dynamic_tabs.sort(key=lambda tab: tab.name)
    return dynamic_tabs
| jbassen/edx-platform | lms/djangoapps/courseware/tabs.py | Python | agpl-3.0 | 10,813 |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import erppeek
import shutil
import parameters # Micronaet: configuration file
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
# -----------------------------------------------------------------------------
# Parameters:
# -----------------------------------------------------------------------------
# ODOO connection:
odoo_server = parameters.odoo_server
odoo_port = parameters.odoo_port
odoo_user = parameters.odoo_user
odoo_password = parameters.odoo_password
odoo_database = parameters.odoo_database
# Dropbox:
demo = parameters.demo  # demo-mode flag from config (not used below -- TODO confirm)
samba_path = parameters.samba_path  # source image folders (shared via Samba)
dropbox_path = parameters.dropbox_path  # destination folder synchronized by Dropbox
print '''
Setup parameters:
    ODOO: Connection: %s:%s DB %s utente: %s
    Demo: %s
    Samba folders: %s
    Dropbox path: %s
    ''' % (
    odoo_server,
    odoo_port,
    odoo_database,
    odoo_user,
    demo,
    samba_path,
    dropbox_path,
    )
# -----------------------------------------------------------------------------
# UTILITY:
# -----------------------------------------------------------------------------
def get_modify_date(fullname):
    ''' Return the last-modification date of *fullname* as 'YYYY-MM-DD'.
    '''
    mtime = os.stat(fullname).st_mtime
    return datetime.fromtimestamp(mtime).strftime('%Y-%m-%d')
# -----------------------------------------------------------------------------
# ODOO operation:
# -----------------------------------------------------------------------------
# Open an authenticated XML-RPC session against the ODOO server:
odoo = erppeek.Client(
    'http://%s:%s' % (
        odoo_server, odoo_port),
    db=odoo_database,
    user=odoo_user,
    password=odoo_password,
    )
# Pool used:
product_pool = odoo.model('product.product.web.server')
# Products published on a Wordpress-enabled connector:
product_ids = product_pool.search([
    ('connector_id.wordpress', '=', True),
    ])
# Check elements:
#error = [] # Error database
#warning = [] # Warning database
#info = [] # Info database
#log = [] # Log database
#log_sym = [] # Log database for symlinks
#product_odoo = {}
# Only if new file (check how):
# Expand '~' in the configured paths once, up front.
dropbox_root_path = os.path.expanduser(dropbox_path)
# NOTE(review): samba_root_path is computed but never used below -- confirm.
samba_root_path = os.path.expanduser(samba_path)
# -----------------------------------------------------------------------------
# Save current files (Dropbox folder):
# -----------------------------------------------------------------------------
# Snapshot of the files currently in the Dropbox root (top level only).
# Entries still present at the end of the run are considered stale and removed.
current_files = []
for root, folders, files in os.walk(dropbox_root_path):
    for f in files:
        current_files.append(
            os.path.join(root, f))  # fixed: closing parenthesis was missing
    break  # only first folder!
# -----------------------------------------------------------------------------
# Logg on all product image selected:
# -----------------------------------------------------------------------------
for product in product_pool.browse(product_ids):
for image in product.image_ids:
image_id = image.id
code = image.album_id.code
samba_relative_path = image.album_id.path # TODO dropbox_path
filename = product.filename
origin = os.path.(samba_relative_path, filename)
destination = os.path.(dropbox_root_path, '%s.%s' % (code, filename))
if destination in current_files:
current_files.remove(destination)
# Create symlink:
try:
os.symlink(origin, destination)
log_sym.append('CREATO: origin: %s destination: %s' % (
origin, destination))
except:
log_sym.append('ERRORE: origin: %s destination: %s' % (
origin, destination))
# Find dropbox link:
# Save dropbox link:
os.system('chmod 777 "%s" -R' % dropbox_path)
for filename in current_files:
os.rm(filename)
# file_modify = get_modify_date(fullname)
# os.system('mkdir -p "%s"' % product_folder)
print 'End operation'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-script | DropboxWebsite/dropbox.py | Python | agpl-3.0 | 4,978 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Case.type to add the reverse accessor 'cases' on CaseType."""
    dependencies = [
        ('cases', '0018_auto_20170418_1220'),
    ]
    operations = [
        migrations.AlterField(
            model_name='case',
            name='type',
            field=models.ForeignKey(related_name='cases', to='cases.CaseType'),
        ),
    ]
| HelloLily/hellolily | lily/cases/migrations/0019_auto_20170418_1243.py | Python | agpl-3.0 | 425 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Freshermeat - An open source software directory and release tracker.
# Copyright (C) 2017-2020 Cédric Bonhomme - https://www.cedricbonhomme.org
#
# For more information: https://sr.ht/~cedric/freshermeat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from freshermeat.bootstrap import manager
from freshermeat.models import Language
from freshermeat.web.views.api.v1.common import url_prefix
# Read-only (GET) REST API blueprint for Language objects, mounted at url_prefix.
blueprint_language = manager.create_api_blueprint(
    Language, url_prefix=url_prefix, methods=["GET"]
)
| cedricbonhomme/services | freshermeat/web/views/api/v1/language.py | Python | agpl-3.0 | 1,163 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
from dateutil.relativedelta import relativedelta
from datetime import datetime, date
import click
from werkzeug.security import generate_password_hash
import newspipe.models
from newspipe.bootstrap import application, db
from newspipe.controllers import UserController, ArticleController
logger = logging.getLogger("commands")
@application.cli.command("db_empty")
def db_empty():
"Will drop every datas stocked in db."
with application.app_context():
newspipe.models.db_empty(db)
@application.cli.command("db_create")
def db_create():
"Will create the database from conf parameters."
with application.app_context():
try:
db.create_all()
except Exception as e:
print(e)
@application.cli.command("create_admin")
@click.option("--nickname", default="admin", help="Nickname")
@click.option("--password", default="password", help="Password")
def create_admin(nickname, password):
"Will create an admin user."
admin = {
"is_admin": True,
"is_api": True,
"is_active": True,
"nickname": nickname,
"pwdhash": generate_password_hash(password),
}
with application.app_context():
try:
UserController(ignore_context=True).create(**admin)
except Exception as e:
print(e)
@application.cli.command("delete_user")
@click.option("--user-id", required=True, help="Id of the user to delete.")
def delete_user(user_id=None):
"Delete the user with the id specified in the command line."
try:
user = UserController().delete(user_id)
print("User {} deleted".format(user.nickname))
except Exception as e:
print(e)
@application.cli.command("delete_inactive_users")
@click.option("--last-seen", default=6, help="Number of months since last seen.")
def delete_inactive_users(last_seen):
"Delete inactive users (inactivity is given in parameter and specified in number of months)."
filter = {}
filter["last_seen__lt"] = date.today() - relativedelta(months=last_seen)
users = UserController().read(**filter)
for user in users:
db.session.delete(user)
try:
print("Deleting user {}...".format(user.nickname))
db.session.commit()
except:
db.session.rollback()
print("Inactive users deleted.")
@application.cli.command("disable_inactive_users")
@click.option("--last-seen", default=6, help="Number of months since last seen.")
def disable_inactive_users(last_seen):
"Disable inactive users (inactivity is given in parameter and specified in number of months)."
filter = {}
filter["last_seen__lt"] = date.today() - relativedelta(months=last_seen)
users = UserController().read(**filter)
for user in users:
user.is_active = False
user.is_public_profile = False
user.automatic_crawling = False
try:
print("Updating user {}...".format(user.nickname))
db.session.commit()
except:
db.session.rollback()
print("Inactive users disabled.")
@application.cli.command("delete_read_articles")
def delete_read_articles():
"Delete read articles (and not liked) retrieved since more than 60 days ago."
filter = {}
filter["user_id__ne"] = 1
filter["readed"] = True
filter["like"] = False
filter["retrieved_date__lt"] = date.today() - relativedelta(days=60)
articles = ArticleController().read(**filter).limit(5000)
for article in articles:
try:
db.session.delete(article)
db.session.commit()
except:
db.session.rollback()
print("Read articles deleted.")
@application.cli.command("fix_article_entry_id")
def fix_article_entry_id():
filter = {}
filter["entry_id"] = None
articles = ArticleController().read(**filter).limit(50)
for article in articles:
try:
article.entry_id = str(article.id)
db.session.commit()
except:
db.session.rollback()
@application.cli.command("fetch_asyncio")
@click.option("--user-id", default=None, help="Id of the user")
@click.option("--feed-id", default=None, help="If of the feed")
def fetch_asyncio(user_id=None, feed_id=None):
"Crawl the feeds with asyncio."
import asyncio
with application.app_context():
from newspipe.crawler import default_crawler
filters = {}
filters["is_active"] = True
filters["automatic_crawling"] = True
if None is not user_id:
filters["id"] = user_id
users = UserController().read(**filters).all()
try:
feed_id = int(feed_id)
except:
feed_id = None
loop = asyncio.get_event_loop()
queue = asyncio.Queue(maxsize=3, loop=loop)
producer_coro = default_crawler.retrieve_feed(queue, users, feed_id)
consumer_coro = default_crawler.insert_articles(queue, 1)
logger.info("Starting crawler.")
start = datetime.now()
loop.run_until_complete(asyncio.gather(producer_coro, consumer_coro))
end = datetime.now()
loop.close()
logger.info("Crawler finished in {} seconds.".format((end - start).seconds))
| JARR-aggregator/JARR | newspipe/commands.py | Python | agpl-3.0 | 5,322 |
#
# File: capa/capa_problem.py
#
# Nomenclature:
#
# A capa Problem is a collection of text and capa Response questions.
# Each Response may have one or more Input entry fields.
# The capa problem may include a solution.
#
"""
Main module which shows problems (of "capa" type).
This is used by capa_module.
"""
from datetime import datetime
import logging
import os.path
import re
from lxml import etree
from xml.sax.saxutils import unescape
from copy import deepcopy
from capa.correctmap import CorrectMap
import capa.inputtypes as inputtypes
import capa.customrender as customrender
import capa.responsetypes as responsetypes
from capa.util import contextualize_text, convert_files_to_filenames
import capa.xqueue_interface as xqueue_interface
from capa.safe_exec import safe_exec
from pytz import UTC
# extra things displayed after "show answers" is pressed
solution_tags = ['solution']
# these get captured as student responses
response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]
# special problem tags which should be turned into innocuous HTML
# (maps a problem-XML tag name -> the HTML tag that replaces it when rendering)
html_transforms = {
    'problem': {'tag': 'div'},
    'text': {'tag': 'span'},
    'math': {'tag': 'span'},
}
# These should be removed from HTML output, including all subelements
html_problem_semantics = [
    "codeparam",
    "responseparam",
    "answer",
    "script",
    "hintgroup",
    "openendedparam",
    "openendedrubric"
]
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# main class for this module
class LoncapaSystem(object):
    """
    An encapsulation of resources needed from the outside.
    These interfaces are collected here so that a caller of LoncapaProblem
    can provide these resources however make sense for their environment, and
    this code can remain independent.
    Attributes:
        i18n: an object implementing the `gettext.Translations` interface so
        that we can use `.ugettext` to localize strings.
    See :class:`ModuleSystem` for documentation of other attributes.
    """
    def __init__(  # pylint: disable=invalid-name
            self,
            ajax_url,
            anonymous_student_id,
            cache,
            can_execute_unsafe_code,
            DEBUG,  # pylint: disable=invalid-name
            filestore,
            i18n,
            node_path,
            render_template,
            seed,  # NOTE(review): also stored per-problem as LoncapaProblem.seed -- duplication unexplained
            STATIC_URL,  # pylint: disable=invalid-name
            xqueue,
    ):
        # Pure parameter capture: every constructor argument is stored as-is.
        self.ajax_url = ajax_url
        self.anonymous_student_id = anonymous_student_id
        self.cache = cache
        self.can_execute_unsafe_code = can_execute_unsafe_code
        self.DEBUG = DEBUG  # pylint: disable=invalid-name
        self.filestore = filestore
        self.i18n = i18n
        self.node_path = node_path
        self.render_template = render_template
        self.seed = seed  # NOTE(review): see comment on the parameter above
        self.STATIC_URL = STATIC_URL  # pylint: disable=invalid-name
        self.xqueue = xqueue
class LoncapaProblem(object):
"""
Main class for capa Problems.
"""
    def __init__(self, problem_text, id, capa_system, state=None, seed=None):
        """
        Initializes capa Problem.
        Arguments:
            problem_text (string): xml defining the problem.
            id (string): identifier for this problem, often a filename (no spaces).
            capa_system (LoncapaSystem): LoncapaSystem instance which provides OS,
                rendering, user context, and other resources.
            state (dict): containing the following keys:
                - `seed` (int) random number generator seed
                - `student_answers` (dict) maps input id to the stored answer for that input
                - `correct_map` (CorrectMap) a map of each input to their 'correctness'
                - `done` (bool) indicates whether or not this problem is considered done
                - `input_state` (dict) maps input_id to a dictionary that holds the state for that input
            seed (int): random number generator seed.
        """
        ## Initialize class variables from state
        # (do_reset() establishes the defaults that the saved state below overrides)
        self.do_reset()
        self.problem_id = id
        self.capa_system = capa_system
        state = state or {}
        # Set seed according to the following priority:
        #       1. Contained in problem's state
        #       2. Passed into capa_problem via constructor
        self.seed = state.get('seed', seed)
        assert self.seed is not None, "Seed must be provided for LoncapaProblem."
        self.student_answers = state.get('student_answers', {})
        if 'correct_map' in state:
            self.correct_map.set_dict(state['correct_map'])
        self.done = state.get('done', False)
        self.input_state = state.get('input_state', {})
        # Convert startouttext and endouttext to proper <text></text>
        problem_text = re.sub(r"startouttext\s*/", "text", problem_text)
        problem_text = re.sub(r"endouttext\s*/", "/text", problem_text)
        self.problem_text = problem_text
        # parse problem XML file into an element tree
        self.tree = etree.XML(problem_text)
        # handle any <include file="foo"> tags
        self._process_includes()
        # construct script processor context (eg for customresponse problems)
        self.context = self._extract_context(self.tree)
        # Pre-parse the XML tree: modifies it to add ID's and perform some in-place
        # transformations.  This also creates the dict (self.responders) of Response
        # instances for each question in the problem. The dict has keys = xml subtree of
        # Response, values = Response instance
        self._preprocess_problem(self.tree)
        if not self.student_answers:  # True when student_answers is an empty dict
            self.set_initial_display()
        # dictionary of InputType objects associated with this problem
        #   input_id string -> InputType object
        self.inputs = {}
        self.extracted_tree = self._extract_html(self.tree)
def do_reset(self):
"""
Reset internal state to unfinished, with no answers
"""
self.student_answers = dict()
self.correct_map = CorrectMap()
self.done = False
def set_initial_display(self):
"""
Set the student's answers to the responders' initial displays, if specified.
"""
initial_answers = dict()
for responder in self.responders.values():
if hasattr(responder, 'get_initial_display'):
initial_answers.update(responder.get_initial_display())
self.student_answers = initial_answers
def __unicode__(self):
return u"LoncapaProblem ({0})".format(self.problem_id)
def get_state(self):
"""
Stored per-user session data neeeded to:
1) Recreate the problem
2) Populate any student answers.
"""
return {'seed': self.seed,
'student_answers': self.student_answers,
'correct_map': self.correct_map.get_dict(),
'input_state': self.input_state,
'done': self.done}
def get_max_score(self):
"""
Return the maximum score for this problem.
"""
maxscore = 0
for responder in self.responders.values():
maxscore += responder.get_max_score()
return maxscore
def get_score(self):
"""
Compute score for this problem. The score is the number of points awarded.
Returns a dictionary {'score': integer, from 0 to get_max_score(),
'total': get_max_score()}.
"""
correct = 0
for key in self.correct_map:
try:
correct += self.correct_map.get_npoints(key)
except Exception:
log.error('key=%s, correct_map = %s', key, self.correct_map)
raise
if (not self.student_answers) or len(self.student_answers) == 0:
return {'score': 0,
'total': self.get_max_score()}
else:
return {'score': correct,
'total': self.get_max_score()}
def update_score(self, score_msg, queuekey):
"""
Deliver grading response (e.g. from async code checking) to
the specific ResponseType that requested grading
Returns an updated CorrectMap
"""
cmap = CorrectMap()
cmap.update(self.correct_map)
for responder in self.responders.values():
if hasattr(responder, 'update_score'):
# Each LoncapaResponse will update its specific entries in cmap
# cmap is passed by reference
responder.update_score(score_msg, cmap, queuekey)
self.correct_map.set_dict(cmap.get_dict())
return cmap
def ungraded_response(self, xqueue_msg, queuekey):
"""
Handle any responses from the xqueue that do not contain grades
Will try to pass the queue message to all inputtypes that can handle ungraded responses
Does not return any value
"""
# check against each inputtype
for the_input in self.inputs.values():
# if the input type has an ungraded function, pass in the values
if hasattr(the_input, 'ungraded_response'):
the_input.ungraded_response(xqueue_msg, queuekey)
def is_queued(self):
"""
Returns True if any part of the problem has been submitted to an external queue
(e.g. for grading.)
"""
return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)
def get_recentmost_queuetime(self):
"""
Returns a DateTime object that represents the timestamp of the most recent
queueing request, or None if not queued
"""
if not self.is_queued():
return None
# Get a list of timestamps of all queueing requests, then convert it to a DateTime object
queuetime_strs = [
self.correct_map.get_queuetime_str(answer_id)
for answer_id in self.correct_map
if self.correct_map.is_queued(answer_id)
]
queuetimes = [
datetime.strptime(qt_str, xqueue_interface.dateformat).replace(tzinfo=UTC)
for qt_str in queuetime_strs
]
return max(queuetimes)
def grade_answers(self, answers):
"""
Grade student responses. Called by capa_module.check_problem.
`answers` is a dict of all the entries from request.POST, but with the first part
of each key removed (the string before the first "_").
Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123
Calls the Response for each question in this problem, to do the actual grading.
"""
# if answers include File objects, convert them to filenames.
self.student_answers = convert_files_to_filenames(answers)
return self._grade_answers(answers)
def supports_rescoring(self):
"""
Checks that the current problem definition permits rescoring.
More precisely, it checks that there are no response types in
the current problem that are not fully supported (yet) for rescoring.
This includes responsetypes for which the student's answer
is not properly stored in state, i.e. file submissions. At present,
we have no way to know if an existing response was actually a real
answer or merely the filename of a file submitted as an answer.
It turns out that because rescoring is a background task, limiting
it to responsetypes that don't support file submissions also means
that the responsetypes are synchronous. This is convenient as it
permits rescoring to be complete when the rescoring call returns.
"""
return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
def rescore_existing_answers(self):
"""
Rescore student responses. Called by capa_module.rescore_problem.
"""
return self._grade_answers(None)
    def _grade_answers(self, student_answers):
        """
        Internal grading call used for checking new 'student_answers' and also
        rescoring existing student_answers.
        For new student_answers being graded, `student_answers` is a dict of all the
        entries from request.POST, but with the first part of each key removed
        (the string before the first "_"). Thus, for example,
        input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.
        For rescoring, `student_answers` is None.
        Calls the Response for each question in this problem, to do the actual grading.
        Returns the new CorrectMap (also stored on self.correct_map).
        """
        # old CorrectMap
        oldcmap = self.correct_map
        # start new with empty CorrectMap
        newcmap = CorrectMap()
        # Call each responsetype instance to do actual grading
        for responder in self.responders.values():
            # File objects are passed only if responsetype explicitly allows
            # for file submissions.  But we have no way of knowing if
            # student_answers contains a proper answer or the filename of
            # an earlier submission, so for now skip these entirely.
            # TODO: figure out where to get file submissions when rescoring.
            if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
                _ = self.capa_system.i18n.ugettext
                raise Exception(_(u"Cannot rescore problems with possible file submissions"))
            # use 'student_answers' only if it is provided, and if it might contain a file
            # submission that would not exist in the persisted "student_answers".
            if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
                results = responder.evaluate_answers(student_answers, oldcmap)
            else:
                results = responder.evaluate_answers(self.student_answers, oldcmap)
            newcmap.update(results)
        self.correct_map = newcmap
        return newcmap
def get_question_answers(self):
"""
Returns a dict of answer_ids to answer values. If we cannot generate
an answer (this sometimes happens in customresponses), that answer_id is
not included. Called by "show answers" button JSON request
(see capa_module)
"""
# dict of (id, correct_answer)
answer_map = dict()
for response in self.responders.keys():
results = self.responder_answers[response]
answer_map.update(results)
# include solutions from <solution>...</solution> stanzas
for entry in self.tree.xpath("//" + "|//".join(solution_tags)):
answer = etree.tostring(entry)
if answer:
answer_map[entry.get('id')] = contextualize_text(answer, self.context)
log.debug('answer_map = %s', answer_map)
return answer_map
def get_answer_ids(self):
"""
Return the IDs of all the responses -- these are the keys used for
the dicts returned by grade_answers and get_question_answers. (Though
get_question_answers may only return a subset of these.
"""
answer_ids = []
for response in self.responders.keys():
results = self.responder_answers[response]
answer_ids.append(results.keys())
return answer_ids
def get_html(self):
"""
Main method called externally to get the HTML to be rendered for this capa Problem.
"""
html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
return html
def handle_input_ajax(self, data):
"""
InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data
Also, parse out the dispatch from the get so that it can be passed onto the input type nicely
"""
# pull out the id
input_id = data['input_id']
if self.inputs[input_id]:
dispatch = data['dispatch']
return self.inputs[input_id].handle_ajax(dispatch, data)
else:
log.warning("Could not find matching input for id: %s", input_id)
return {}
# ======= Private Methods Below ========
    def _process_includes(self):
        """
        Handle any <include file="foo"> tags by reading in the specified file and inserting it
        into our XML tree. Fail gracefully if debugging.
        """
        includes = self.tree.findall('.//include')
        for inc in includes:
            filename = inc.get('file')
            # An <include> without a 'file' attribute is silently ignored.
            if filename is not None:
                try:
                    # open using LoncapaSystem OSFS filestore
                    ifp = self.capa_system.filestore.open(filename)
                except Exception as err:
                    log.warning(
                        'Error %s in problem xml include: %s',
                        err,
                        etree.tostring(inc, pretty_print=True)
                    )
                    log.warning(
                        'Cannot find file %s in %s', filename, self.capa_system.filestore
                    )
                    # if debugging, don't fail - just log error
                    # TODO (vshnayder): need real error handling, display to users
                    if not self.capa_system.DEBUG:
                        raise
                    else:
                        continue
                try:
                    # read in and convert to XML
                    incxml = etree.XML(ifp.read())
                except Exception as err:
                    log.warning(
                        'Error %s in problem xml include: %s',
                        err,
                        etree.tostring(inc, pretty_print=True)
                    )
                    log.warning('Cannot parse XML in %s', (filename))
                    # if debugging, don't fail - just log error
                    # TODO (vshnayder): same as above
                    if not self.capa_system.DEBUG:
                        raise
                    else:
                        continue
                # insert new XML into tree in place of include
                parent = inc.getparent()
                parent.insert(parent.index(inc), incxml)
                parent.remove(inc)
                log.debug('Included %s into %s' % (filename, self.problem_id))
def _extract_system_path(self, script):
"""
Extracts and normalizes additional paths for code execution.
For now, there's a default path of data/course/code; this may be removed
at some point.
script : ?? (TODO)
"""
DEFAULT_PATH = ['code']
# Separate paths by :, like the system path.
raw_path = script.get('system_path', '').split(":") + DEFAULT_PATH
# find additional comma-separated modules search path
path = []
for dir in raw_path:
if not dir:
continue
# path is an absolute path or a path relative to the data dir
dir = os.path.join(self.capa_system.filestore.root_path, dir)
# Check that we are within the filestore tree.
reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)
if ".." in reldir:
log.warning("Ignoring Python directory outside of course: %r", dir)
continue
abs_dir = os.path.normpath(dir)
path.append(abs_dir)
return path
def _extract_context(self, tree):
"""
Extract content of <script>...</script> from the problem.xml file, and exec it in the
context of this problem. Provides ability to randomize problems, and also set
variables for problem answer checking.
Problem XML goes to Python execution context. Runs everything in script tags.
"""
context = {}
context['seed'] = self.seed
all_code = ''
python_path = []
for script in tree.findall('.//script'):
stype = script.get('type')
if stype:
if 'javascript' in stype:
continue # skip javascript
if 'perl' in stype:
continue # skip perl
# TODO: evaluate only python
for d in self._extract_system_path(script):
if d not in python_path and os.path.exists(d):
python_path.append(d)
XMLESC = {"'": "'", """: '"'}
code = unescape(script.text, XMLESC)
all_code += code
if all_code:
try:
safe_exec(
all_code,
context,
random_seed=self.seed,
python_path=python_path,
cache=self.capa_system.cache,
slug=self.problem_id,
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err:
log.exception("Error while execing script code: " + all_code)
msg = "Error while executing script code: %s" % str(err).replace('<', '<')
raise responsetypes.LoncapaProblemError(msg)
# Store code source in context, along with the Python path needed to run it correctly.
context['script_code'] = all_code
context['python_path'] = python_path
return context
    def _extract_html(self, problemtree):  # private
        """
        Main (private) function which converts Problem XML tree to HTML.
        Calls itself recursively.
        Returns Element tree of XHTML representation of problemtree.
        Calls render_html of Response instances to render responses into XHTML.
        Used by get_html.

        Returns None for nodes that should be dropped from the output
        (comments, processing instructions, and tags in html_problem_semantics).
        """
        if not isinstance(problemtree.tag, basestring):
            # Comment and ProcessingInstruction nodes are not Elements,
            # and we're ok leaving those behind.
            # BTW: etree gives us no good way to distinguish these things
            # other than to examine .tag to see if it's a string. :(
            return
        if (problemtree.tag == 'script' and problemtree.get('type')
                and 'javascript' in problemtree.get('type')):
            # leave javascript intact.
            return deepcopy(problemtree)
        if problemtree.tag in html_problem_semantics:
            # Purely semantic tags (answers etc.) are never rendered to HTML.
            return
        problemid = problemtree.get('id')  # my ID
        if problemtree.tag in inputtypes.registry.registered_tags():
            # If this is an inputtype subtree, let it render itself.
            status = "unsubmitted"
            msg = ''
            hint = ''
            hintmode = None
            input_id = problemtree.get('id')
            # Pull any previously-graded state (correctness, feedback) for this input.
            if problemid in self.correct_map:
                pid = input_id
                status = self.correct_map.get_correctness(pid)
                msg = self.correct_map.get_msg(pid)
                hint = self.correct_map.get_hint(pid)
                hintmode = self.correct_map.get_hintmode(pid)
            value = ""
            if self.student_answers and problemid in self.student_answers:
                value = self.student_answers[problemid]
            if input_id not in self.input_state:
                self.input_state[input_id] = {}
            # do the rendering
            state = {
                'value': value,
                'status': status,
                'id': input_id,
                'input_state': self.input_state[input_id],
                'feedback': {
                    'message': msg,
                    'hint': hint,
                    'hintmode': hintmode,
                }
            }
            input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)
            # save the input type so that we can make ajax calls on it if we need to
            self.inputs[input_id] = input_type_cls(self.capa_system, problemtree, state)
            return self.inputs[input_id].get_html()
        # let each Response render itself
        if problemtree in self.responders:
            overall_msg = self.correct_map.get_overall_message()
            return self.responders[problemtree].render_html(
                self._extract_html, response_msg=overall_msg
            )
        # let each custom renderer render itself:
        if problemtree.tag in customrender.registry.registered_tags():
            renderer_class = customrender.registry.get_class_for_tag(problemtree.tag)
            renderer = renderer_class(self.capa_system, problemtree)
            return renderer.get_html()
        # otherwise, render children recursively, and copy over attributes
        tree = etree.Element(problemtree.tag)
        for item in problemtree:
            item_xhtml = self._extract_html(item)
            if item_xhtml is not None:
                tree.append(item_xhtml)
        if tree.tag in html_transforms:
            # Replace the tag name (e.g. problem-specific tag -> plain HTML tag).
            tree.tag = html_transforms[problemtree.tag]['tag']
        else:
            # copy attributes over if not innocufying
            for (key, value) in problemtree.items():
                tree.set(key, value)
        tree.text = problemtree.text
        tree.tail = problemtree.tail
        return tree
    def _preprocess_problem(self, tree):  # private
        """
        Assign IDs to all the responses
        Assign sub-IDs to all entries (textline, schematic, etc.)
        Annoted correctness and value
        In-place transformation
        Also create capa Response instances for each responsetype and save as self.responders
        Obtain all responder answers and save as self.responder_answers dict (key = response)
        """
        response_id = 1
        self.responders = {}
        # Walk every registered response tag anywhere in the tree.
        for response in tree.xpath('//' + "|//".join(responsetypes.registry.registered_tags())):
            response_id_str = self.problem_id + "_" + str(response_id)
            # create and save ID for this response
            response.set('id', response_id_str)
            response_id += 1
            answer_id = 1
            input_tags = inputtypes.registry.registered_tags()
            # All input/solution descendants of this specific response element.
            inputfields = tree.xpath(
                "|".join(['//' + response.tag + '[@id=$id]//' + x for x in (input_tags + solution_tags)]),
                id=response_id_str
            )
            # assign one answer_id for each input type or solution type
            # NOTE(review): response_id was incremented above, so entries carry a
            # value one greater than the one embedded in response_id_str (first
            # response's inputs end up as "<problem>_2_1"). Appears to be a
            # long-standing naming convention -- confirm before "fixing".
            for entry in inputfields:
                entry.attrib['response_id'] = str(response_id)
                entry.attrib['answer_id'] = str(answer_id)
                entry.attrib['id'] = "%s_%i_%i" % (self.problem_id, response_id, answer_id)
                answer_id = answer_id + 1
            # instantiate capa Response
            responsetype_cls = responsetypes.registry.get_class_for_tag(response.tag)
            responder = responsetype_cls(response, inputfields, self.context, self.capa_system)
            # save in list in self
            self.responders[response] = responder
        # get responder answers (do this only once, since there may be a performance cost,
        # eg with externalresponse)
        self.responder_answers = {}
        for response in self.responders.keys():
            try:
                self.responder_answers[response] = self.responders[response].get_answers()
            except:
                log.debug('responder %s failed to properly return get_answers()',
                          self.responders[response])  # FIXME
                raise
        # <solution>...</solution> may not be associated with any specific response; give
        # IDs for those separately
        # TODO: We should make the namespaces consistent and unique (e.g. %s_problem_%i).
        solution_id = 1
        for solution in tree.findall('.//solution'):
            solution.attrib['id'] = "%s_solution_%i" % (self.problem_id, solution_id)
            solution_id += 1
| pku9104038/edx-platform | common/lib/capa/capa/capa_problem.py | Python | agpl-3.0 | 28,574 |
import json
from c2corg_api.models.route import Route
from c2corg_api.models.topo_map import ArchiveTopoMap, TopoMap, MAP_TYPE
from c2corg_api.models.topo_map_association import TopoMapAssociation
from c2corg_api.models.waypoint import Waypoint
from c2corg_api.tests.search import reset_search_index
from c2corg_api.models.common.attributes import quality_types
from shapely.geometry import shape, Polygon
from c2corg_api.models.document import (
DocumentGeometry, ArchiveDocumentLocale, DocumentLocale)
from c2corg_api.views.document import DocumentRest
from c2corg_api.tests.views import BaseDocumentTestRest
class TestTopoMapRest(BaseDocumentTestRest):
    """Functional tests for the /maps REST endpoints (TopoMap documents).

    Exercises collection listing/pagination/search, single-document GET
    variants, POST/PUT validation and success paths, and the automatic
    (re)creation of TopoMapAssociation links to documents whose geometry
    intersects the map.
    """

    def setUp(self):  # noqa
        # Point the generic BaseDocumentTestRest machinery at the map models.
        self.set_prefix_and_model(
            "/maps", MAP_TYPE, TopoMap, ArchiveTopoMap, ArchiveDocumentLocale)
        BaseDocumentTestRest.setUp(self)
        self._add_test_data()

    def test_get_collection(self):
        body = self.get_collection()
        doc = body['documents'][0]
        # Collection listings must not include the (heavy) geometry payload.
        self.assertNotIn('geometry', doc)

    def test_get_collection_paginated(self):
        self.app.get("/maps?offset=invalid", status=400)
        self.assertResultsEqual(
            self.get_collection({'offset': 0, 'limit': 0}), [], 4)
        self.assertResultsEqual(
            self.get_collection({'offset': 0, 'limit': 1}),
            [self.map4.document_id], 4)
        self.assertResultsEqual(
            self.get_collection({'offset': 0, 'limit': 2}),
            [self.map4.document_id, self.map3.document_id], 4)
        self.assertResultsEqual(
            self.get_collection({'offset': 1, 'limit': 2}),
            [self.map3.document_id, self.map2.document_id], 4)

    def test_get_collection_lang(self):
        self.get_collection_lang()

    def test_get_collection_search(self):
        reset_search_index(self.session)
        # Only map1 and map4 have an 'en' locale.
        self.assertResultsEqual(
            self.get_collection_search({'l': 'en'}),
            [self.map4.document_id, self.map1.document_id], 2)

    def test_get(self):
        body = self.get(self.map1)
        self._assert_geometry(body)
        self.assertNotIn('maps', body)

    def test_get_cooked(self):
        self.get_cooked(self.map1)

    def test_get_cooked_with_defaulting(self):
        self.get_cooked_with_defaulting(self.map1)

    def test_get_lang(self):
        self.get_lang(self.map1)

    def test_get_new_lang(self):
        self.get_new_lang(self.map1)

    def test_get_404(self):
        self.get_404()

    def test_get_caching(self):
        self.get_caching(self.map1)

    def test_get_info(self):
        body, locale = self.get_info(self.map1, 'en')
        self.assertEqual(locale.get('lang'), 'en')

    def test_post_not_moderator(self):
        # Only moderators may create maps.
        headers = self.add_authorization_header(username='contributor')
        self.app_post_json(
            self._prefix, {}, headers=headers,
            expect_errors=True, status=403)

    def test_post_error(self):
        body = self.post_error({}, user='moderator')
        errors = body.get('errors')
        self.assertEqual(len(errors), 2)
        self.assertCorniceRequired(errors[0], 'locales')
        self.assertCorniceRequired(errors[1], 'geometry')

    def test_post_missing_title(self):
        body_post = {
            'editor': 'IGN',
            'scale': '25000',
            'code': '3432OT',
            'geometry': {
                'id': 5678, 'version': 6789,
                'geom_detail': '{"type":"Polygon","coordinates":[[[668519.249382151,5728802.39591739],[668518.249382151,5745465.66808356],[689156.247019149,5745465.66808356],[689156.247019149,5728802.39591739],[668519.249382151,5728802.39591739]]]}'  # noqa
            },
            'locales': [
                {'lang': 'en'}
            ]
        }
        self.post_missing_title(body_post, user='moderator')

    def test_post_non_whitelisted_attribute(self):
        # 'protected' may not be set by clients.
        body = {
            'editor': 'IGN',
            'scale': '25000',
            'code': '3432OT',
            'protected': True,
            'geometry': {
                'id': 5678, 'version': 6789,
                'geom_detail': '{"type":"Polygon","coordinates":[[[668519.249382151,5728802.39591739],[668518.249382151,5745465.66808356],[689156.247019149,5745465.66808356],[689156.247019149,5728802.39591739],[668519.249382151,5728802.39591739]]]}'  # noqa
            },
            'locales': [
                {'lang': 'en', 'title': 'Lac d\'Annecy'}
            ]
        }
        self.post_non_whitelisted_attribute(body, user='moderator')

    def test_post_missing_content_type(self):
        self.post_missing_content_type({})

    def test_post_success(self):
        body = {
            'editor': 'IGN',
            'scale': '25000',
            'code': '3432OT',
            'geometry': {
                'id': 5678, 'version': 6789,
                'geom_detail': '{"type":"Polygon","coordinates":[[[668518.249382151,5728802.39591739],[668518.249382151,5745465.66808356],[689156.247019149,5745465.66808356],[689156.247019149,5728802.39591739],[668518.249382151,5728802.39591739]]]}'  # noqa
            },
            'locales': [
                {'lang': 'en', 'title': 'Lac d\'Annecy'}
            ]
        }
        body, doc = self.post_success(body, user='moderator')
        self.assertIsNotNone(body['geometry'].get('geom_detail'))
        # The first archived version must mirror the submitted document.
        version = doc.versions[0]
        archive_map = version.document_archive
        self.assertEqual(archive_map.editor, 'IGN')
        self.assertEqual(archive_map.scale, '25000')
        self.assertEqual(archive_map.code, '3432OT')
        archive_locale = version.document_locales_archive
        self.assertEqual(archive_locale.lang, 'en')
        self.assertEqual(archive_locale.title, 'Lac d\'Annecy')
        archive_geometry = version.document_geometry_archive
        self.assertEqual(archive_geometry.version, doc.geometry.version)
        self.assertIsNotNone(archive_geometry.geom_detail)
        self.assertIsNotNone(archive_geometry.geom_detail)
        # check that a link for intersecting documents is created
        links = self.session.query(TopoMapAssociation). \
            filter(
                TopoMapAssociation.topo_map_id == doc.document_id). \
            order_by(TopoMapAssociation.document_id). \
            all()
        self.assertEqual(len(links), 2)
        self.assertEqual(links[0].document_id, self.waypoint1.document_id)
        # Linked documents get their cache version bumped.
        self.check_cache_version(self.waypoint1.document_id, 2)
        self.assertEqual(links[1].document_id, self.route.document_id)
        self.check_cache_version(self.route.document_id, 2)

    def test_put_wrong_document_id(self):
        body = {
            'document': {
                'document_id': '9999999',
                'version': self.map1.version,
                'editor': 'IGN',
                'scale': '25000',
                'code': '3432OT',
                'locales': [
                    {'lang': 'en', 'title': 'Lac d\'Annecy',
                     'version': self.locale_en.version}
                ]
            }
        }
        self.put_wrong_document_id(body, user='moderator')

    def test_put_wrong_document_version(self):
        body = {
            'document': {
                'document_id': self.map1.document_id,
                'version': -9999,
                'editor': 'IGN',
                'scale': '25000',
                'code': '3432OT',
                'locales': [
                    {'lang': 'en', 'title': 'Lac d\'Annecy',
                     'version': self.locale_en.version}
                ]
            }
        }
        self.put_wrong_version(body, self.map1.document_id, user='moderator')

    def test_put_wrong_locale_version(self):
        body = {
            'document': {
                'document_id': self.map1.document_id,
                'version': self.map1.version,
                'editor': 'IGN',
                'scale': '25000',
                'code': '3432OT',
                'locales': [
                    {'lang': 'en', 'title': 'Lac d\'Annecy',
                     'version': -9999}
                ]
            }
        }
        self.put_wrong_version(body, self.map1.document_id, user='moderator')

    def test_put_wrong_ids(self):
        body = {
            'document': {
                'document_id': self.map1.document_id,
                'version': self.map1.version,
                'editor': 'IGN',
                'scale': '25000',
                'code': '3432OT',
                'locales': [
                    {'lang': 'en', 'title': 'Lac d\'Annecy',
                     'version': self.locale_en.version}
                ]
            }
        }
        self.put_wrong_ids(body, self.map1.document_id, user='moderator')

    def test_put_no_document(self):
        self.put_put_no_document(self.map1.document_id, user='moderator')

    def test_put_success_all(self):
        # Update figures, locale and geometry in a single PUT.
        body = {
            'message': 'Update',
            'document': {
                'document_id': self.map1.document_id,
                'version': self.map1.version,
                'quality': quality_types[1],
                'editor': 'IGN',
                'scale': '25000',
                'code': '3433OT',
                'geometry': {
                    'version': self.map1.geometry.version,
                    'geom_detail': '{"type":"Polygon","coordinates":[[[668519.249382151,5728802.39591739],[668518.249382151,5745465.66808356],[689156.247019149,5745465.66808356],[689156.247019149,5728802.39591739],[668519.249382151,5728802.39591739]]]}'  # noqa
                },
                'locales': [
                    {'lang': 'en', 'title': 'New title',
                     'version': self.locale_en.version}
                ]
            }
        }
        (body, map1) = self.put_success_all(body, self.map1, user='moderator')
        self.assertEqual(map1.code, '3433OT')
        locale_en = map1.get_locale('en')
        self.assertEqual(locale_en.title, 'New title')
        # version with lang 'en'
        versions = map1.versions
        version_en = self.get_latest_version('en', versions)
        archive_locale = version_en.document_locales_archive
        self.assertEqual(archive_locale.title, 'New title')
        archive_document_en = version_en.document_archive
        self.assertEqual(archive_document_en.scale, '25000')
        self.assertEqual(archive_document_en.code, '3433OT')
        archive_geometry_en = version_en.document_geometry_archive
        self.assertEqual(archive_geometry_en.version, 2)
        # version with lang 'fr'
        version_fr = self.get_latest_version('fr', versions)
        archive_locale = version_fr.document_locales_archive
        self.assertEqual(archive_locale.title, 'Lac d\'Annecy')
        # check that the links to intersecting documents are updated
        links = self.session.query(TopoMapAssociation). \
            filter(
                TopoMapAssociation.topo_map_id == self.map1.document_id). \
            all()
        self.assertEqual(len(links), 2)
        self.assertEqual(links[0].document_id, self.waypoint1.document_id)
        self.check_cache_version(self.waypoint1.document_id, 2)
        self.assertEqual(links[1].document_id, self.route.document_id)
        self.check_cache_version(self.route.document_id, 2)
        # waypoint 2 is no longer associated, the cache key was incremented
        self.check_cache_version(self.waypoint2.document_id, 2)

    def test_put_success_figures_only(self):
        body = {
            'message': 'Changing figures',
            'document': {
                'document_id': self.map1.document_id,
                'version': self.map1.version,
                'quality': quality_types[1],
                'editor': 'IGN',
                'scale': '25000',
                'code': '3433OT',
                'locales': [
                    {'lang': 'en', 'title': 'Lac d\'Annecy',
                     'version': self.locale_en.version}
                ]
            }
        }
        (body, map1) = self.put_success_figures_only(
            body, self.map1, user='moderator')
        self.assertEqual(map1.code, '3433OT')

    def test_put_success_lang_only(self):
        body = {
            'message': 'Changing lang',
            'document': {
                'document_id': self.map1.document_id,
                'version': self.map1.version,
                'quality': quality_types[1],
                'editor': 'IGN',
                'scale': '25000',
                'code': '3431OT',
                'locales': [
                    {'lang': 'en', 'title': 'New title',
                     'version': self.locale_en.version}
                ]
            }
        }
        (body, map1) = self.put_success_lang_only(
            body, self.map1, user='moderator')
        self.assertEqual(
            map1.get_locale('en').title, 'New title')

    def test_put_success_new_lang(self):
        """Test updating a document by adding a new locale.
        """
        body = {
            'message': 'Adding lang',
            'document': {
                'document_id': self.map1.document_id,
                'version': self.map1.version,
                'quality': quality_types[1],
                'editor': 'IGN',
                'scale': '25000',
                'code': '3431OT',
                'locales': [
                    {'lang': 'es', 'title': 'Lac d\'Annecy'}
                ]
            }
        }
        (body, map1) = self.put_success_new_lang(
            body, self.map1, user='moderator')
        self.assertEqual(map1.get_locale('es').title, 'Lac d\'Annecy')

    def _assert_geometry(self, body):
        """Assert the response carries a versioned polygon geometry."""
        self.assertIsNotNone(body.get('geometry'))
        geometry = body.get('geometry')
        self.assertIsNotNone(geometry.get('version'))
        self.assertIsNotNone(geometry.get('geom_detail'))
        geom = geometry.get('geom_detail')
        polygon = shape(json.loads(geom))
        self.assertIsInstance(polygon, Polygon)

    def _add_test_data(self):
        """Create four maps plus two waypoints and a route used as
        intersection targets; waypoint2 starts associated to map1."""
        self.map1 = TopoMap(editor='IGN', scale='25000', code='3431OT')
        self.locale_en = DocumentLocale(lang='en', title='Lac d\'Annecy')
        self.locale_fr = DocumentLocale(lang='fr', title='Lac d\'Annecy')
        self.map1.locales.append(self.locale_en)
        self.map1.locales.append(self.locale_fr)
        self.map1.geometry = DocumentGeometry(
            geom_detail='SRID=3857;POLYGON((611774 5706934,611774 5744215,'
                        '642834 5744215,642834 5706934,611774 5706934))')
        self.session.add(self.map1)
        self.session.flush()
        user_id = self.global_userids['contributor']
        DocumentRest.create_new_version(self.map1, user_id)
        self.map2 = TopoMap(
            editor='IGN', scale='25000', code='3432OT')
        self.session.add(self.map2)
        self.map3 = TopoMap(
            editor='IGN', scale='25000', code='3433OT')
        self.session.add(self.map3)
        self.map4 = TopoMap(
            editor='IGN', scale='25000', code='3434OT')
        self.map4.locales.append(DocumentLocale(
            lang='en', title='Lac d\'Annecy'))
        self.map4.locales.append(DocumentLocale(
            lang='fr', title='Lac d\'Annecy'))
        self.session.add(self.map4)
        self.session.flush()
        self.waypoint1 = Waypoint(
            waypoint_type='summit',
            geometry=DocumentGeometry(
                geom='SRID=3857;POINT(677461.381691516 5740879.44638645)')
        )
        self.waypoint2 = Waypoint(
            waypoint_type='summit',
            geometry=DocumentGeometry(
                geom='SRID=3857;POINT(693666.031687976 5741108.7574713)')
        )
        route_geom = 'SRID=3857;LINESTRING(668518 5728802, 668528 5728812)'
        self.route = Route(
            activities=['skitouring'],
            geometry=DocumentGeometry(geom_detail=route_geom))
        self.session.add_all([self.waypoint1, self.waypoint2, self.route])
        self.session.add(TopoMapAssociation(
            document=self.waypoint2, topo_map=self.map1))
        self.session.flush()
| c2corg/v6_api | c2corg_api/tests/views/test_topo_map.py | Python | agpl-3.0 | 16,155 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Author: Adriano Monteiro Marques <adriano@umitproject.org>
##
## Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from django.conf.urls.defaults import *
import django_cron
# URL routes for the status_cron app. NOTE(review): given the django_cron
# import above, these endpoints are presumably hit by scheduled workers
# rather than end users -- confirm. Keys/IDs are limited to [0-9a-zA-Z-_].
urlpatterns = patterns('',
    # url('^check_passive_hosts/?$', 'status_cron.views.check_passive_hosts', name='check_passive_hosts'),
    url('^check_passive_url_task/(?P<module_key>[0-9a-zA-Z\-\_]+)/?$', 'status_cron.views.check_passive_url_task', name='check_passive_url_task'),
    url('^check_passive_port_task/(?P<module_key>[0-9a-zA-Z\-\_]+)/?$', 'status_cron.views.check_passive_port_task', name='check_passive_port_task'),
    url('^aggregate_daily_status/?$', 'status_cron.views.aggregate_daily_status', name='aggregate_daily_status'),
    # url('^check_notifications/?$', 'status_cron.views.check_notifications', name='check_notifications'),
    url('^send_notification_task/(?P<notification_id>[0-9a-zA-Z\-\_]+)/?$', 'status_cron.views.send_notification_task', name='send_notification_task'),
)
) | umitproject/site-status | status_cron/urls.py | Python | agpl-3.0 | 1,731 |
# -*- coding: utf-8 -*-
# Copyright 2019 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class StockPicking(models.Model):
    """Expose the carrier's default tracking URL on the picking."""

    _inherit = 'stock.picking'

    # Read-only related field mirroring carrier_id.default_tracking_url.
    default_tracking_url = fields.Char(
        related='carrier_id.default_tracking_url',
        readonly=True,
    )
| OCA/carrier-delivery | delivery_carrier_default_tracking_url/models/stock_picking.py | Python | agpl-3.0 | 340 |
import web
from inginious.frontend.plugins.utils.admin_api import AdminApi
from inginious.frontend.plugins.utils import get_mandatory_parameter
class FilterTasksApi(AdminApi):
    """Admin API: full-text search over the cached task list, paginated."""

    def API_POST(self):
        # Expected POST parameters: task_query (search string),
        # limit (page size), page (1-based page number).
        parameters = web.input()
        task_query = get_mandatory_parameter(parameters, "task_query")
        limit = int(get_mandatory_parameter(parameters, "limit"))
        page = int(get_mandatory_parameter(parameters, "page"))
        # Visible courses: any course with a problem bank, plus courses on
        # which the requesting user has admin rights.
        course_ids = set(bank["courseid"]
                         for bank in self.database.problem_banks.find())
        for course_id, course in self.course_factory.get_all_courses().items():
            if self.user_manager.has_admin_rights_on_course(course):
                course_ids.add(course_id)
        # Mongo text search, restricted to visible courses, sorted by
        # relevance (textScore).
        tasks = list(self.database.tasks_cache.aggregate([
            {
                "$match":
                    {
                        "$text": {
                            "$search": task_query,
                            "$diacriticSensitive": False,
                            "$caseSensitive": False
                        }
                    }
            },
            {
                "$match":
                    {
                        "course_id": {"$in": list(course_ids)}
                    }
            },
            {
                "$project": {
                    "course_id": 1,
                    "task_id": 1,
                    "task_name": 1,
                    "task_author": 1,
                    "task_context": 1,
                    "tags": 1,
                    "course_name": 1,
                    "_id": 0,
                    "score": {"$meta": "textScore"}
                }
            },
            {
                "$sort": {"score": -1}
            }
        ]))
        # In-memory pagination of the full result set.
        # NOTE(review): limit == 0 raises ZeroDivisionError below, and
        # negative limit/page produce odd slices -- consider validating.
        left = limit * (page - 1)
        right = left + limit
        total_pages = len(tasks) // limit
        if len(tasks) % limit != 0 or total_pages == 0:
            total_pages += 1
        if right >= len(tasks):
            tasks = tasks[left:]
        else:
            tasks = tasks[left:right]
        response = {'total_pages': total_pages, "tasks": tasks}
        return 200, response
| JuezUN/INGInious | inginious/frontend/plugins/problem_bank/pages/api/filter_tasks_api.py | Python | agpl-3.0 | 2,183 |
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from . import website
from . import res_config_settings
| ingadhoc/website | website_sale_order_type_ux/models/__init__.py | Python | agpl-3.0 | 303 |
#! -*- coding: utf-8 -*-
from collections import OrderedDict
from sqlalchemy import Column, Date, ForeignKey, Index, String
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from radar.database import db
from radar.models.common import MetaModelMixin, patient_id_column, patient_relationship, uuid_pk_column
from radar.models.logs import log_changes
# ISO 3166-1 alpha-2 country code -> English short name, ordered
# alphabetically by name. NOTE(review): presumably the choice list for
# PatientAddress.country below -- confirm against the serializers.
COUNTRIES = OrderedDict([
    ('AF', 'Afghanistan'),
    ('AX', 'Åland Islands'),
    ('AL', 'Albania'),
    ('DZ', 'Algeria'),
    ('AS', 'American Samoa'),
    ('AD', 'Andorra'),
    ('AO', 'Angola'),
    ('AI', 'Anguilla'),
    ('AQ', 'Antarctica'),
    ('AG', 'Antigua and Barbuda'),
    ('AR', 'Argentina'),
    ('AM', 'Armenia'),
    ('AW', 'Aruba'),
    ('AU', 'Australia'),
    ('AT', 'Austria'),
    ('AZ', 'Azerbaijan'),
    ('BS', 'Bahamas'),
    ('BH', 'Bahrain'),
    ('BD', 'Bangladesh'),
    ('BB', 'Barbados'),
    ('BY', 'Belarus'),
    ('BE', 'Belgium'),
    ('BZ', 'Belize'),
    ('BJ', 'Benin'),
    ('BM', 'Bermuda'),
    ('BT', 'Bhutan'),
    ('BO', 'Bolivia, Plurinational State of'),
    ('BQ', 'Bonaire, Sint Eustatius and Saba'),
    ('BA', 'Bosnia and Herzegovina'),
    ('BW', 'Botswana'),
    ('BV', 'Bouvet Island'),
    ('BR', 'Brazil'),
    ('IO', 'British Indian Ocean Territory'),
    ('BN', 'Brunei Darussalam'),
    ('BG', 'Bulgaria'),
    ('BF', 'Burkina Faso'),
    ('BI', 'Burundi'),
    ('KH', 'Cambodia'),
    ('CM', 'Cameroon'),
    ('CA', 'Canada'),
    ('CV', 'Cape Verde'),
    ('KY', 'Cayman Islands'),
    ('CF', 'Central African Republic'),
    ('TD', 'Chad'),
    ('CL', 'Chile'),
    ('CN', 'China'),
    ('CX', 'Christmas Island'),
    ('CC', 'Cocos (Keeling) Islands'),
    ('CO', 'Colombia'),
    ('KM', 'Comoros'),
    ('CG', 'Congo'),
    ('CD', 'Congo, the Democratic Republic of the'),
    ('CK', 'Cook Islands'),
    ('CR', 'Costa Rica'),
    ('CI', 'Côte d\'Ivoire'),
    ('HR', 'Croatia'),
    ('CU', 'Cuba'),
    ('CW', 'Curaçao'),
    ('CY', 'Cyprus'),
    ('CZ', 'Czech Republic'),
    ('DK', 'Denmark'),
    ('DJ', 'Djibouti'),
    ('DM', 'Dominica'),
    ('DO', 'Dominican Republic'),
    ('EC', 'Ecuador'),
    ('EG', 'Egypt'),
    ('SV', 'El Salvador'),
    ('GQ', 'Equatorial Guinea'),
    ('ER', 'Eritrea'),
    ('EE', 'Estonia'),
    ('ET', 'Ethiopia'),
    ('FK', 'Falkland Islands (Malvinas)'),
    ('FO', 'Faroe Islands'),
    ('FJ', 'Fiji'),
    ('FI', 'Finland'),
    ('FR', 'France'),
    ('GF', 'French Guiana'),
    ('PF', 'French Polynesia'),
    ('TF', 'French Southern Territories'),
    ('GA', 'Gabon'),
    ('GM', 'Gambia'),
    ('GE', 'Georgia'),
    ('DE', 'Germany'),
    ('GH', 'Ghana'),
    ('GI', 'Gibraltar'),
    ('GR', 'Greece'),
    ('GL', 'Greenland'),
    ('GD', 'Grenada'),
    ('GP', 'Guadeloupe'),
    ('GU', 'Guam'),
    ('GT', 'Guatemala'),
    ('GG', 'Guernsey'),
    ('GN', 'Guinea'),
    ('GW', 'Guinea-Bissau'),
    ('GY', 'Guyana'),
    ('HT', 'Haiti'),
    ('HM', 'Heard Island and McDonald Islands'),
    ('VA', 'Holy See (Vatican City State)'),
    ('HN', 'Honduras'),
    ('HK', 'Hong Kong'),
    ('HU', 'Hungary'),
    ('IS', 'Iceland'),
    ('IN', 'India'),
    ('ID', 'Indonesia'),
    ('IR', 'Iran, Islamic Republic of'),
    ('IQ', 'Iraq'),
    ('IE', 'Ireland'),
    ('IM', 'Isle of Man'),
    ('IL', 'Israel'),
    ('IT', 'Italy'),
    ('JM', 'Jamaica'),
    ('JP', 'Japan'),
    ('JE', 'Jersey'),
    ('JO', 'Jordan'),
    ('KZ', 'Kazakhstan'),
    ('KE', 'Kenya'),
    ('KI', 'Kiribati'),
    ('KP', 'Korea, Democratic People\'s Republic of'),
    ('KR', 'Korea, Republic of'),
    ('KW', 'Kuwait'),
    ('KG', 'Kyrgyzstan'),
    ('LA', 'Lao People\'s Democratic Republic'),
    ('LV', 'Latvia'),
    ('LB', 'Lebanon'),
    ('LS', 'Lesotho'),
    ('LR', 'Liberia'),
    ('LY', 'Libya'),
    ('LI', 'Liechtenstein'),
    ('LT', 'Lithuania'),
    ('LU', 'Luxembourg'),
    ('MO', 'Macao'),
    ('MK', 'Macedonia, the former Yugoslav Republic of'),
    ('MG', 'Madagascar'),
    ('MW', 'Malawi'),
    ('MY', 'Malaysia'),
    ('MV', 'Maldives'),
    ('ML', 'Mali'),
    ('MT', 'Malta'),
    ('MH', 'Marshall Islands'),
    ('MQ', 'Martinique'),
    ('MR', 'Mauritania'),
    ('MU', 'Mauritius'),
    ('YT', 'Mayotte'),
    ('MX', 'Mexico'),
    ('FM', 'Micronesia, Federated States of'),
    ('MD', 'Moldova, Republic of'),
    ('MC', 'Monaco'),
    ('MN', 'Mongolia'),
    ('ME', 'Montenegro'),
    ('MS', 'Montserrat'),
    ('MA', 'Morocco'),
    ('MZ', 'Mozambique'),
    ('MM', 'Myanmar'),
    ('NA', 'Namibia'),
    ('NR', 'Nauru'),
    ('NP', 'Nepal'),
    ('NL', 'Netherlands'),
    ('NC', 'New Caledonia'),
    ('NZ', 'New Zealand'),
    ('NI', 'Nicaragua'),
    ('NE', 'Niger'),
    ('NG', 'Nigeria'),
    ('NU', 'Niue'),
    ('NF', 'Norfolk Island'),
    ('MP', 'Northern Mariana Islands'),
    ('NO', 'Norway'),
    ('OM', 'Oman'),
    ('PK', 'Pakistan'),
    ('PW', 'Palau'),
    ('PS', 'Palestinian Territory, Occupied'),
    ('PA', 'Panama'),
    ('PG', 'Papua New Guinea'),
    ('PY', 'Paraguay'),
    ('PE', 'Peru'),
    ('PH', 'Philippines'),
    ('PN', 'Pitcairn'),
    ('PL', 'Poland'),
    ('PT', 'Portugal'),
    ('PR', 'Puerto Rico'),
    ('QA', 'Qatar'),
    ('RE', 'Réunion'),
    ('RO', 'Romania'),
    ('RU', 'Russian Federation'),
    ('RW', 'Rwanda'),
    ('BL', 'Saint Barthélemy'),
    ('SH', 'Saint Helena, Ascension and Tristan da Cunha'),
    ('KN', 'Saint Kitts and Nevis'),
    ('LC', 'Saint Lucia'),
    ('MF', 'Saint Martin (French part)'),
    ('PM', 'Saint Pierre and Miquelon'),
    ('VC', 'Saint Vincent and the Grenadines'),
    ('WS', 'Samoa'),
    ('SM', 'San Marino'),
    ('ST', 'Sao Tome and Principe'),
    ('SA', 'Saudi Arabia'),
    ('SN', 'Senegal'),
    ('RS', 'Serbia'),
    ('SC', 'Seychelles'),
    ('SL', 'Sierra Leone'),
    ('SG', 'Singapore'),
    ('SX', 'Sint Maarten (Dutch part)'),
    ('SK', 'Slovakia'),
    ('SI', 'Slovenia'),
    ('SB', 'Solomon Islands'),
    ('SO', 'Somalia'),
    ('ZA', 'South Africa'),
    ('GS', 'South Georgia and the South Sandwich Islands'),
    ('SS', 'South Sudan'),
    ('ES', 'Spain'),
    ('LK', 'Sri Lanka'),
    ('SD', 'Sudan'),
    ('SR', 'Suriname'),
    ('SJ', 'Svalbard and Jan Mayen'),
    ('SZ', 'Swaziland'),
    ('SE', 'Sweden'),
    ('CH', 'Switzerland'),
    ('SY', 'Syrian Arab Republic'),
    ('TW', 'Taiwan, Province of China'),
    ('TJ', 'Tajikistan'),
    ('TZ', 'Tanzania, United Republic of'),
    ('TH', 'Thailand'),
    ('TL', 'Timor-Leste'),
    ('TG', 'Togo'),
    ('TK', 'Tokelau'),
    ('TO', 'Tonga'),
    ('TT', 'Trinidad and Tobago'),
    ('TN', 'Tunisia'),
    ('TR', 'Turkey'),
    ('TM', 'Turkmenistan'),
    ('TC', 'Turks and Caicos Islands'),
    ('TV', 'Tuvalu'),
    ('UG', 'Uganda'),
    ('UA', 'Ukraine'),
    ('AE', 'United Arab Emirates'),
    ('GB', 'United Kingdom'),
    ('US', 'United States'),
    ('UM', 'United States Minor Outlying Islands'),
    ('UY', 'Uruguay'),
    ('UZ', 'Uzbekistan'),
    ('VU', 'Vanuatu'),
    ('VE', 'Venezuela, Bolivarian Republic of'),
    ('VN', 'Viet Nam'),
    ('VG', 'Virgin Islands, British'),
    ('VI', 'Virgin Islands, U.S.'),
    ('WF', 'Wallis and Futuna'),
    ('EH', 'Western Sahara'),
    ('YE', 'Yemen'),
    ('ZM', 'Zambia'),
    ('ZW', 'Zimbabwe'),
])
@log_changes
class PatientAddress(db.Model, MetaModelMixin):
    """A patient's postal address as reported by a source group.

    from_date/to_date bound the period the address was valid (both optional).
    """

    __tablename__ = 'patient_addresses'

    id = uuid_pk_column()

    patient_id = patient_id_column()
    patient = patient_relationship('patient_addresses')

    # Group (and record type) that supplied this address.
    source_group_id = Column(Integer, ForeignKey('groups.id'), nullable=False)
    source_group = relationship('Group')
    source_type = Column(String, nullable=False)

    from_date = Column(Date)
    to_date = Column(Date)

    address1 = Column(String)
    address2 = Column(String)
    address3 = Column(String)
    address4 = Column(String)
    postcode = Column(String)
    country = Column(String)

    @property
    def full_address(self):
        """Return the populated address lines, postcode and country joined
        with newlines (empty/None parts are skipped)."""
        # Build the candidate list directly instead of creating an empty
        # list and extending it (previous version).
        parts = [
            self.address1,
            self.address2,
            self.address3,
            self.address4,
            self.postcode,
            self.country,
        ]
        return '\n'.join(x for x in parts if x)

    @property
    def anonymised_postcode(self):
        """Return only the postcode outbound code (first token, max 4 chars),
        or None when no postcode is recorded."""
        postcode = self.postcode
        if postcode is None:
            return None
        # Postcode outbound code
        return postcode.split(' ')[0][:4]
# Speed up lookups of a patient's addresses via the patient foreign key.
Index('patient_addresses_patient_idx', PatientAddress.patient_id)
| renalreg/radar | radar/models/patient_addresses.py | Python | agpl-3.0 | 8,540 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
from flask import Flask
app = Flask(__name__)
app.config.from_object(__name__)
# Loads the restaurants
RESTAURANTS = {}
with open('./refuge/var/restaurants.json', 'r') as f:
    RESTAURANTS = json.load(f)
# Named bounding boxes. NOTE(review): values appear to be
# [lat_north, lon_west, lat_south, lon_east] -- confirm with consumers.
AREAS = {
    "Kirchberg": [49.6383, 6.1349, 49.6228, 6.1785],
    "Gare": [49.6088, 6.1206, 49.5953, 6.1472],
    "Esch-sur-Alzette": [49.5532, 5.8868, 49.4896, 5.9991],
    "Sierck-Les-Bains": [49.4770, 6.3202, 49.4154, 6.4342],
    "Sarreguemines": [49.3323, 6.5286, 48.9274, 7.5157]
}
# API credentials come from the environment; None when unset.
jc_decaux_api_key = os.environ.get('JCDecauxAPIKEY', None)
openrouteservice_api_key = os.environ.get('openrouteserviceAPIKEY', None)
import openrouteservice
from openrouteservice.directions import directions
from openrouteservice import convert
# Shared openrouteservice client used by the routing helpers below.
CLIENT_OPEN_ROUTE_SERVICE = openrouteservice.Client(key=openrouteservice_api_key)
def process_itinerary(coords):
    """Compute a route through *coords* ((lon, lat) pairs) via openrouteservice.

    Returns a (distance, path) tuple: distance is the route length in metres
    (openrouteservice default units) and path is the decoded list of
    [lon, lat] points of the route geometry.
    """
    # (Removed a leftover debug print of the input coordinates.)
    routes = directions(CLIENT_OPEN_ROUTE_SERVICE, coords)
    distance = routes['routes'][0]['summary']['distance']
    geometry = routes['routes'][0]['geometry']
    path = convert.decode_polyline(geometry)['coordinates']
    return distance, path
import refuge.views
| cedricbonhomme/c-dric-on-mange-o-midi | refuge/__init__.py | Python | agpl-3.0 | 1,267 |
import pytest
import pytz
from datetime import datetime as dt
from arctic.date import datetime_to_ms, ms_to_datetime, mktz, to_pandas_closed_closed, DateRange, OPEN_OPEN, CLOSED_CLOSED
from arctic.date._mktz import DEFAULT_TIME_ZONE_NAME
from arctic.date._util import to_dt
# Round-trip: datetime -> epoch ms -> datetime must preserve the instant
# (compared after normalising to the local timezone). The dates include
# DST-transition days in two zones to exercise offset handling.
@pytest.mark.parametrize('pdt', [
    dt(2007, 3, 25, 1, tzinfo=mktz('Europe/London')),
    dt(2004, 10, 31, 23, 3, tzinfo=mktz('Europe/London')),
    dt(1990, 4, 5, 0, 0, tzinfo=mktz('Europe/London')),
    dt(2007, 3, 25, 1, tzinfo=mktz('EST')),
    dt(2004, 10, 31, 23, 3, tzinfo=mktz('EST')),
    dt(1990, 4, 5, 0, 0, tzinfo=mktz('EST')),
]
)
def test_datetime_to_ms_and_back(pdt):
    i = datetime_to_ms(pdt)
    pdt = pdt.astimezone(mktz())
    pdt2 = ms_to_datetime(i)
    assert pdt == pdt2
def test_datetime_to_ms_and_back_microseconds():
    """Sub-millisecond precision is lost in the ms round-trip; everything
    down to the millisecond must survive."""
    pdt = dt(2012, 8, 1, 12, 34, 56, 999999, tzinfo=mktz(DEFAULT_TIME_ZONE_NAME))
    i = datetime_to_ms(pdt)
    pdt2 = ms_to_datetime(i)
    assert pdt != pdt2
    assert pdt.year == pdt2.year
    assert pdt.month == pdt2.month
    assert pdt.day == pdt2.day
    assert pdt.hour == pdt2.hour
    assert pdt.minute == pdt2.minute
    assert pdt.second == pdt2.second
    # Microsecond precision loss inevitable.
    assert pdt.microsecond // 1000 == pdt2.microsecond // 1000
def test_daterange_closedclosed_None():
    # None passes through unchanged.
    assert to_pandas_closed_closed(None) is None
def test_daterange_closedclosed():
    """An OPEN_OPEN range is narrowed by 1ms on each side to the equivalent
    CLOSED_CLOSED range."""
    date_range = DateRange(dt(2013, 1, 1, tzinfo=mktz('Europe/London')),
                           dt(2014, 2, 1, tzinfo=mktz('Europe/London')), OPEN_OPEN)
    expected = DateRange(dt(2013, 1, 1, 0, 0, 0, 1000, tzinfo=mktz('Europe/London')),
                         dt(2014, 1, 31, 23, 59, 59, 999000, tzinfo=mktz('Europe/London')),
                         CLOSED_CLOSED)
    act = to_pandas_closed_closed(date_range)
    assert act == expected
def test_daterange_closedclosed_no_tz():
    """Naive endpoints are localised with the default mktz() timezone."""
    date_range = DateRange(dt(2013, 1, 1),
                           dt(2014, 2, 1), OPEN_OPEN)
    expected = DateRange(dt(2013, 1, 1, 0, 0, 0, 1000, tzinfo=mktz()),
                         dt(2014, 1, 31, 23, 59, 59, 999000, tzinfo=mktz()),
                         CLOSED_CLOSED)
    act = to_pandas_closed_closed(date_range)
    assert act == expected
def test_to_dt_0():
    # epoch-ms 0 maps to 1970-01-01 UTC
    assert to_dt(0) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_to_dt_0_default():
    # explicit default timezone gives the same epoch result
    assert to_dt(0, mktz('UTC')) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_to_dt_dt_no_tz():
    # a naive datetime without a default timezone is rejected
    with pytest.raises(ValueError):
        assert to_dt(dt(1970, 1, 1)) == dt(1970, 1, 1, tzinfo=mktz())
def test_to_dt_dt_no_tz_default():
    # a naive datetime picks up the supplied default timezone
    assert to_dt(dt(1970, 1, 1), mktz('UTC')) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_to_dt_dt_tz():
    # an aware datetime passes through unchanged
    assert to_dt(dt(1970, 1, 1, tzinfo=mktz('UTC'))) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_to_dt_dt_tz_default():
    # an aware datetime keeps its own tz, ignoring the default
    assert to_dt(dt(1970, 1, 1, tzinfo=mktz('UTC')), mktz('Europe/London')) == dt(1970, 1, 1, tzinfo=mktz('UTC'))
def test_daterange_raises():
    # a range whose end precedes its start is rejected
    with pytest.raises(ValueError):
        assert(DateRange(dt(2013, 1, 1), dt(2000, 1, 1)))
def test_daterange_eq():
    # equality: never equal to None, always equal to itself
    dr = DateRange(dt(2013, 1, 1))
    assert((dr == None) == False)
    assert(dr == dr)
def test_daterange_lt():
    # ordering compares start dates; an open (None) start is not greater
    dr = DateRange(dt(2013, 1, 1))
    dr2 = DateRange(dt(2001, 1, 1))
    assert(dr2 < dr)
    dr.start = None
    assert((dr2 < dr) == False)
| r0k3/arctic | tests/unit/date/test_util.py | Python | lgpl-2.1 | 3,570 |
# da vs turns module
import numpy as np
from scipy import optimize
import matplotlib.pyplot as pl
import glob, sys, os, time
from deskdb import SixDeskDB,tune_dir,mk_dir
import matplotlib
# ------------- basic functions -----------
def get_divisors(n):
  """Yield all divisors of the positive integer n.

  Small divisors (<= sqrt(n)) are yielded immediately in ascending order;
  their complements n // i are collected and then yielded, also ascending,
  so the overall sequence is sorted.
  """
  large_divisors = []
  # range() works on both Python 2 and 3 (original used xrange)
  for i in range(1, int(np.sqrt(n) + 1)):
    if n % i == 0:  # '==' not 'is': identity comparison of ints is fragile
      yield i
      if i != n // i:  # skip the complement of sqrt(n) for perfect squares
        large_divisors.insert(0, n // i)
  for divisor in large_divisors:
    yield divisor
def linear_fit(datx,daty,daterr):
  '''Weighted least-squares fit of the linear model f(x)=p0+p1*x.
  (datx,daty): data, daterr: measurement error
  return values (res,p0,p0err,p1,p1err):
  - res: sum of residuals^2 normalized with the measurement error
  - p0,p1: fit parameters
  - p0err, p1err: error of fit parameters'''
  def model(p, x):
    # p[0]=Dinf, p[1]=b0
    return p[0] + p[1] * x
  def weighted_residuals(p, x, y, err):
    return (y - model(p, x)) / err
  start_params = [0.1, 0.1]
  # minimize the weighted residuals
  outfit = optimize.leastsq(weighted_residuals, start_params,
                            args=(datx, daty, daterr), full_output=1)
  p0, p1 = outfit[0]
  covariance = outfit[1]
  p0err = np.sqrt(covariance[0, 0])
  p1err = np.sqrt(covariance[1, 1])
  # residual sum weighted with the measurement error
  res = sum(weighted_residuals((p0, p1), datx, daty, daterr) ** 2) / len(datx)
  return (res, p0, p0err, p1, p1err)
# ----------- functions necessary for the analysis -----------
#@profile
def get_min_turn_ang(s,t,a,it):
  """returns array with (angle,minimum sigma,sturn) of particles with lost turn number < it.
  check if there is a particle with angle ang with lost turn number <it
  if true: lost turn number and amplitude of the last stable particle is saved = particle "before" the particle with the smallest amplitude with nturns<it
  if false: the smallest lost turn number and the largest amplitude is saved
  """
  # s,t,a are ordered by angle,amplitude
  angles,sigmas=t.shape# angles = number of angles, sigmas = number of amplitudes
  ftype=[('angle',float),('sigma',float),('sturn',float)]
  mta=np.zeros(angles,dtype=ftype)
  # enumerate(a[:,0]) returns (0, a[0]), (1, a[1]), (2, a[2]), ... = iang, ang where iang = index of the array (0,1,2,...) for ang = angle (e.g. [1.5, ... , 1.5] , [3.0, ... ,3.0])
  for iang,ang in enumerate(a[:,0]):
    tang = t[iang]
    sang = s[iang]
    iturn = tang<it # select lost turn number < it
    # NOTE(review): any(tang[iturn]) is used as "mask is non-empty"; this
    # assumes all lost-turn values are non-zero (t[s==0] is set to tmax*100
    # upstream in mk_da_vst) - a turn value of 0 would be treated as False.
    if(any(tang[iturn])):
      sangit=sang[iturn].min()
      argminit=sang.searchsorted(sangit) # get index of smallest amplitude with sturn<it - amplitudes are ordered ascending
      mta[iang]=(ang,sang[argminit-1],tang[argminit-1])#last stable amplitude -> index argminit-1
    else:
      # no particle lost before turn 'it' at this angle: keep the largest
      # amplitude and the earliest loss turn as the stable boundary
      mta[iang]=(ang,sang.max(),tang.min())
  return mta
def select_ang_surv(data,seed,nang):
  """returns data reduced to ((angmax+1)/nang)-1 angles -> nang being the divisor of angmax

  Note: 'seed' is accepted for interface symmetry but not used here.
  Aborts (sys.exit) when nang does not divide angmax+1 or leaves fewer
  than 3 angles, the minimum needed for the trapezoidal integration.
  """
  angmax=len(data['angle'][:,0])#number of angles
  print nang
  if((nang not in list(get_divisors(angmax+1))) or ((angmax+1)/nang-1<3)):
    print('%s is not a divisor of %s or two large (((angmax+1)/nang)-1<3)')%(nang,angmax+1)
    sys.exit(0)
  #define variables for only selection of angles
  s,a,t=data['sigma'][nang::nang+1],data['angle'][nang::nang+1],data['sturn'][nang::nang+1]
  ftype=[('angle',float),('sigma',float),('sturn',float)]
  dataang=np.ndarray(np.shape(a),dtype=ftype)
  dataang['sigma'],dataang['angle'],dataang['sturn']=s,a,t
  return dataang
#@profile
def mk_da_vst(data,seed,tune,turnsl,turnstep):
  """returns 'seed','tunex','tuney','dawtrap','dastrap','dawsimp','dassimp',
  'dawtraperr','dastraperr','dastraperrep','dastraperrepang',
  'dastraperrepamp','dawsimperr','dassimperr','nturn','tlossmin',
  'mtime'
  the da is in steps of turnstep
  das: integral over radius
  das = 2/pi*int_0^(2pi)[r(theta)]dtheta=<r(theta)>
      = 2/pi*dtheta*sum(a_i*r(theta_i))
  daw: integral over phase space
  daw = (int_0^(2pi)[(r(theta))^4*sin(2*theta)]dtheta)^1/4
      = (dtheta*sum(a_i*r(theta_i)^4*sin(2*theta_i)))^1/4
  trapezoidal rule (trap): a_i=(3/2,1, ... ,1,3/2)
  simpson rule (simp): a_i=(55/24.,-1/6.,11/8.,1, ... 1,11/8.,-1/6.,55/24.)
  numerical recipes open formulas 4.1.15 and 4.1.18
  """
  mtime=time.time()
  (tunex,tuney)=tune
  s,a,t=data['sigma'],data['angle'],data['sturn']
  tmax=np.max(t[s>0])#maximum number of turns
  #set the 0 in t to tmax*100 in order to check if turnnumber<it (any(tang[tang<it])<it in get_min_turn_ang)
  t[s==0]=tmax*100
  angmax=len(a[:,0])#number of angles
  angstep=np.pi/(2*(angmax+1))#step in angle in rad
  ampstep=np.abs((s[s>0][1])-(s[s>0][0]))
  ftype=[('seed',int),('tunex',float),('tuney',float),('turn_max',int),('dawtrap',float),('dastrap',float),('dawsimp',float),('dassimp',float),('dawtraperr',float),('dastraperr',float),('dastraperrep',float),('dastraperrepang',float),('dastraperrepamp',float),('dawsimperr',float),('dassimperr',float),('nturn',float),('tlossmin',float),('mtime',float)]
  l_turnstep=len(np.arange(turnstep,tmax,turnstep))
  daout=np.ndarray(l_turnstep,dtype=ftype)
  for nm in daout.dtype.names:
    daout[nm]=np.zeros(l_turnstep)
  dacount=0
  currentdawtrap=0
  currenttlossmin=0
  #define integration coefficients at beginning and end which are unequal to 1
  ajtrap_s=np.array([3/2.])#trapezoidal rule (open formula 4.1.15)
  ajtrap_e=np.array([3/2.])
  ajsimp_s=np.array([55/24.,-1/6.,11/8.])#Simpson rule (open formula 4.1.18)
  ajsimp_e=np.array([11/8.,-1/6.,55/24.])
  warnsimp=True
  # march the loss-turn threshold 'it' forward in steps of turnstep and
  # integrate the stable boundary r(theta) at each step
  for it in np.arange(turnstep,tmax,turnstep):
    mta=get_min_turn_ang(s,t,a,it)
    mta_angle=mta['angle']*np.pi/180#convert to rad
    l_mta_angle=len(mta_angle)
    mta_sigma=mta['sigma']
    if(l_mta_angle>2):
      # define coefficients for trapezoidal rule (trap)
      # ajtrap = [3/2.,1,....1,3/2.]
      ajtrap=np.concatenate((ajtrap_s,np.ones(l_mta_angle-2),ajtrap_e))
    else:
      print('WARNING! mk_da_vst - You need at least 3 angles to calculate the da vs turns! Aborting!!!')
      sys.exit(0)
    if(l_mta_angle>6):
      # define coefficients for simpson rule (simp)
      # ajsimp = [55/24.,-1/6.,11/8.,1,....1,11/8.,-1/6.,55/24. ]
      ajsimp=np.concatenate((ajsimp_s,np.ones(l_mta_angle-6),ajsimp_e))
      calcsimp=True
    else:
      if(warnsimp):
        print('WARNING! mk_da_vst - You need at least 7 angles to calculate the da vs turns with the simpson rule! da*simp* will be set to 0.')
        warnsimp=False
      calcsimp=False
    # ---- trapezoidal rule (trap)
    # integral
    dawtrapint = ((ajtrap*(mta_sigma**4*np.sin(2*mta_angle))).sum())*angstep
    dawtrap    = (dawtrapint)**(1/4.)
    dastrap    = (2./np.pi)*(ajtrap*(mta_sigma)).sum()*angstep
    # error (propagated through the quartic root for daw)
    dawtraperrint = np.abs(((ajtrap*(2*(mta_sigma**3)*np.sin(2*mta_angle))).sum())*angstep*ampstep)
    dawtraperr    = np.abs(1/4.*dawtrapint**(-3/4.))*dawtraperrint
    dastraperr    = ampstep/2
    dastraperrepang = ((np.abs(np.diff(mta_sigma))).sum())/(2*(angmax+1))
    dastraperrepamp = ampstep/2
    dastraperrep    = np.sqrt(dastraperrepang**2+dastraperrepamp**2)
    # ---- simpson rule (simp)
    if(calcsimp):
      # int
      dawsimpint = (ajsimp*((mta_sigma**4)*np.sin(2*mta_angle))).sum()*angstep
      dawsimp    = (dawsimpint)**(1/4.)
      dassimpint = (ajsimp*mta_sigma).sum()*angstep
      dassimp    = (2./np.pi)*dassimpint
      # error
      dawsimperrint = (ajsimp*(2*(mta_sigma**3)*np.sin(2*mta_angle))).sum()*angstep*ampstep
      dawsimperr    = np.abs(1/4.*dawsimpint**(-3/4.))*dawsimperrint
      dassimperr    = ampstep/2#simplified
    else:
      (dawsimp,dassimp,dawsimperr,dassimperr)=np.zeros(4)
    tlossmin=np.min(mta['sturn'])
    # only record a row when the da or the minimum loss turn actually changed
    if(dawtrap!=currentdawtrap and it-turnstep >= 0 and tlossmin!=currenttlossmin):
      daout[dacount]=(seed,tunex,tuney,turnsl,dawtrap,dastrap,dawsimp,dassimp,dawtraperr,dastraperr,dastraperrep,dastraperrepang,dastraperrepamp,dawsimperr,dassimperr,it-turnstep,tlossmin,mtime)
      dacount=dacount+1
      currentdawtrap =dawtrap
      currenttlossmin=tlossmin
  return daout[daout['dawtrap']>0]#delete 0 from errors
# ----------- functions to calculat the fit -----------
def get_fit_data(data,fitdat,fitdaterr,fitndrop,fitkap,b1):
  '''linearize data for da vs turns fit according to model:
  D(N) = Dinf+b0/(log(N^(exp(-b1))))^kappa
  The first fitndrop points are excluded from the fit; when
  fitdaterr=='none' unit weights are returned.'''
  turns = data['tlossmin'][fitndrop::]
  datx = 1 / (np.log(turns ** np.exp(-b1)) ** fitkap)
  daty = data[fitdat][fitndrop::]
  if fitdaterr == 'none':
    # no measurement errors available -> weight every point equally
    daterr = np.ones(len(datx))
  else:
    daterr = data[fitdaterr][fitndrop::]
  return datx, daty, daterr
def get_b1mean(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap):
  '''returns (mean(b1),errmean(b1),std(b1)) over the seeds
  with b1 being the fit parameter in:
  D(N) = Dinf+b0/(log(N^(exp(-b1))))^kappa
  and a linear relation is assumed between:
  log(|b|)=log(|b0|)+b1*kappa <=> b=b0*exp(b1*kappa)
  with b being the fit paramter in:
  D(N) = Dinf+b/(log(N))^kappa
  fitndrop=do not include first fitndrop data points
  fitkap=kappa'''
  if(not db.check_seeds()):
    print('!!! Seeds are missing in database !!!')
  ftype=[('seed',int),('res',float),('logb0',float),('logb0err',float),('b1',float),('b1err',float)]
  lklog=np.zeros(len(db.get_db_seeds()),dtype=ftype)
  ftype=[('kappa',float),('res',float),('dinf',float),('dinferr',float),('b',float),('berr',float)]
  lkap=np.zeros(len(np.arange(fitskap,fitekap+fitdkap,fitdkap))-1,dtype=ftype)
  ccs=0
  for seed in db.get_db_seeds():
    data=db.get_da_vst(seed,tune)
    #start: scan over kappa
    cck=0
    for kap in np.arange(fitskap,fitekap+fitdkap,fitdkap):
      if(abs(kap)>1.e-6):#for kappa=0: D(N)=Dinf+b/(log(N)^kappa)=D(N)=Dinf+b -> fit does not make sense
        datx,daty,daterr=get_fit_data(data,fitdat,fitdaterr,fitndrop,kap,0)#fit D(N)=Dinf+b/(log(N)^kappa
        lkap[cck]=(kap,)+linear_fit(datx,daty,daterr)
        cck+=1
    lklog[ccs]=(seed,)+linear_fit(lkap['kappa'],np.log(np.abs(lkap['b'])),1)#linear fit log(|b|)=log(|b0|)+b1*kappa for each seed
    ccs+=1
  # error of mean value = sqrt(sum_i((1/n)*sigma_i**2))
  return (np.mean(lklog['b1']),np.sqrt(np.mean(lklog['b1err']**2)),np.std(lklog['b1']))#error of mean value = sqrt(sum_i((1/n)*sigma_i**2))
def mk_da_vst_fit(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap):
  '''1) a) fit D(N)=Dinf+b/(log(N))^kappa for all seeds and
        scan range (skap,ekap,dkap)
     b) assume linear dependence of b on kappa:
          log(|b|)=log(|b0|)+b1*kappa
        -> b1 for all seeds
     c) calculate avg(b1) over all seeds
  2) a) fit D(N)=Dinf+b0/(log(N)^(exp(-b1)))^kappa
        for fixed b1=b1mean (obtained in 1))
        and scan range (skap,ekap,dkap)
     b) use (b0,kappa) with minimum residual'''
  turnsl=db.env_var['turnsl']
  mtime=time.time()
  (tunex,tuney)=tune
  print('calculating b1mean ...')
  (b1mean,b1meanerr,b1std)=get_b1mean(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap)
  print('average over %s seeds: b1mean=%s, b1meanerr=%s, b1std=%s'%(round(len(db.get_db_seeds())),round(b1mean,3),round(b1meanerr,3),round(b1std,3)))
  print('start scan over kappa for fixed b1=%s to find kappa with minimum residual ...'%b1mean)
  ftype=[('kappa',float),('dkappa',float),('res',float),('dinf',float),('dinferr',float),('b0',float),('b0err',float)]
  lkap=np.zeros(len(np.arange(fitskap,fitekap+fitdkap,fitdkap))-1,dtype=ftype)#-1 as kappa=0 is not used
  ftype=[('seed',float),('tunex',float),('tuney',float),('turn_max',int),('fitdat',np.str_, 30),('fitdaterr',np.str_, 30),('fitndrop',float),('kappa',float),('dkappa',float),('res',float),('dinf',float),('dinferr',float),('b0',float),('b0err',float),('b1mean',float),('b1meanerr',float),('b1std',float),('mtime',float)]
  minkap=np.zeros(len(db.get_db_seeds()),dtype=ftype)
  ccs=0
  for seed in db.get_db_seeds():
    data=db.get_da_vst(seed,tune)
    #start: scan over kappa
    cck=0
    for kap in np.arange(fitskap,fitekap+fitdkap,fitdkap):
      if(abs(kap)>1.e-6):#for kappa=0: D(N)=Dinf+b/(log(N)^kappa)=D(N)=Dinf+b -> fit does not make sense
        datx,daty,daterr=get_fit_data(data,fitdat,fitdaterr,fitndrop,kap,b1mean)
        lkap[cck]=(kap,fitdkap,)+linear_fit(datx,daty,daterr)
        cck+=1
    # keep the (b0, kappa) pair with the smallest fit residual for this seed
    iminkap=np.argmin(lkap['res'])
    minkap[ccs]=(seed,tunex,tuney,turnsl,fitdat,fitdaterr,fitndrop,)+tuple(lkap[iminkap])+(b1mean,b1meanerr,b1std,mtime,)
    ccs+=1
  print('... scan over kappa is finished!')
  return minkap
# ----------- functions to reload and create DA.out files for previous scripts -----------
def save_daout_old(data,filename):
  """Save da-vs-turns data in the legacy DA.out column layout."""
  daoutold=data[['dawtrap','dastrap','dastraperrep','dastraperrepang','dastraperrepamp','nturn','tlossmin']]
  np.savetxt(filename,daoutold,fmt='%.6f %.6f %.6f %.6f %.6f %d %d')
def reload_daout_old(filename):
  """Reload a legacy DA.out file into a structured array."""
  ftype=[('dawtrap',float),('dastrap',float),('dastraperrep',float),('dastraperrepang',float),('dastraperrepamp',float),('nturn',float),('tlossmin',float)]
  return np.loadtxt(filename,dtype=ftype,delimiter=' ')
def save_daout(data,filename):
  """Save the full da-vs-turns table (all integration rules and errors)."""
  daout=data[['seed','tunex','tuney','turn_max','dawtrap','dastrap','dawsimp','dassimp','dawtraperr','dastraperr','dastraperrep','dastraperrepang','dastraperrepamp','dawsimperr','dassimperr','nturn','tlossmin']]
  np.savetxt(filename,daout,fmt='%d %.6f %.6f %d %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %.6f %d %d')
def save_davst_fit(data,filename):
  """Save the da-vs-turns fit results (one row per seed)."""
  fitdata=data[['seed','tunex','tuney','turn_max','fitdat','fitdaterr','fitndrop','kappa','dkappa','res','dinf','dinferr','b0','b0err','b1mean','b1meanerr','b1std']]
  np.savetxt(filename,fitdata,fmt='%d %.5f %.5f %d %s %s %d %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f %.5f')
def reload_daout(filename):
  """Reload a full DA.out file into a structured array."""
  ftype=[('seed',int),('tunex',float),('tuney',float),('turn_max',int),('dawtrap',float),('dastrap',float),('dawsimp',float),('dassimp',float),('dawtraperr',float),('dastraperr',float),('dastraperrep',float),('dastraperrepang',float),('dastraperrepamp',float),('dawsimperr',float),('dassimperr',float),('nturn',float),('tlossmin',float),('mtime',float)]
  return np.loadtxt(filename,dtype=ftype,delimiter=' ')
def save_dasurv(data,filename):
  """Save survival data flattened to one (angle, sigma, sturn) row per particle."""
  np.savetxt(filename,np.reshape(data,-1),fmt='%.8f %.8f %d')
def reload_dasurv(path):
  """Reload survival data from dasurv.out* and reshape to (angles, amplitudes)."""
  ftype=[('angle', '<f8'), ('sigma', '<f8'), ('sturn', '<f8')]
  data=np.loadtxt(glob.glob(path+'/dasurv.out*')[0],dtype=ftype,delimiter=' ')
  angles=len(set(data['angle']))
  return data.reshape(angles,-1)
def plot_surv_2d_stab(db,lbl,mksize,cl,seed,tune,ampmax):
  '''survival plot: scatter the stable area (particles surviving tmax turns)
  in (horizontal, vertical) amplitude space for one seed/tune.'''
  data=db.get_surv(seed,tune)
  s,a,t=data['sigma'],data['angle'],data['sturn']
  s,a,t=s[s>0],a[s>0],t[s>0]#delete 0 values
  tmax=np.max(t)
  # particles that survived the full tmax turns, projected to x/y amplitudes
  sxstab=s[t==tmax]*np.cos(a[t==tmax]*np.pi/180)
  systab=s[t==tmax]*np.sin(a[t==tmax]*np.pi/180)
  pl.scatter(sxstab,systab,mksize,marker='o',color=cl,edgecolor='none',label=lbl)
  pl.title('seed '+str(seed),fontsize=12)
  pl.xlim([0,ampmax])
  pl.ylim([0,ampmax])
  pl.xlabel(r'Horizontal amplitude [$\sigma$]',labelpad=10,fontsize=12)
  pl.ylabel(r'Vertical amplitude [$\sigma$]',labelpad=10,fontsize=12)
def plot_surv_2d_comp(db,dbcomp,lbl,complbl,seed,tune,ampmax):
  '''survival plot: overlay the stable areas of two studies (db in blue,
  dbcomp in red) in one figure.'''
  data=db.get_surv(seed,tune)
  datacomp=dbcomp.get_surv(seed,tune)
  pl.close('all')
  pl.figure(figsize=(6,6))
  plot_surv_2d_stab(db,lbl,10,'b',seed,tune,ampmax)
  plot_surv_2d_stab(dbcomp,complbl,2,'r',seed,tune,ampmax)
  pl.legend(loc='best')
def plot_comp_da_vst(db,dbcomp,ldat,ldaterr,lblname,complblname,seed,tune,ampmin,ampmax,tmax,slog,sfit,fitndrop):
  """plot dynamic aperture vs number of turns for two studies in one figure,
  blue/green=simple average, red/orange=weighted average.
  ldat/ldaterr: names of the data and error columns to plot;
  slog: logarithmic turn axis; sfit: overlay the fitted model curve."""
  pl.close('all')
  pl.figure(figsize=(6,6))
  for dbbb in [db,dbcomp]:
    data=dbbb.get_da_vst(seed,tune)
    # pick colors/labels depending on which of the two studies this is
    if(dbbb.LHCDescrip==db.LHCDescrip):
      lbl   = lblname
      fmtpl = 'bo'
      fmtfit= 'b-'
    if(dbbb.LHCDescrip==dbcomp.LHCDescrip):
      lbl    = complblname
      fmtpl  = 'ro'
      fmtfit = 'r-'
#      pl.errorbar(data[ldat[0]],data['tlossmin'],xerr=data[ldaterr[0]],fmt=fmtpl,markersize=2,label='%s %s'%(ldat[0],lbl))
    pl.errorbar(data[ldat[0]],data['tlossmin'],xerr=data[ldaterr[0]],fmt=fmtpl,markersize=2,label='%s'%(lbl))
    if(sfit):
      fitdata=dbbb.get_da_vst_fit(seed,tune)
      # narrow the fit table down to the requested data/error/ndrop combination
      fitdata=fitdata[fitdata['fitdat']==ldat[0]]
      fitdata=fitdata[fitdata['fitdaterr']==ldaterr[0]]
      fitdata=fitdata[np.abs(fitdata['fitndrop']-float(fitndrop))<1.e-6]
      if(len(fitdata)==1):
        # D(N) = Dinf + b0/(log(N^exp(-b1mean)))^kappa
        pl.plot(fitdata['dinf']+fitdata['b0']/(np.log(data['tlossmin']**np.exp(-fitdata['b1mean']))**fitdata['kappa']),data['tlossmin'],fmtfit)
      else:
        print('Warning: no fit data available or data ambigious!')
  pl.title('seed '+str(seed),fontsize=16)
  pl.xlim([ampmin,ampmax])
  pl.xlabel(r'Dynamic aperture [$\sigma$]',labelpad=10,fontsize=16)
  pl.ylabel(r'Number of turns',labelpad=15,fontsize=16)
  plleg=pl.gca().legend(loc='best',fontsize=16)
  for label in plleg.get_texts():
    label.set_fontsize(12)
  if(slog):
    pl.ylim([5.e3,tmax])
    pl.yscale('log')
  else:
    pl.ylim([0,tmax])
    pl.gca().ticklabel_format(style='sci',axis='y',scilimits=(0,0))
def clean_dir_da_vst(db,files):
  '''create directory structure and if force=true delete old files of da vs turns analysis'''
  for seed in db.get_seeds():
    for tune in db.get_db_tunes():
      pp=db.mk_analysis_dir(seed,tune)# create directory
      if(len(files)>0):#delete old plots and files
        for filename in files:
          ppf=os.path.join(pp,filename)
          if(os.path.exists(ppf)): os.remove(ppf)
  if(len(files)>0):
    print('remove old {0} ... files in '+db.LHCDescrip).format(files)
# for error analysis - data is not saved in database but output files are generated
def RunDaVsTurnsAng(db,seed,tune,turnstep):
  """Da vs turns -- calculate da vs turns for divisors of angmax,
  e.g. for angmax=29+1 for divisors [1, 2, 3, 5, 6] - last 2 [10,15] are omitted as the number of angles has to be larger than 3

  Results for each divisor nang are written below <analysis_dir>/<nang>.
  Aborts (sys.exit) on invalid turnstep or missing seed/tune.
  """
  # start analysis
  try:
    turnstep=int(float(turnstep))
  # bug fix: 'except [A,B,C]' (a list) never matches - must be a tuple
  except (ValueError, NameError, TypeError):
    print('Error in RunDaVsTurns: turnstep must be integer values!')
    sys.exit(0)
  if(seed not in db.get_db_seeds()):
    print('WARNING: Seed %s is missing in database !!!'%seed)
    sys.exit(0)
  if(tune not in db.get_db_tunes()):
    print('WARNING: tune %s is missing in database !!!'%tune)
    sys.exit(0)
  turnsl=db.env_var['turnsl']#get turnsl for outputfile names
  seed=int(seed)
  print('analyzing seed {0} and tune {1}...').format(str(seed),str(tune))
  dirname=db.mk_analysis_dir(seed,tune)#directory struct already created in clean_dir_da_vst, only get dir name (string) here
  print('... get survival data')
  dasurvtot= db.get_surv(seed,tune)
  a=dasurvtot['angle']
  angmax=len(a[:,0])#number of angles
  #use only divisors nang with (angmax+1)/nang-1>=3 = minimum number of angles for trapezoidal rule
  divsall=np.array(list(get_divisors(angmax+1)))
  divs=divsall[(angmax+1)/divsall-1>2]
  print('... number of angles: %s, divisors: %s'%(angmax,str(divs)))
  for nang in divs:
    dirnameang='%s/%s'%(dirname,nang)
    mk_dir(dirnameang)
    dasurv=select_ang_surv(dasurvtot,seed,nang)
    print('... calculate da vs turns')
    daout=mk_da_vst(dasurv,seed,tune,turnsl,turnstep)
    # NOTE(review): save_daout is given the directory, not a DA.out path,
    # while the message below reports {dir}/DA.out - confirm intended target
    save_daout(daout,dirnameang)
    print('... save da vs turns data in {0}/DA.out').format(dirnameang)
# in analysis - putting the pieces together
def RunDaVsTurns(db,force,outfile,outfileold,turnstep,davstfit,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap,outfilefit):
  '''Da vs turns -- calculate da vs turns for study dbname, if davstfit=True also fit the data.

  Results are stored in the database (table da_vst / da_vst_fit); optional
  flat files are written when outfile/outfileold/outfilefit are set.
  '''
  #---- calculate the da vs turns
  try:
    turnstep=int(float(turnstep))
  # bug fix: 'except [A,B,C]' (a list) never matches - must be a tuple
  except (ValueError, NameError, TypeError):
    print('Error in RunDaVsTurns: turnstep must be an integer values!')
    sys.exit(0)
  if(not db.check_seeds()):
    print('!!! Seeds are missing in database !!!')
  turnsl=db.env_var['turnsl']#get turnsl for outputfile names
  turnse=db.env_var['turnse']
  for seed in db.get_db_seeds():
    seed=int(seed)
    print('analyzing seed {0} ...').format(str(seed))
    for tune in db.get_db_tunes():
      print('analyzing tune {0} ...').format(str(tune))
      dirname=db.mk_analysis_dir(seed,tune)#directory struct already created in clean_dir_da_vst, only get dir name (string) here
      print('... get survival data')
      dasurv= db.get_surv(seed,tune)
      if dasurv is None:
        print("ERROR: survival data could not be retrieved due to "+
              "and error in the database or tracking data. Skip "
              "this seed %s"%(seed))
        continue
      print('... get da vs turns data')
      daout = db.get_da_vst(seed,tune)
      if(len(daout)>0):#reload data, if input data has changed redo the analysis
        an_mtime=daout['mtime'].min()
        res_mtime=db.execute('SELECT max(mtime) FROM six_results')[0][0]
        if res_mtime>an_mtime or force is True:
          files=('DA.%s.out DAsurv.%s.out DA.%s.png DAsurv.%s.png DAsurv_log.%s.png DAsurv_comp.%s.png DAsurv_comp_log.%s.png'%(turnse,turnse,turnse,turnse,turnse,turnse,turnse)).split()+['DA.out','DAsurv.out','DA.png','DAsurv.png','DAsurv_log.png','DAsurv_comp.png','DAsurv_comp_log.png']
          clean_dir_da_vst(db,files)# create directory structure and delete old files
          print('... input data has changed or force=True - recalculate da vs turns')
          daout=mk_da_vst(dasurv,seed,tune,turnsl,turnstep)
          print('.... save data in database')
          #check if old table name da_vsturn exists, if yes delete it
          if(db.check_table('da_vsturn')):
            print('... delete old table da_vsturn - table will be substituted by new table da_vst')
            db.execute("DROP TABLE da_vsturn")
          db.st_da_vst(daout,recreate=True)
      else:#create data
        print('... calculate da vs turns')
        daout=mk_da_vst(dasurv,seed,tune,turnsl,turnstep)
        print('.... save data in database')
        db.st_da_vst(daout,recreate=False)
      if(outfile):# create dasurv.out and da.out files
        fnsurv='%s/DAsurv.%s.out'%(dirname,turnse)
        save_dasurv(dasurv,fnsurv)
        print('... save survival data in {0}').format(fnsurv)
        fndaout='%s/DA.%s.out'%(dirname,turnse)
        save_daout(daout,fndaout)
        print('... save da vs turns data in {0}').format(fndaout)
        if(outfileold):
          fndaoutold='%s/DAold.%s.out'%(dirname,turnse)
          save_daout_old(daout,fndaoutold)
          print('... save da vs turns (old data format) data in {0}').format(fndaoutold)
  #---- fit the data
  if(davstfit):
    if(fitdat in ['dawtrap','dastrap','dawsimp','dassimp']):
      if(fitdaterr in ['none','dawtraperr','dastraperr','dastraperrep','dastraperrepang','dastraperrepamp','dawsimperr','dassimperr']):
        try:
          fitndrop=int(float(fitndrop))
        # bug fix: list -> tuple so the handler actually catches
        except (ValueError, NameError, TypeError):
          print('Error in RunDaVsTurns: fitndrop must be an integer values! - Aborting!')
          sys.exit(0)
        try:
          fitskap=float(fitskap)
          fitekap=float(fitekap)
          fitdkap=float(fitdkap)
        # bug fix: list -> tuple so the handler actually catches
        except (ValueError, NameError, TypeError):
          print('Error in RunDaVsTurns: fitskap,fitekap and fitdkap must be an float values! - Aborting!')
          sys.exit(0)
        if((np.arange(fitskap,fitekap+fitdkap,fitdkap)).any()):
          for tune in db.get_db_tunes():
            print('fit da vs turns for tune {0} ...').format(str(tune))
            fitdaout=mk_da_vst_fit(db,tune,fitdat,fitdaterr,fitndrop,fitskap,fitekap,fitdkap)
            print('.... save fitdata in database')
            db.st_da_vst_fit(fitdaout,recreate=False)
            if(outfilefit):
              (tunex,tuney)=tune
              sixdesktunes="%g_%g"%(tunex,tuney)
              fndot='%s/DAfit.%s.%s.%s.%s.%s.plot'%(db.mk_analysis_dir(),db.LHCDescrip,sixdesktunes,turnse,fitdat,fitdaterr)
              save_davst_fit(fitdaout,fndot)
              print('... save da vs turns fit data in {0}').format(fndot)
        else:
          print('Error in RunDaVsTurns: empty scan range for fitkap!')
      else:
        print("Error in -fitopt: <dataerr> has to be 'none','dawtraperr','dastraperr','dastraperrep','dastraperrepang','dastraperrepamp','dawsimperr' or 'dassimperr' - Aborting!")
        sys.exit(0)
    else:
      print("Error in -fitopt: <data> has to be 'dawtrap','dastrap','dawsimp' or 'dassimp' - Aborting!")
      sys.exit(0)
def PlotDaVsTurns(db,ldat,ldaterr,ampmaxsurv,ampmindavst,ampmaxdavst,tmax,plotlog,plotfit,fitndrop):
  '''plot survival plots and da vs turns for list of data ldat and associated error ldaterr.

  One DAsurv.<turnse>.png and one DA[_log].<turnse>.png is written per
  seed/tune into the analysis directory.
  '''
  turnsl=db.env_var['turnsl']
  turnse=db.env_var['turnse']
  print('Da vs turns -- create survival and da vs turns plots')
  try:
    ampmaxsurv =float(ampmaxsurv)
    ampmindavst=float(ampmindavst)
    ampmaxdavst=float(ampmaxdavst)
  # bug fix: 'except [A,B,C]' (a list) never matches - must be a tuple
  except (ValueError, NameError, TypeError):
    print('Error in PlotDaVsTurns: ampmaxsurv and amprangedavst must be float values!')
    sys.exit(0)
  #remove all files
  if(plotlog):
    files=('DA_log.png DAsurv.png DA_log.%s.png DAsurv.%s.png'%(turnse,turnse)).split()
  else:
    files=('DA.png DAsurv.png DA.%s.png DAsurv.%s.png'%(turnse,turnse)).split()
  clean_dir_da_vst(db,files)# create directory structure and delete old files if force=true
  if(not db.check_seeds()):
    print('!!! Seeds are missing in database !!!')
  for seed in db.get_db_seeds():
    seed=int(seed)
    for tune in db.get_db_tunes():
      dirname=db.mk_analysis_dir(seed,tune)#directory struct already created in clean_dir_da_vst, only get dir name (string) here
      pl.close('all')
      pl.figure(figsize=(6,6))
      db.plot_surv_2d(seed,tune,ampmaxsurv)#suvival plot
      pl.savefig('%s/DAsurv.%s.png'%(dirname,turnse))
      print('... saving plot %s/DAsurv.%s.png'%(dirname,turnse))
      db.plot_da_vst(seed,tune,ldat,ldaterr,ampmindavst,ampmaxdavst,tmax,plotlog,plotfit,fitndrop)#da vs turns plot
      if(plotlog==True):
        pl.savefig('%s/DA_log.%s.png'%(dirname,turnse))
        print('... saving plot %s/DA_log.%s.png'%(dirname,turnse))
      else:
        pl.savefig('%s/DA.%s.png'%(dirname,turnse))
        print('... saving plot %s/DA.%s.png'%(dirname,turnse))
def PlotCompDaVsTurns(db,dbcomp,ldat,ldaterr,lblname,complblname,ampmaxsurv,ampmindavst,ampmaxdavst,tmax,plotlog,plotfit,fitndrop):
  '''Comparison of two studies: survival plots (area of stable particles) and Da vs turns plots.

  Plots are only produced for seed/tune combinations present in both studies.
  '''
  matplotlib.rcParams.update({'font.size': 16})
  turnsldb    =db.env_var['turnsl']
  turnsedb    =db.env_var['turnse']
  turnsldbcomp=dbcomp.env_var['turnsl']
  turnsedbcomp=dbcomp.env_var['turnse']
  if(not turnsldb==turnsldbcomp):
    print('Warning! Maximum turn number turn_max of %s and %s differ!'%(db.LHCDescrip,dbcomp.LHCDescrip))
  try:
    ampmaxsurv=float(ampmaxsurv)
    ampmindavst=float(ampmindavst)
    ampmaxdavst=float(ampmaxdavst)
    tmax=int(float(tmax))
  # bug fix: 'except ValueError,NameError:' is Python-2-only syntax that
  # catches ValueError alone and binds it to the name NameError - a tuple
  # catches both exception types as intended
  except (ValueError, NameError):
    print('Error in PlotCompDaVsTurns: ampmaxsurv and amprangedavst must be float values and tmax an integer value!')
    sys.exit(0)
  #remove all files
  if(plotlog):
    files=('DA_comp_log.png DAsurv_comp.png DA_comp_log.%s.png DAsurv_comp.%s.png'%(turnsedb,turnsedb)).split()
  else:
    files=('DA_comp.png DAsurv_comp.png DA_comp.%s.png DAsurv_comp.%s.png'%(turnsedb,turnsedb)).split()
  clean_dir_da_vst(db,files)# create directory structure and delete old files if force=true
  # start analysis
  if(not db.check_seeds()):
    print('Seeds are missing in database!')
  for seed in db.get_db_seeds():
    seed=int(seed)
    for tune in db.get_db_tunes():
      if(seed in dbcomp.get_db_seeds() and tune in db.get_db_tunes()):
        dirname=db.mk_analysis_dir(seed,tune)#directories already created with
        pl.close('all')
        plot_surv_2d_comp(db,dbcomp,lblname,complblname,seed,tune,ampmaxsurv)
        pl.savefig('%s/DAsurv_comp.%s.png'%(dirname,turnsedb))
        print('... saving plot %s/DAsurv_comp.%s.png'%(dirname,turnsedb))
        plot_comp_da_vst(db,dbcomp,ldat,ldaterr,lblname,complblname,seed,tune,ampmindavst,ampmaxdavst,tmax,plotlog,plotfit,fitndrop)
        if(plotlog==True):
          pl.savefig('%s/DA_comp_log.%s.png'%(dirname,turnsedb),bbox_inches='tight')
          print('... saving plot %s/DA_comp_log.%s.png'%(dirname,turnsedb))
        else:
          pl.savefig('%s/DA_comp.%s.png'%(dirname,turnsedb),bbox_inches='tight')
          print('... saving plot %s/DA_comp.%s.png'%(dirname,turnsedb))
| mfittere/SixDeskDB | sixdeskdb/davsturns.py | Python | lgpl-2.1 | 28,687 |
#!/usr/bin/env python
# Aravis - Digital camera library
#
# Copyright (c) 2011-2012 Emmanuel Pacaud
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
#
# Author: Emmanuel Pacaud <emmanuel@gnome.org>
# If you have installed aravis in a non standard location, you may need
# to make GI_TYPELIB_PATH point to the correct location. For example:
#
# export GI_TYPELIB_PATH=$GI_TYPELIB_PATH:/opt/bin/lib/girepositry-1.0/
#
# You may also have to give the path to libaravis.so, using LD_PRELOAD or
# LD_LIBRARY_PATH.
import gi
gi.require_version ('Aravis', '0.2')
from gi.repository import Aravis
# Print the introspected Aravis enum types and a sample member of each.
# Parenthesized form works identically as a Python 2 print statement
# (parenthesized expression) and as the Python 3 print function.
print (Aravis.Auto)
print (Aravis.Auto.OFF)
print (Aravis.BufferStatus)
print (Aravis.DebugLevel)
print (Aravis.DomNodeType)
print (Aravis.GvStreamPacketResend)
print (Aravis.GvspPacketType)
print (Aravis.PixelFormat)
print (Aravis.PixelFormat.MONO_8)
| lu-zero/aravis | tests/python/arv-enum-test.py | Python | lgpl-2.1 | 1,513 |
#### import the simple module from the paraview
from paraview.simple import *
import glob, re, os, numpy, csv
def CreatePointSelection(ids):
    """Build a POINT-field ID selection source from the given point ids.

    Each id is paired with process id 0, as IDSelectionSource expects
    a flat [proc-id, point-id, proc-id, point-id, ...] list.
    """
    source = IDSelectionSource()
    source.FieldType = "POINT"
    flattened = []
    for point_id in ids:
        flattened.extend((0, point_id))  # (proc-id, point-id) pair
    source.IDs = flattened
    return source
def selectElectrodeData(data, voi):
    """Plot the selected point of *data* over time and reduce the result to
    the variables of interest *voi* (via a double table transpose).

    NOTE(review): uses the module-level global 'selection' (defined further
    down in this script) instead of a parameter - confirm the definition
    order before reusing this function elsewhere.
    """
    dataOverTime = PlotSelectionOverTime(Input=data, Selection=selection)
    dataOverTime.OnlyReportSelectionStatistics = 0
    # first transpose: keep only the voi columns
    dataOverTimeT = TransposeTable(Input=dataOverTime, VariablesofInterest=voi)
    dataOverTimeT.UpdatePipeline()
    # second transpose: restore row-per-timestep layout with original names
    dataOverTimeTT = TransposeTable(Input=dataOverTimeT)
    dataOverTimeTT.Usethecolumnwithoriginalcolumnsname = 1
    dataOverTimeTT.Addacolumnwithoriginalcolumnsname = 0
    dataOverTimeTT.UpdatePipeline()
    return dataOverTimeTT
path = os.getcwd() + "/"
#path = "C:\\Users\\John\\Desktop\\"
fileBase = "SteadyState_out"
#fileBase = "Ar_phi_2.5_eV_tOn_2_ns_tOff_100_ns_d_10_um_VHigh_45.6_V_VLow_1.0_V"
## Define selection ##
# single point id 0; used globally by selectElectrodeData
selection = CreatePointSelection(ids=[0])
## Get reader data ##
reader = ExodusIIReader(FileName=path+fileBase + '.e')
reader.GenerateObjectIdCellArray = 1
reader.GenerateGlobalElementIdArray = 1
# load every available element/point variable and block
reader.ElementVariables = reader.ElementVariables.Available
reader.PointVariables = reader.PointVariables.Available
reader.ElementBlocks = reader.ElementBlocks.Available
reader.ApplyDisplacements = 1
reader.DisplacementMagnitude = 1.0
## Get cathode and anode coordinates
# temporary calculator whose 'coords' array yields the x-extent of the mesh
calc = Calculator(Input=reader)
calc.ResultArrayName = 'coords'
calc.Function = 'coords'
calc.UpdatePipeline()
coordRange = calc.PointData['coords'].GetRange()
minX = coordRange[0]
maxX = coordRange[1]
# the calculator is only needed for the coordinate range - drop it
Delete(calc)
del calc
## Prepare to extract electrode data ##
electrodeData = []
VariablesofInterest = ['Time', 'Voltage', 'Current_Arp', 'Current_em', 'tot_gas_current', 'Emission_energy_flux']
## Extract cathode data ##
# cathode assumed at the minimum-x boundary
cathodeValues = ExtractLocation(Input=reader)
cathodeValues.Location = [minX,0,0]
cathodeValues.Mode = 'Interpolate At Location'
electrodeData.append(selectElectrodeData(cathodeValues, VariablesofInterest))
## Extract anode data ##
# anode assumed at the maximum-x boundary
anodeValues = ExtractLocation(Input=reader)
anodeValues.Location = [maxX,0,0]
anodeValues.Mode = 'Interpolate At Location'
electrodeData.append(selectElectrodeData(anodeValues, VariablesofInterest))
electrodeData.append(reader)
PowerAndEfficiency = ProgrammableFilter(Input=electrodeData)
PowerAndEfficiency.Script = """
from numpy import trapz
import numpy as np
for c, a, r, outTable in zip(inputs[0], inputs[1], inputs[2], output):
voltageDelta = 1E-1 # (V)
timeUnits = 1E-9 # (s/ns)
potential = c.RowData['Voltage'] - a.RowData['Voltage'] # (V)
loadVoltage = max( potential ) * np.ones(len(c.RowData['Voltage'])) # (V)
workFunctionDelta = 1
workFunctionDeltaVector = workFunctionDelta * np.ones(len(c.RowData['Voltage'])) # (eV)
appliedVoltage = numpy.round( potential - loadVoltage , 4 ) # (V)
ind = np.where( max( potential ) - voltageDelta < np.array(potential)) # (V)
time = c.RowData['Time'] - min(c.RowData['Time']) # (ns)
period = max(time) - min(time) # (ns)
offTime = max(time[ind]) - min(time[ind]) # (ns)
onTime = period - offTime # (ns)
# current density
j = a.RowData['tot_gas_current']
# The units stay the same because it is being integrated over ns, then divided by ns
# Time (ns)
outTable.RowData.append(time, 'time')
# Total current density leaving at the boundaries (A/m^2)
outTable.RowData.append(j, 'CurrentDensity')
# Cathode anode potential difference (V)
outTable.RowData.append(potential, 'CathodeAnodePotentialDifference')
# Output voltage (V)
outTable.RowData.append(workFunctionDeltaVector + potential, 'OutputVoltage')
# Production voltage (V)
outTable.RowData.append(workFunctionDeltaVector + loadVoltage, 'ProductionVoltage')
# Applied voltage (V)
outTable.RowData.append(appliedVoltage, 'AppliedVoltage')
# Net power (W/m^2)
outTable.RowData.append(j * (workFunctionDeltaVector + potential), 'NetPower')
# Power produced (W/m^2)
outTable.RowData.append(j * (workFunctionDeltaVector + loadVoltage), 'PowerProduced')
# Power power consumed (W/m^2)
outTable.RowData.append(j * appliedVoltage, 'PowerConsumed')
# ElectronCooling (W/m^2)
outTable.RowData.append(c.RowData['Emission_energy_flux'], 'ElectronCooling')
# Total current density leaving at the boundaries (A/m^2)
outTable.RowData.append(r.FieldData['Full_EmissionCurrent'], 'EmittedCurrentDensity') # Emitted current density (A/m^2)
outTable.RowData.append(r.FieldData['Thermionic_EmissionCurrent'], 'ThermionicEmissionCurrent') # Thermionic emitted current density (A/m^2)
outTable.RowData.append(r.FieldData['Native_EmissionCurrent'], 'NativeCurrentDensity') # Native emitted current density (A/m^2)
"""
PowerAndEfficiency.UpdatePipeline()
fname = glob.glob(path + 'TimeDependentData*.csv')
for f in fname:
os.remove(f)
writer = CreateWriter(path + 'TimeDependentData.csv', PowerAndEfficiency, Precision=13, UseScientificNotation=1)
writer.UpdatePipeline()
fname = glob.glob(path + 'TimeDependentData*.csv')
os.rename(fname[0] , path + 'TimeDependentData.csv')
for f in GetSources().values():
Delete(f)
del f
| jhaase1/zapdos | problems/Schottky_emission/transient/no_ballast/parametric/base/OldUnAveragedSystemData.py | Python | lgpl-2.1 | 5,514 |
##########################################################################
## # The Coq Proof Assistant / The Coq Development Team ##
## v # Copyright INRIA, CNRS and contributors ##
## <O___,, # (see version control and CREDITS file for authors & dates) ##
## \VV/ ###############################################################
## // # This file is distributed under the terms of the ##
## # GNU Lesser General Public License Version 2.1 ##
## # (see LICENSE file for the text of the license) ##
##########################################################################
"""A Coq domain for Sphinx.
Currently geared towards Coq's manual, rather than Coq source files, but one
could imagine extending it.
"""
# pylint: disable=missing-type-doc, missing-param-doc
# pylint: disable=missing-return-type-doc, missing-return-doc
# pylint: disable=too-few-public-methods, too-many-ancestors, arguments-differ
# pylint: disable=import-outside-toplevel, abstract-method, too-many-lines
import os
import re
from itertools import chain
from collections import defaultdict
from docutils import nodes, utils
from docutils.transforms import Transform
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.roles import code_role #, set_classes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType, Index
from sphinx.errors import ExtensionError
from sphinx.roles import XRefRole
from sphinx.util.docutils import ReferenceRole
from sphinx.util.logging import getLogger, get_node_location
from sphinx.util.nodes import set_source_info, set_role_source_info, make_refnode
from sphinx.writers.latex import LaTeXTranslator
from . import coqdoc
from .repl import ansicolors
from .repl.coqtop import CoqTop, CoqTopError
from .notations.parsing import ParseError
from .notations.sphinx import sphinxify
from .notations.plain import stringify_with_ellipses
# FIXME: Patch this in Sphinx
# https://github.com/coq/coq/issues/12361
def visit_desc_signature(self, node):
    """Patched LaTeX visitor: emit hypertargets ahead of the signature line.

    Replaces Sphinx's LaTeXTranslator.visit_desc_signature (see the FIXME
    above) so that every id attached to the signature gets a hypertarget.
    """
    if node.parent['objtype'] != 'describe' and node['ids']:
        targets = ''.join(self.hypertarget(anchor) for anchor in node['ids'])
    else:
        targets = ''
    self.body.append(targets)
    if node.get('is_multiline'):
        self.body.append('%\n\\pysigstartmultiline\n')
    else:
        self._visit_signature_line(node)
LaTeXTranslator.visit_desc_signature = visit_desc_signature
# Message template used by the notation_to_* helpers below; filled with
# (basename, line, notation, error message).
PARSE_ERROR = """{}:{} Parse error in notation!
Offending notation: {}
Error message: {}"""
def notation_to_sphinx(notation, source, line, rawtext=None):
    """Parse ``notation`` and return it wrapped in an inline docutils node.

    ``source`` and ``line`` record the origin of the notation; they are also
    used to produce a readable error when the notation does not parse.
    """
    try:
        children = sphinxify(notation)
    except ParseError as err:
        msg = PARSE_ERROR.format(os.path.basename(source), line, notation, err.msg)
        raise ExtensionError(msg) from err
    wrapper = nodes.inline(rawtext or notation, '', *children, classes=['notation'])
    wrapper.source = source
    wrapper.line = line
    return wrapper
def notation_to_string(notation):
    """Parse notation and format it as a string with ellipses.

    :raises ExtensionError: if ``notation`` cannot be parsed.
    """
    try:
        return stringify_with_ellipses(notation)
    except ParseError as e:
        # BUGFIX: the previous code reused PARSE_ERROR with `source` and
        # `line`, which are not defined in this function (see cc93f419e0),
        # so any parse failure crashed with a NameError instead of the
        # intended diagnostic.  Report the error without a file location.
        MSG = "Parse error in notation!\nOffending notation: {}\nError message: {}"
        raise ExtensionError(MSG.format(notation, e.msg)) from e
def highlight_using_coqdoc(sentence):
    """Lex ``sentence`` with coqdoc and yield one inline node per token."""
    unescaped = utils.unescape(sentence, 1)
    for classes, value in coqdoc.lex(unescaped):
        yield nodes.inline(value, value, classes=classes)
def make_target(objtype, targetid):
    """Return the link target for an object of type *objtype* with id
    *targetid*, e.g. ``coq:tacn.exact``."""
    return "coq:" + objtype + "." + targetid
def make_math_node(latex, docname, nowrap):
    """Build an (unnumbered) docutils ``math_block`` node for *latex*."""
    node = nodes.math_block(latex, latex)
    # 'label' and 'number' stay None so the equation is not numbered.
    attributes = {'label': None, 'nowrap': nowrap, 'docname': docname, 'number': None}
    for key, value in attributes.items():
        node[key] = value
    return node
class CoqObject(ObjectDescription):
    """A generic Coq object for Sphinx; all Coq objects are subclasses of this.
    The fields and methods to override are listed at the top of this class'
    implementation. Each object supports the :name: option, which gives an
    explicit name to link to.
    See the comments and docstrings in CoqObject for more information.
    """

    # The semantic domain in which this object lives (eg. “tac”, “cmd”, “chm”…).
    # It matches exactly one of the roles used for cross-referencing.
    subdomain = None  # type: str

    # The suffix to use in indices for objects of this type (eg. “(tac)”)
    index_suffix = None  # type: str

    # The annotation to add to headers of objects of this type
    # (eg. “Command”, “Theorem”)
    annotation = None  # type: str

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps signatures to link names; populated by _prepare_names() in run().
        self._sig_names = None

    def _name_from_signature(self, signature):  # pylint: disable=no-self-use, unused-argument
        """Convert a signature into a name to link to.
        ‘Signature’ is Sphinx parlance for an object's header (think “type
        signature”); for example, the signature of the simplest form of the
        ``exact`` tactic is ``exact @id``.
        Generates a name for the directive. Override this method to return None
        to avoid generating a name automatically. This is a convenient way
        to automatically generate names (link targets) without having to write
        explicit names everywhere.
        """
        # Default: take the leading run of word characters and spaces.
        m = re.match(r"[a-zA-Z0-9_ ]+", signature)
        if m:
            return m.group(0).strip()

    def _render_signature(self, signature, signode):
        """Render a signature, placing resulting nodes into signode."""
        # Subclasses must implement this (see PlainObject / NotationObject).
        raise NotImplementedError(self)

    option_spec = {
        # Explicit object naming
        'name': directives.unchanged,
        # Silence warnings produced by report_undocumented_coq_objects
        'undocumented': directives.flag,
        # noindex omits this object from its index
        'noindex': directives.flag
    }

    def subdomain_data(self):
        """Return the name→object map for this object's subdomain."""
        if self.subdomain is None:
            raise ValueError()
        return self.env.domaindata['coq']['objects'][self.subdomain]

    def _render_annotation(self, signode):
        # Prepend e.g. “Command ” or “Theorem ” to the signature, if any.
        if self.annotation:
            annot_node = nodes.inline(self.annotation, self.annotation, classes=['sigannot'])
            signode += addnodes.desc_annotation(self.annotation, '', annot_node)
            signode += nodes.Text(' ')

    def handle_signature(self, signature, signode):
        """Prefix signature with the proper annotation, then render it using
        ``_render_signature`` (for example, add “Command” in front of commands).
        :returns: the names given to the resulting node.
        """
        self._render_annotation(signode)
        self._render_signature(signature, signode)
        # Prefer explicit :name: options (collected in _prepare_names); fall
        # back to a name derived from the signature itself.
        names = self._sig_names.get(signature)
        if names is None:
            name = self._name_from_signature(signature)  # pylint: disable=assignment-from-none
            # remove trailing ‘.’ found in commands, but not ‘...’ (ellipsis)
            if name is not None and name.endswith(".") and not name.endswith("..."):
                name = name[:-1]
            names = [name] if name else None
        return names

    def _warn_if_duplicate_name(self, objects, name, signode):
        """Check that two objects in the same domain don't have the same name."""
        if name in objects:
            MSG = 'Duplicate name {} (other is in {}) attached to {}'
            msg = MSG.format(name, self.env.doc2path(objects[name][0]), signode)
            self.state_machine.reporter.warning(msg, line=self.lineno)

    def _record_name(self, name, target_id, signode):
        """Record a `name` in the current subdomain, mapping it to `target_id`.
        Warns if another object of the same name already exists; `signode` is
        used in the warning.
        """
        names_in_subdomain = self.subdomain_data()
        self._warn_if_duplicate_name(names_in_subdomain, name, signode)
        names_in_subdomain[name] = (self.env.docname, self.objtype, target_id)

    def _target_id(self, name):
        # e.g. “coq:tacn.exact” for a tactic named “exact”.
        return make_target(self.objtype, nodes.make_id(name))

    def _add_target(self, signode, name):
        """Register a link target ‘name’, pointing to signode."""
        targetid = self._target_id(name)
        if targetid not in self.state.document.ids:
            signode['ids'].append(targetid)
            signode['names'].append(name)
            # 'first' is True only for the first signature of this directive.
            signode['first'] = (not self.names)
            self._record_name(name, targetid, signode)
        return targetid

    def _add_index_entry(self, name, target):
        """Add `name` (pointing to `target`) to the main index."""
        assert isinstance(name, str)
        # remove trailing . , found in commands, but not ... (ellipsis)
        trim = name.endswith(".") and not name.endswith("...")
        index_text = name[:-1] if trim else name
        if self.index_suffix:
            index_text += " " + self.index_suffix
        self.indexnode['entries'].append(('single', index_text, target, '', None))

    def add_target_and_index(self, names, _, signode):
        """Attach a link target to `signode` and index entries for `names`.
        This is only called (from ``ObjectDescription.run``) if ``:noindex:`` isn't specified."""
        if names:
            for name in names:
                # Names starting with '_' are link targets without index entries.
                if isinstance(name, str) and name.startswith('_'):
                    continue
                target = self._add_target(signode, name)
                self._add_index_entry(name, target)
                self.state.document.note_explicit_target(signode)

    def _prepare_names(self):
        """Construct ``self._sig_names``, a map from signatures to names.
        A node may have either one signature with no name, multiple signatures
        with one name per signatures, or one signature with multiple names.
        """
        sigs = self.get_signatures()
        names = self.options.get("name")
        if names is None:
            self._sig_names = {}
        else:
            names = [n.strip() for n in names.split(";")]
            if len(names) != len(sigs):
                if len(sigs) != 1:  # Multiple names for one signature
                    ERR = ("Expected {} semicolon-separated names, got {}. " +
                           "Please provide one name per signature line.")
                    raise self.error(ERR.format(len(names), len(sigs)))
                self._sig_names = { sigs[0]: names }
            else:
                self._sig_names = { sig: [name] for (sig, name) in zip(sigs, names) }

    def run(self):
        self._prepare_names()
        return super().run()
class DocumentableObject(CoqObject):
    """A CoqObject that may warn when its directive has no body.

    The warning is controlled by the ``report_undocumented_coq_objects``
    config value and can be silenced per-object with ``:undocumented:``.
    """

    def _warn_if_undocumented(self):
        document = self.state.document
        config = document.settings.env.config
        report = config.report_undocumented_coq_objects
        if report and not self.content and "undocumented" not in self.options:
            # This is annoyingly convoluted, but we don't want to raise warnings
            # or interrupt the generation of the current node. For more details
            # see https://github.com/sphinx-doc/sphinx/issues/4976.
            msg = 'No contents in directive {}'.format(self.name)
            node = document.reporter.info(msg, line=self.lineno)
            getLogger(__name__).info(node.astext())
            if report == "warning":
                raise self.warning(msg)

    def run(self):
        self._warn_if_undocumented()
        return super().run()
class PlainObject(DocumentableObject):
    """A base class for objects whose signatures should be rendered literally."""

    def _render_signature(self, signature, signode):
        # No notation parsing: the signature text is emitted verbatim.
        signode += addnodes.desc_name(signature, signature)
class NotationObject(DocumentableObject):
    """A base class for objects whose signatures should be rendered as nested boxes.
    Objects that inherit from this class can use the notation grammar (“{+ …}”,
    “@…”, etc.) in their signature.
    """

    def _render_signature(self, signature, signode):
        # Parse the signature with the notation grammar before rendering.
        position = self.state_machine.get_source_and_line(self.lineno)
        tacn_node = notation_to_sphinx(signature, *position)
        signode += addnodes.desc_name(signature, '', tacn_node)
class GallinaObject(PlainObject):
    r"""A theorem.
    Example::
        .. thm:: Bound on the ceiling function
            Let :math:`p` be an integer and :math:`c` a rational constant. Then
            :math:`p \ge c \rightarrow p \ge \lceil{c}\rceil`.
    """
    subdomain = "thm"
    index_suffix = "(theorem)"
    annotation = "Theorem"
class VernacObject(NotationObject):
    """A Coq command.
    Example::
        .. cmd:: Infix @string := @one_term {? ( {+, @syntax_modifier } ) } {? : @ident }
            This command is equivalent to :n:`…`.
    """
    subdomain = "cmd"
    index_suffix = "(command)"
    annotation = "Command"

    def _name_from_signature(self, signature):
        # Unlike the base class, digits and underscores are excluded here:
        # a command's name is only its leading keywords.
        m = re.match(r"[a-zA-Z ]+", signature)
        return m.group(0).strip() if m else None
class VernacVariantObject(VernacObject):
    """A variant of a Coq command.
    Example::
        .. cmd:: Axiom @ident : @term.
            This command links :token:`term` to the name :token:`term` as its specification in
            the global context. The fact asserted by :token:`term` is thus assumed as a
            postulate.
            .. cmdv:: Parameter @ident : @term.
                This is equivalent to :n:`Axiom @ident : @term`.
    """
    index_suffix = "(command variant)"
    annotation = "Variant"

    def _name_from_signature(self, signature):
        # Variants are not automatically named (use :name: when needed).
        return None
class TacticObject(NotationObject):
    """A tactic, or a tactic notation.
    Example::
        .. tacn:: do @natural @expr
            :token:`expr` is evaluated to ``v`` which must be a tactic value. …
    """
    subdomain = "tacn"
    index_suffix = "(tactic)"
    annotation = "Tactic"
class AttributeObject(NotationObject):
    """An attribute.
    Example::
        .. attr:: local
    """
    subdomain = "attr"
    index_suffix = "(attribute)"
    annotation = "Attribute"

    def _name_from_signature(self, signature):
        # Attribute signatures may contain notation; render them to a plain
        # string (with ellipses) to obtain the link name.
        return notation_to_string(signature)
class TacticVariantObject(TacticObject):
    """A variant of a tactic.
    Example::
        .. tacn:: fail
            This is the always-failing tactic: it does not solve any goal. It is
            useful for defining other tacticals since it can be caught by
            :tacn:`try`, :tacn:`repeat`, :tacn:`match goal`, or the branching
            tacticals. …
            .. tacv:: fail @natural
                The number is the failure level. If no level is specified, it
                defaults to 0. …
    """
    index_suffix = "(tactic variant)"
    annotation = "Variant"

    def _name_from_signature(self, signature):
        # Variants are not automatically named (use :name: when needed).
        return None
class OptionObject(NotationObject):
    """A Coq option (a setting with non-boolean value, e.g. a string or numeric value).
    Example::
        .. opt:: Hyps Limit @natural
            :name: Hyps Limit
            Controls the maximum number of hypotheses displayed in goals after
            application of a tactic.
    """
    subdomain = "opt"
    index_suffix = "(option)"
    annotation = "Option"
class FlagObject(NotationObject):
    """A Coq flag (i.e. a boolean setting).
    Example::
        .. flag:: Nonrecursive Elimination Schemes
            Controls whether types declared with the keywords
            :cmd:`Variant` and :cmd:`Record` get an automatic declaration of
            induction principles.
    """
    subdomain = "flag"
    index_suffix = "(flag)"
    annotation = "Flag"
class TableObject(NotationObject):
    """A Coq table, i.e. a setting that is a set of values.
    Example::
        .. table:: Search Blacklist @string
            :name: Search Blacklist
            Controls ...
    """
    subdomain = "table"
    index_suffix = "(table)"
    annotation = "Table"
class ProductionObject(CoqObject):
    r"""A grammar production.
    Use ``.. prodn`` to document grammar productions instead of Sphinx
    `production lists
    <http://www.sphinx-doc.org/en/stable/markup/para.html#directive-productionlist>`_.
    prodn displays multiple productions together with alignment similar to ``.. productionlist``,
    however unlike ``.. productionlist``\ s, this directive accepts notation syntax.
    Example::
        .. prodn:: occ_switch ::= { {? {| + | - } } {* @natural } }
           term += let: @pattern := @term in @term
           | second_production
       The first line defines "occ_switch", which must be unique in the document. The second
       references and expands the definition of "term", whose main definition is elsewhere
       in the document. The third form is for continuing the
       definition of a nonterminal when it has multiple productions. It leaves the first
       column in the output blank.
    """
    subdomain = "prodn"
    #annotation = "Grammar production"

    # handle_signature is called for each line of input in the prodn::
    # 'signatures' accumulates them in order to combine the lines into a single table:
    signatures = None  # FIXME this should be in init, shouldn't it?

    def _render_signature(self, signature, signode):
        # prodn builds its own table in run(); plain rendering is unsupported.
        raise NotImplementedError(self)

    SIG_ERROR = ("{}: Invalid syntax in ``.. prodn::`` directive"
                 + "\nExpected ``name ::= ...`` or ``name += ...``"
                 + " (e.g. ``pattern += constr:(@ident)``)\n"
                 + " in `{}`")

    def handle_signature(self, signature, signode):
        """Parse one production line into (lhs, op, rhs, tag) and accumulate it."""
        # A line starting with “|” continues the previous nonterminal.
        parts = signature.split(maxsplit=1)
        if parts[0].strip() == "|" and len(parts) == 2:
            lhs = ""
            op = "|"
            rhs = parts[1].strip()
        else:
            parts = signature.split(maxsplit=2)
            if len(parts) != 3:
                loc = os.path.basename(get_node_location(signode))
                raise ExtensionError(ProductionObject.SIG_ERROR.format(loc, signature))
            lhs, op, rhs = (part.strip() for part in parts)
            if op not in ["::=", "+="]:
                loc = os.path.basename(get_node_location(signode))
                raise ExtensionError(ProductionObject.SIG_ERROR.format(loc, signature))
        # Anything after the first space of the rhs is an optional tag column.
        parts = rhs.split(" ", maxsplit=1)
        rhs = parts[0].strip()
        tag = parts[1].strip() if len(parts) == 2 else ""
        self.signatures.append((lhs, op, rhs, tag))
        # Only a “::=” line defines a new token (and hence a link target).
        return [('token', lhs)] if op == '::=' else None

    def _add_index_entry(self, name, target):
        # Grammar tokens are not added to the main index.
        pass

    def _target_id(self, name):
        # `name` is the ('token', lhs) tuple returned by handle_signature.
        return 'grammar-token-{}'.format(nodes.make_id(name[1]))

    def _record_name(self, name, targetid, signode):
        # Productions are recorded in the 'std' domain (like Sphinx's own
        # productionlist), not in the Coq domain.
        env = self.state.document.settings.env
        objects = env.domaindata['std']['objects']
        self._warn_if_duplicate_name(objects, name, signode)
        objects[name] = env.docname, targetid

    def run(self):
        """Collect all signature lines, then emit them as one aligned table."""
        self.signatures = []
        indexnode = super().run()[0]  # makes calls to handle_signature
        table = nodes.container(classes=['prodn-table'])
        tgroup = nodes.container(classes=['prodn-column-group'])
        for _ in range(4):
            tgroup += nodes.container(classes=['prodn-column'])
        table += tgroup
        tbody = nodes.container(classes=['prodn-row-group'])
        table += tbody
        # create rows
        for signature in self.signatures:
            lhs, op, rhs, tag = signature
            position = self.state_machine.get_source_and_line(self.lineno)
            row = nodes.container(classes=['prodn-row'])
            entry = nodes.container(classes=['prodn-cell-nonterminal'])
            if lhs != "":
                target_name = 'grammar-token-' + nodes.make_id(lhs)
                target = nodes.target('', '', ids=[target_name], names=[target_name])
                # putting prodn-target on the target node won't appear in the tex file
                inline = nodes.inline(classes=['prodn-target'])
                inline += target
                entry += inline
                entry += notation_to_sphinx('@'+lhs, *position)
            else:
                entry += nodes.Text('')
            row += entry
            entry = nodes.container(classes=['prodn-cell-op'])
            entry += nodes.Text(op)
            row += entry
            entry = nodes.container(classes=['prodn-cell-production'])
            entry += notation_to_sphinx(rhs, *position)
            row += entry
            entry = nodes.container(classes=['prodn-cell-tag'])
            entry += nodes.Text(tag)
            row += entry
            tbody += row
        return [indexnode, table]  # only this node goes into the doc
class ExceptionObject(NotationObject):
    """An error raised by a Coq command or tactic.
    This commonly appears nested in the ``.. tacn::`` that raises the
    exception.
    Example::
        .. tacv:: assert @form by @tactic
            This tactic applies :n:`@tactic` to solve the subgoals generated by
            ``assert``.
            .. exn:: Proof is not complete
                Raised if :n:`@tactic` does not fully solve the goal.
    """
    subdomain = "exn"
    index_suffix = "(error)"
    annotation = "Error"
    # Uses “exn” since “err” already is a CSS class added by “writer_aux”.

    # Generate names automatically
    def _name_from_signature(self, signature):
        return notation_to_string(signature)
class WarningObject(NotationObject):
    """A warning raised by a Coq command or tactic.
    Do not mistake this for ``.. warning::``; this directive is for warning
    messages produced by Coq.
    Example::
        .. warn:: Ambiguous path
            When the coercion :token:`qualid` is added to the inheritance graph, non
            valid coercion paths are ignored.
    """
    subdomain = "warn"
    index_suffix = "(warning)"
    annotation = "Warning"

    # Generate names automatically
    def _name_from_signature(self, signature):
        return notation_to_string(signature)
def NotationRole(role, rawtext, text, lineno, inliner, options={}, content=[]):
    #pylint: disable=unused-argument, dangerous-default-value
    """Any text using the notation syntax (``@id``, ``{+, …}``, etc.).
    Use this to explain tactic equivalences. For example, you might write
    this::
        :n:`generalize @term as @ident` is just like :n:`generalize @term`, but
        it names the introduced hypothesis :token:`ident`.
    Note that this example also uses ``:token:``. That's because ``ident`` is
    defined in the Coq manual as a grammar production, and ``:token:``
    creates a link to that. When referring to a placeholder that happens to be
    a grammar production, ``:token:`…``` is typically preferable to ``:n:`@…```.
    """
    notation = utils.unescape(text, 1)
    position = inliner.reporter.get_source_and_line(lineno)
    # Roles return (nodes, system_messages); no messages are produced here.
    return [nodes.literal(rawtext, '', notation_to_sphinx(notation, *position, rawtext=rawtext))], []
def coq_code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    #pylint: disable=dangerous-default-value
    """Coq code.
    Use this for Gallina and Ltac snippets::
        :g:`apply plus_comm; reflexivity`
        :g:`Set Printing All.`
        :g:`forall (x: t), P(x)`
    """
    # Delegate to docutils' generic code role with the Coq lexer selected.
    options['language'] = 'Coq'
    return code_role(role, rawtext, text, lineno, inliner, options, content)
    ## Too heavy:
    ## Forked from code_role to use our custom tokenizer; this doesn't work for
    ## snippets though: for example CoqDoc swallows the parentheses around this:
    ## “(a: A) (b: B)”
    # set_classes(options)
    # classes = ['code', 'coq']
    # code = utils.unescape(text, 1)
    # node = nodes.literal(rawtext, '', *highlight_using_coqdoc(code), classes=classes)
    # return [node], []

# Alias kept for registration under a second role name.
CoqCodeRole = coq_code_role
class CoqtopDirective(Directive):
    r"""A reST directive to describe interactions with Coqtop.
    Usage::
       .. coqtop:: options…
          Coq code to send to coqtop
    Example::
       .. coqtop:: in reset
          Print nat.
          Definition a := 1.
    The blank line after the directive is required.  If you begin a proof,
    use the ``abort`` option to reset coqtop for the next example.
    Here is a list of permissible options:
    - Display options (choose exactly one)
      - ``all``: Display input and output
      - ``in``: Display only input
      - ``out``: Display only output
      - ``none``: Display neither (useful for setup commands)
    - Behavior options
      - ``reset``: Send a ``Reset Initial`` command before running this block
      - ``fail``: Don't die if a command fails, implies ``warn`` (so no need to put both)
      - ``warn``: Don't die if a command emits a warning
      - ``restart``: Send a ``Restart`` command before running this block (only works in proof mode)
      - ``abort``: Send an ``Abort All`` command after running this block (leaves all pending proofs if any)
    ``coqtop``\ 's state is preserved across consecutive ``.. coqtop::`` blocks
    of the same document (``coqrst`` creates a single ``coqtop`` process per
    reST source file).  Use the ``reset`` option to reset Coq's state.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = { 'name': directives.unchanged }
    directive_name = "coqtop"

    def run(self):
        # Uses a ‘container’ instead of a ‘literal_block’ to disable
        # Pygments-based post-processing (we could also set rawsource to '')
        # The options are validated later, by CoqtopBlocksTransform.
        content = '\n'.join(self.content)
        args = self.arguments[0].split()
        node = nodes.container(content, coqtop_options = set(args),
                               classes=['coqtop', 'literal-block'])
        self.add_name(node)
        return [node]
class CoqdocDirective(Directive):
    """A reST directive to display Coqtop-formatted source code.
    Usage::
       .. coqdoc::
          Coq code to highlight
    Example::
       .. coqdoc::
          Definition test := 1.
    """
    # TODO implement this as a Pygments highlighter?
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = { 'name': directives.unchanged }
    directive_name = "coqdoc"

    def run(self):
        # Uses a ‘container’ instead of a ‘literal_block’ to disable
        # Pygments-based post-processing (we could also set rawsource to '')
        content = '\n'.join(self.content)
        node = nodes.inline(content, '', *highlight_using_coqdoc(content))
        wrapper = nodes.container(content, node, classes=['coqdoc', 'literal-block'])
        self.add_name(wrapper)
        return [wrapper]
class ExampleDirective(BaseAdmonition):
    """A reST directive for examples.
    This behaves like a generic admonition; see
    http://docutils.sourceforge.net/docs/ref/rst/directives.html#generic-admonition
    for more details.
    Optionally, any text immediately following the ``.. example::`` header is
    used as the example's title.
    Example::
       .. example:: Adding a hint to a database
          The following adds ``plus_comm`` to the ``plu`` database:
          .. coqdoc::
             Hint Resolve plus_comm : plu.
    """
    node_class = nodes.admonition
    directive_name = "example"
    optional_arguments = 1

    def run(self):
        # ‘BaseAdmonition’ checks whether ‘node_class’ is ‘nodes.admonition’,
        # and uses arguments[0] as the title in that case (in other cases, the
        # title is unset, and it is instead set in the HTML visitor).
        assert len(self.arguments) <= 1
        self.arguments = [": ".join(['Example'] + self.arguments)]
        self.options['classes'] = ['admonition', 'note']
        return super().run()
class PreambleDirective(Directive):
    r"""A reST directive to include a TeX file.
    Mostly useful to let MathJax know about `\def`\s and `\newcommand`\s.  The
    contents of the TeX file are wrapped in a math environment, as MathJax
    doesn't process LaTeX definitions otherwise.
    Usage::
       .. preamble:: preamble.tex
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    directive_name = "preamble"

    def run(self):
        document = self.state.document
        env = document.settings.env
        if not document.settings.file_insertion_enabled:
            msg = 'File insertion disabled'
            return [document.reporter.warning(msg, line=self.lineno)]
        # Register the TeX file as a dependency so edits trigger a rebuild.
        rel_fname, abs_fname = env.relfn2path(self.arguments[0])
        env.note_dependency(rel_fname)
        with open(abs_fname, encoding="utf-8") as ltx:
            latex = ltx.read()
        node = make_math_node(latex, env.docname, nowrap=False)
        node['classes'] = ["math-preamble"]
        set_source_info(self, node)
        return [node]
class InferenceDirective(Directive):
    r"""A reST directive to format inference rules.
    This also serves as a small illustration of the way to create new Sphinx
    directives.
    Usage::
       .. inference:: name
          newline-separated premises
          --------------------------
          conclusion
    Example::
       .. inference:: Prod-Pro
          \WTEG{T}{s}
          s \in \Sort
          \WTE{\Gamma::(x:T)}{U}{\Prop}
          -----------------------------
          \WTEG{\forall~x:T,U}{\Prop}
    """
    required_arguments = 1
    optional_arguments = 0
    has_content = True
    final_argument_whitespace = True
    directive_name = "inference"

    @staticmethod
    def prepare_latex_operand(op):
        # TODO: Could use a fancier inference class in LaTeX
        # Premises on separate lines are joined with horizontal spacing.
        return '%\n\\hspace{3em}%\n'.join(op.strip().splitlines())

    def prepare_latex(self, content):
        """Build the \\frac{premises}{conclusion} LaTeX for the rule."""
        parts = re.split('^ *----+ *$', content, flags=re.MULTILINE)
        if len(parts) != 2:
            raise self.error('Expected two parts in ‘inference’ directive, separated by a rule (----).')
        top, bottom = tuple(InferenceDirective.prepare_latex_operand(p) for p in parts)
        return "%\n".join(("\\frac{", top, "}{", bottom, "}"))

    def run(self):
        self.assert_has_content()
        title = self.arguments[0]
        content = '\n'.join(self.content)
        latex = self.prepare_latex(content)
        docname = self.state.document.settings.env.docname
        math_node = make_math_node(latex, docname, nowrap=False)
        # Make the rule linkable via an “inference-<name>” anchor.
        tid = nodes.make_id(title)
        target = nodes.target('', '', ids=['inference-' + tid])
        self.state.document.note_explicit_target(target)
        term, desc = nodes.term('', title), nodes.description('', math_node)
        dli = nodes.definition_list_item('', term, desc)
        dl = nodes.definition_list(content, target, dli)
        set_source_info(self, dl)
        return [dl]
class AnsiColorsParser():
    """Parse ANSI-colored output from Coqtop into Sphinx nodes."""
    # Coqtop's output crashes ansi.py, because it contains a bunch of extended codes
    # This class is a fork of the original ansi.py, released under a BSD license in sphinx-contribs

    # Matches one ANSI escape sequence, capturing the SGR parameter list.
    COLOR_PATTERN = re.compile('\x1b\\[([^m]+)m')

    def __init__(self):
        # new_nodes: finished nodes; pending_nodes: stack of styled nodes
        # still open (awaiting an ansi-reset).
        self.new_nodes, self.pending_nodes = [], []

    def _finalize_pending_nodes(self):
        # Close all currently-open styled nodes.
        self.new_nodes.extend(self.pending_nodes)
        self.pending_nodes = []

    def _add_text(self, raw, beg, end):
        # Attach raw[beg:end] to the innermost open styled node, or emit it
        # as an unstyled inline node if no style is active.
        if beg < end:
            text = raw[beg:end]
            if self.pending_nodes:
                self.pending_nodes[-1].append(nodes.Text(text))
            else:
                self.new_nodes.append(nodes.inline('', text))

    def colorize_str(self, raw):
        """Parse raw (an ANSI-colored output string from Coqtop) into Sphinx nodes."""
        last_end = 0
        for match in AnsiColorsParser.COLOR_PATTERN.finditer(raw):
            self._add_text(raw, last_end, match.start())
            last_end = match.end()
            classes = ansicolors.parse_ansi(match.group(1))
            if 'ansi-reset' in classes:
                self._finalize_pending_nodes()
            else:
                node = nodes.inline()
                self.pending_nodes.append(node)
                node['classes'].extend(classes)
        self._add_text(raw, last_end, len(raw))
        self._finalize_pending_nodes()
        return self.new_nodes
class CoqtopBlocksTransform(Transform):
"""Filter handling the actual work for the coqtop directive
Adds coqtop's responses, colorizes input and output, and merges consecutive
coqtop directives for better visual rendition.
"""
default_priority = 10
    @staticmethod
    def is_coqtop_block(node):
        # Nodes produced by CoqtopDirective carry a 'coqtop_options' attribute.
        return isinstance(node, nodes.Element) and 'coqtop_options' in node
def split_lines(source):
r"""Split Coq input in chunks
A chunk is a minimal sequence of consecutive lines of the input that
ends with a '.'
>>> split_lines('A.\nB.''')
['A.', 'B.']
>>> split_lines('A.\n\nB.''')
['A.', '\nB.']
>>> split_lines('A.\n\nB.\n''')
['A.', '\nB.']
>>> split_lines("SearchPattern (_ + _ = _ + _).\n"
... "SearchPattern (nat -> bool).\n"
... "SearchPattern (forall l : list _, _ l l).")
... # doctest: +NORMALIZE_WHITESPACE
['SearchPattern (_ + _ = _ + _).',
'SearchPattern (nat -> bool).',
'SearchPattern (forall l : list _, _ l l).']
>>> split_lines('SearchHead le.\nSearchHead (@eq bool).')
['SearchHead le.', 'SearchHead (@eq bool).']
"""
return re.split(r"(?<=(?<!\.)\.)\n", source.strip())
    @staticmethod
    def parse_options(node):
        """Parse options according to the description in CoqtopDirective.

        Returns a dict of booleans; raises ExtensionError on unknown options
        or when the number of display options is not exactly one.
        """
        options = node['coqtop_options']
        # Behavior options
        opt_reset = 'reset' in options
        opt_fail = 'fail' in options
        opt_warn = 'warn' in options
        opt_restart = 'restart' in options
        opt_abort = 'abort' in options
        options = options - {'reset', 'fail', 'warn', 'restart', 'abort'}
        # What remains must be exactly one display option.
        unexpected_options = list(options - {'all', 'none', 'in', 'out'})
        if unexpected_options:
            loc = os.path.basename(get_node_location(node))
            raise ExtensionError("{}: Unexpected options for .. coqtop:: {}".format(loc,unexpected_options))
        # Display options
        if len(options) != 1:
            loc = os.path.basename(get_node_location(node))
            raise ExtensionError("{}: Exactly one display option must be passed to .. coqtop::".format(loc))
        opt_all = 'all' in options
        opt_input = 'in' in options
        opt_output = 'out' in options
        return {
            'reset': opt_reset,
            'fail': opt_fail,
            # if errors are allowed, then warnings too
            # and they should be displayed as warnings, not errors
            'warn': opt_warn or opt_fail,
            'restart': opt_restart,
            'abort': opt_abort,
            'input': opt_input or opt_all,
            'output': opt_output or opt_all
        }
@staticmethod
def block_classes(should_show, contents=None):
"""Compute classes to add to a node containing contents.
:param should_show: Whether this node should be displayed"""
is_empty = contents is not None and re.match(r"^\s*$", contents)
return ['coqtop-hidden'] if is_empty or not should_show else []
@staticmethod
def make_rawsource(pairs, opt_input, opt_output):
blocks = []
for sentence, output in pairs:
output = AnsiColorsParser.COLOR_PATTERN.sub("", output).strip()
if opt_input:
blocks.append(sentence)
if output and opt_output:
blocks.append(re.sub("^", " ", output, flags=re.MULTILINE) + "\n")
return '\n'.join(blocks)
def add_coq_output_1(self, repl, node):
options = self.parse_options(node)
pairs = []
if options['restart']:
repl.sendone('Restart.')
if options['reset']:
repl.sendone('Reset Initial.')
repl.send_initial_options()
if options['fail']:
repl.sendone('Unset Coqtop Exit On Error.')
if options['warn']:
repl.sendone('Set Warnings "default".')
for sentence in self.split_lines(node.rawsource):
pairs.append((sentence, repl.sendone(sentence)))
if options['abort']:
repl.sendone('Abort All.')
if options['fail']:
repl.sendone('Set Coqtop Exit On Error.')
if options['warn']:
repl.sendone('Set Warnings "+default".')
dli = nodes.definition_list_item()
for sentence, output in pairs:
# Use Coqdoc to highlight input
in_chunks = highlight_using_coqdoc(sentence)
dli += nodes.term(sentence, '', *in_chunks, classes=self.block_classes(options['input']))
# Parse ANSI sequences to highlight output
out_chunks = AnsiColorsParser().colorize_str(output)
dli += nodes.definition(output, *out_chunks, classes=self.block_classes(options['output'], output))
node.clear()
node.rawsource = self.make_rawsource(pairs, options['input'], options['output'])
node['classes'].extend(self.block_classes(options['input'] or options['output']))
node += nodes.inline('', '', classes=['coqtop-reset'] * options['reset'])
node += nodes.definition_list(node.rawsource, dli)
def add_coqtop_output(self):
"""Add coqtop's responses to a Sphinx AST
Finds nodes to process using is_coqtop_block."""
with CoqTop(color=True) as repl:
repl.send_initial_options()
for node in self.document.traverse(CoqtopBlocksTransform.is_coqtop_block):
try:
self.add_coq_output_1(repl, node)
except CoqTopError as err:
import textwrap
MSG = ("{}: Error while sending the following to coqtop:\n{}" +
"\n coqtop output:\n{}" +
"\n Full error text:\n{}")
indent = " "
loc = get_node_location(node)
le = textwrap.indent(str(err.last_sentence), indent)
bef = textwrap.indent(str(err.before), indent)
fe = textwrap.indent(str(err.err), indent)
raise ExtensionError(MSG.format(loc, le, bef, fe))
@staticmethod
def merge_coqtop_classes(kept_node, discarded_node):
discarded_classes = discarded_node['classes']
if not 'coqtop-hidden' in discarded_classes:
kept_node['classes'] = [c for c in kept_node['classes']
if c != 'coqtop-hidden']
@staticmethod
def merge_consecutive_coqtop_blocks(_app, doctree, _):
"""Merge consecutive divs wrapping lists of Coq sentences; keep ‘dl’s separate."""
for node in doctree.traverse(CoqtopBlocksTransform.is_coqtop_block):
if node.parent:
rawsources, names = [node.rawsource], set(node['names'])
for sibling in node.traverse(include_self=False, descend=False,
siblings=True, ascend=False):
if CoqtopBlocksTransform.is_coqtop_block(sibling):
CoqtopBlocksTransform.merge_coqtop_classes(node, sibling)
rawsources.append(sibling.rawsource)
names.update(sibling['names'])
node.extend(sibling.children)
node.parent.remove(sibling)
sibling.parent = None
else:
break
node.rawsource = "\n\n".join(rawsources)
node['names'] = list(names)
def apply(self):
self.add_coqtop_output()
class CoqSubdomainsIndex(Index):
    """Index subclass to provide subdomain-specific indices.

    Just as in the original manual, we want to have separate indices for each
    Coq subdomain (tactics, commands, options, etc)"""

    # Subclasses must override these four attributes.
    name, localname, shortname, subdomains = None, None, None, []

    def generate(self, docnames=None):
        """Collect every object of our subdomains, grouped by first letter.

        :param docnames: if given, restrict entries to these documents."""
        by_letter = defaultdict(list)
        all_items = chain.from_iterable(
            self.domain.data['objects'][subdomain].items()
            for subdomain in self.subdomains)
        for objname, (docname, _, anchor) in sorted(all_items, key=lambda e: e[0].lower()):
            if docnames and docname not in docnames:
                continue
            by_letter[objname[0].lower()].append(
                [objname, 0, docname, anchor, '', '', ''])
        return sorted(by_letter.items()), False
class CoqVernacIndex(CoqSubdomainsIndex):
    """Index of Coq commands (the ``cmd`` subdomain)."""
    name = "cmdindex"
    localname = "Command Index"
    shortname = "commands"
    subdomains = ["cmd"]
class CoqTacticIndex(CoqSubdomainsIndex):
    """Index of Coq tactics (the ``tacn`` subdomain)."""
    name = "tacindex"
    localname = "Tactic Index"
    shortname = "tactics"
    subdomains = ["tacn"]
class CoqAttributeIndex(CoqSubdomainsIndex):
    """Index of Coq attributes (the ``attr`` subdomain)."""
    name = "attrindex"
    localname = "Attribute Index"
    shortname = "attributes"
    subdomains = ["attr"]
class CoqOptionIndex(CoqSubdomainsIndex):
    """Index of Coq flags, options and tables (three subdomains)."""
    name = "optindex"
    localname = "Flags, options and Tables Index"
    shortname = "options"
    subdomains = ["flag", "opt", "table"]
class CoqGallinaIndex(CoqSubdomainsIndex):
    """Index of Gallina theorems (the ``thm`` subdomain)."""
    name = "thmindex"
    localname = "Gallina Index"
    shortname = "theorems"
    subdomains = ["thm"]
class CoqExceptionIndex(CoqSubdomainsIndex):
    """Index of Coq errors and warnings (two subdomains)."""
    name = "exnindex"
    localname = "Errors and Warnings Index"
    shortname = "errors"
    subdomains = ["exn", "warn"]
class IndexXRefRole(XRefRole):
    """A link to one of our domain-specific indices."""
    lowercase = True
    innernodeclass = nodes.inline
    warn_dangling = True

    def process_link(self, env, refnode, has_explicit_title, title, target):
        """Default the link title to the index's local name when the role
        gives no explicit title."""
        if has_explicit_title:
            return title, target
        index = CoqDomain.find_index_by_name(target)
        return (index.localname if index else title), target
class StdGlossaryIndex(Index):
    """Index of glossary terms defined with the ``:gdef:`` role."""
    name, localname, shortname = "glossindex", "Glossary", "terms"

    def generate(self, docnames=None):
        """Group all ``term`` objects of the std domain by first letter.

        :param docnames: if given, restrict entries to these documents.
        :returns: ``(sorted (letter, entries) pairs, collapse flag)``
        """
        content = defaultdict(list)
        # Renamed the loop variable from ``type`` to avoid shadowing the builtin.
        for ((objtype, itemname), (docname, anchor)) in self.domain.data['objects'].items():
            if objtype == 'term':
                entries = content[itemname[0].lower()]
                entries.append([itemname, 0, docname, anchor, '', '', ''])
        content = sorted(content.items())
        return content, False
def GrammarProductionRole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """A grammar production not included in a ``prodn`` directive.

    Useful to informally introduce a production, as part of running text.

    Example::

       :production:`string` indicates a quoted string.

    You're not likely to use this role very commonly; instead, use a ``prodn``
    directive and reference its tokens using ``:token:`…```.
    """
    #pylint: disable=dangerous-default-value, unused-argument
    document = inliner.document
    env = document.settings.env
    # Create an anchor so that :token: references can link here.
    anchor = nodes.make_id('grammar-token-{}'.format(text))
    anchor_node = nodes.target('', '', ids=[anchor])
    document.note_explicit_target(anchor_node)
    literal = nodes.literal(rawtext, text, role=typ.lower())
    wrapper = nodes.inline(rawtext, '', anchor_node, literal,
                           classes=['inline-grammar-production'])
    set_role_source_info(inliner, lineno, wrapper)
    # Register the token in the std domain so cross-references resolve.
    env.domaindata['std']['objects']['token', text] = env.docname, anchor
    return [wrapper], []

GrammarProductionRole.role_name = "production"
def GlossaryDefRole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """Marks the definition of a glossary term inline in the text.  Matching
    :term:`XXX` constructs will link to it.  Use the form
    :gdef:`text <term>` to display "text" for the definition of "term", such
    as when "term" must be capitalized or plural for grammatical reasons.
    The term will also appear in the Glossary Index.

    Examples::

       A :gdef:`prime` number is divisible only by itself and 1.
       :gdef:`Composite <composite>` numbers are the non-prime numbers.
    """
    #pylint: disable=dangerous-default-value, unused-argument
    env = inliner.document.settings.env
    objects = env.domaindata['std']['objects']
    # Support the explicit-title form ``display <term>``.
    match = ReferenceRole.explicit_title_re.match(text)
    if match:
        (display, term) = match.groups()
        display = display.strip()
    else:
        display = term = text
    key = ('term', term)
    if key in objects:
        inliner.document.reporter.warning(
            'Duplicate object: {}; other is at {}'.format(
                term, env.doc2path(objects[key][0])),
            line=lineno)
    anchor = nodes.make_id('term-{}'.format(term))
    objects[key] = (env.docname, anchor)
    target = nodes.target('', '', ids=[anchor], names=[term])
    inliner.document.note_explicit_target(target)
    node = nodes.inline(rawtext, '', target, nodes.Text(display), classes=['term-defn'])
    set_role_source_info(inliner, lineno, node)
    return [node], []

GlossaryDefRole.role_name = "gdef"
class CoqDomain(Domain):
    """A domain to document Coq code.

    Sphinx has a notion of “domains”, used to tailor it to a specific language.
    Domains mostly consist in descriptions of the objects that we wish to
    describe (for Coq, this includes tactics, tactic notations, options,
    exceptions, etc.), as well as domain-specific roles and directives.

    Each domain is responsible for tracking its objects, and resolving
    references to them. In the case of Coq, this leads us to define Coq
    “subdomains”, which classify objects into categories in which names must be
    unique. For example, a tactic and a theorem may share a name, but two
    tactics cannot be named the same.
    """

    name = 'coq'
    label = 'Coq'

    object_types = {
        # ObjType (= directive type) → (Local name, *xref-roles)
        'cmd': ObjType('cmd', 'cmd'),
        'cmdv': ObjType('cmdv', 'cmd'),
        'tacn': ObjType('tacn', 'tacn'),
        'tacv': ObjType('tacv', 'tacn'),
        'opt': ObjType('opt', 'opt'),
        'flag': ObjType('flag', 'flag'),
        'table': ObjType('table', 'table'),
        'attr': ObjType('attr', 'attr'),
        'thm': ObjType('thm', 'thm'),
        'prodn': ObjType('prodn', 'prodn'),
        'exn': ObjType('exn', 'exn'),
        'warn': ObjType('warn', 'exn'),
        'index': ObjType('index', 'index', searchprio=-1)
    }

    directives = {
        # Note that some directives live in the same semantic subdomain; ie
        # there's one directive per object type, but some object types map to
        # the same role.
        'cmd': VernacObject,
        'cmdv': VernacVariantObject,
        'tacn': TacticObject,
        'tacv': TacticVariantObject,
        'opt': OptionObject,
        'flag': FlagObject,
        'table': TableObject,
        'attr': AttributeObject,
        'thm': GallinaObject,
        'prodn' : ProductionObject,
        'exn': ExceptionObject,
        'warn': WarningObject,
    }

    roles = {
        # Each of these roles lives in a different semantic “subdomain”
        'cmd': XRefRole(warn_dangling=True),
        'tacn': XRefRole(warn_dangling=True),
        'opt': XRefRole(warn_dangling=True),
        'flag': XRefRole(warn_dangling=True),
        'table': XRefRole(warn_dangling=True),
        'attr': XRefRole(warn_dangling=True),
        'thm': XRefRole(warn_dangling=True),
        'prodn' : XRefRole(warn_dangling=True),
        'exn': XRefRole(warn_dangling=True),
        'warn': XRefRole(warn_dangling=True),
        # This one is special
        'index': IndexXRefRole(),
        # These are used for highlighting
        'n': NotationRole,
        'g': CoqCodeRole
    }

    indices = [CoqVernacIndex, CoqTacticIndex, CoqOptionIndex, CoqGallinaIndex, CoqExceptionIndex, CoqAttributeIndex]

    data_version = 1
    initial_data = {
        # Collect everything under a key that we control, since Sphinx adds
        # others, such as “version”
        'objects' : { # subdomain → name → docname, objtype, targetid
            'cmd': {},
            'tacn': {},
            'opt': {},
            'flag': {},
            'table': {},
            'attr': {},
            'thm': {},
            'prodn' : {},
            'exn': {},
            'warn': {},
        }
    }

    @staticmethod
    def find_index_by_name(targetid):
        """Return the index class whose ``name`` is ``targetid``, or None."""
        for index in CoqDomain.indices:
            if index.name == targetid:
                return index
        return None

    def get_objects(self):
        """Yield all objects of every subdomain, then one entry per index."""
        # Used for searching and object inventories (intersphinx)
        for _, objects in self.data['objects'].items():
            for name, (docname, objtype, targetid) in objects.items():
                yield (name, name, objtype, docname, targetid, self.object_types[objtype].attrs['searchprio'])
        for index in self.indices:
            yield (index.name, index.localname, 'index', "coq-" + index.name, '', -1)

    def merge_domaindata(self, docnames, otherdata):
        """Merge objects read by a parallel worker into our own tables,
        warning on duplicate declarations."""
        DUP = "Duplicate declaration: '{}' also defined in '{}'.\n"
        for subdomain, their_objects in otherdata['objects'].items():
            our_objects = self.data['objects'][subdomain]
            for name, (docname, objtype, targetid) in their_objects.items():
                if docname in docnames:
                    if name in our_objects:
                        self.env.warn(docname, DUP.format(name, our_objects[name][0]))
                    our_objects[name] = (docname, objtype, targetid)

    def resolve_xref(self, env, fromdocname, builder, role, targetname, node, contnode):
        """Resolve a cross-reference to an object or an index page."""
        # ‘target’ is the name that was written in the document
        # ‘role’ is where this xref comes from; it's exactly one of our subdomains
        if role == 'index':
            index = CoqDomain.find_index_by_name(targetname)
            if index:
                return make_refnode(builder, fromdocname, "coq-" + index.name, '', contnode, index.localname)
        else:
            resolved = self.data['objects'][role].get(targetname)
            if resolved:
                (todocname, _, targetid) = resolved
                return make_refnode(builder, fromdocname, todocname, targetid, contnode, targetname)
        return None

    def clear_doc(self, docname_to_clear):
        """Drop every object that was declared in ``docname_to_clear``
        (called by Sphinx before re-reading a changed document)."""
        for subdomain_objects in self.data['objects'].values():
            for name, (docname, _, _) in list(subdomain_objects.items()):
                if docname == docname_to_clear:
                    del subdomain_objects[name]
def is_coqtop_or_coqdoc_block(node):
    """Check whether ``node`` is a coqtop or coqdoc block (by its classes)."""
    if not isinstance(node, nodes.Element):
        return False
    classes = node['classes']
    return 'coqtop' in classes or 'coqdoc' in classes
def simplify_source_code_blocks_for_latex(app, doctree, fromdocname): # pylint: disable=unused-argument
    """Simplify coqdoc and coqtop blocks.

    In HTML mode, this does nothing; in other formats, such as LaTeX, it
    replaces coqdoc and coqtop blocks by plain text sources, which will use
    pygments if available.  This prevents the LaTeX builder from getting
    confused.
    """
    html = app.builder.tags.has("html")
    for block in doctree.traverse(is_coqtop_or_coqdoc_block):
        if html:
            # Prevent pygments from kicking in
            block.rawsource = ''
        elif 'coqtop-hidden' in block['classes']:
            block.parent.remove(block)
        else:
            source = block.rawsource
            block.replace_self(nodes.literal_block(source, source, language="Coq"))
# Directives registered globally, on top of the Coq domain's own directives.
COQ_ADDITIONAL_DIRECTIVES = [CoqtopDirective,
                             CoqdocDirective,
                             ExampleDirective,
                             InferenceDirective,
                             PreambleDirective]
# Roles registered globally (outside the Coq domain); each carries its own
# ``role_name`` attribute, used by ``setup`` below.
COQ_ADDITIONAL_ROLES = [GrammarProductionRole,
                        GlossaryDefRole]
def setup(app):
    """Register the Coq domain"""

    # A few sanity checks:
    subdomains = set(obj.subdomain for obj in CoqDomain.directives.values())
    # Every subdomain referenced by an index must come from some directive,
    # and every subdomain must have a matching xref role.
    found = set (obj for obj in chain(*(idx.subdomains for idx in CoqDomain.indices)))
    assert subdomains.issuperset(found), "Missing subdomains: {}".format(found.difference(subdomains))
    assert subdomains.issubset(CoqDomain.roles.keys()), \
        "Missing from CoqDomain.roles: {}".format(subdomains.difference(CoqDomain.roles.keys()))

    # Add domain, directives, and roles
    app.add_domain(CoqDomain)
    app.add_index_to_domain('std', StdGlossaryIndex)

    for role in COQ_ADDITIONAL_ROLES:
        app.add_role(role.role_name, role)

    for directive in COQ_ADDITIONAL_DIRECTIVES:
        app.add_directive(directive.directive_name, directive)

    app.add_transform(CoqtopBlocksTransform)
    app.connect('doctree-resolved', simplify_source_code_blocks_for_latex)
    app.connect('doctree-resolved', CoqtopBlocksTransform.merge_consecutive_coqtop_blocks)

    # Add extra styles
    app.add_css_file("ansi.css")
    app.add_css_file("coqdoc.css")
    app.add_js_file("notations.js")
    app.add_css_file("notations.css")
    app.add_css_file("pre-text.css")

    # Tell Sphinx about extra settings
    app.add_config_value("report_undocumented_coq_objects", None, 'env')

    # ``env_version`` is used by Sphinx to know when to invalidate
    # coqdomain-specific bits in its caches.  It should be incremented when the
    # contents of ``env.domaindata['coq']`` change.  See
    # `https://github.com/sphinx-doc/sphinx/issues/4460`.
    meta = { "version": "0.1",
             "env_version": 2,
             "parallel_read_safe": True }
    return meta
| gares/coq | doc/tools/coqrst/coqdomain.py | Python | lgpl-2.1 | 54,478 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright © 2014 Martin Ueding <dev@martin-ueding.de>
# Licensed under The Lesser GNU Public License Version 2 (or later)
from setuptools import setup, find_packages
# Package metadata; the authorship here (David Pine) differs from the
# copyright header above — presumably the setup script was adapted from
# another project.  TODO confirm with the maintainer.
setup(
    author="David Pine",
    description="Least squares linear fit for numpy library of Python",
    license="LGPL2",
    name="linfit",
    # Automatically pick up every package directory in the source tree.
    packages=find_packages(),
    install_requires=[
        'numpy',
    ],
    url="https://github.com/djpine/linfit",
    download_url="https://github.com/djpine/linfit",
    version="2014.9.3",
)
| djpine/linfit | setup.py | Python | lgpl-2.1 | 560 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import os
from rapidsms.router import Router
from rapidsms.backends.backend import Backend
from rapidsms.app import App
# a really dumb Logger stand-in
class MockLogger (list):
    """Records every log call as a tuple (by appending to itself), so tests
    can inspect what was logged.  NOTE: this is Python 2 code."""

    def __init__(self):
        # enable logging during tests with an
        # environment variable, since the runner
        # doesn't seem to have args
        self.to_console = os.environ.get("verbose", False)

    def write (self, *args):
        # args[2] is the message; any further args are %-formatted into it.
        # (presumably args[0]/args[1] are level and component — TODO confirm)
        if self.to_console:
            if len(args) == 3:
                print args[2]
            else:
                print args[2] % args[3:]
        self.append(args)
# a subclass of Router with all the moving parts replaced
class MockRouter (Router):
    """Router with logging and lifecycle stubbed out, for use in tests."""

    def __init__ (self):
        Router.__init__(self)
        self.logger = MockLogger()

    def add_backend (self, backend):
        # Bypass the real backend-loading machinery; just register it.
        self.backends.append(backend)

#    def add_app (self, app):
#        app.configure()
#        self.apps.append(app)

    def start (self):
        # Start synchronously (no main loop), so tests stay deterministic.
        self.running = True
        self.start_all_backends()
        self.start_all_apps()

    def stop (self):
        self.running = False
        self.stop_all_backends()
class MockBackend (Backend):
    """Backend stand-in that collects outgoing messages in a list instead of
    sending them anywhere."""

    def start (self):
        self._running = True
        self.outgoing = []

    def run (self):
        # Poll the queue until the router stops us; store what we would send.
        while self.running:
            msg = self.next_message(0.25)
            if msg is not None: self.outgoing.append(msg)
# a subclass of App with all the moving parts replaced
class MockApp (App):
    """App stand-in that records every lifecycle call in ``self.calls`` so
    tests can assert on the sequence of phases."""

    def configure (self):
        self.calls = []

    def start (self):
        self.calls.append(("start",))

    def parse (self, message):
        self.calls.append(("parse", message))

    def handle (self, message):
        self.calls.append(("handle", message))

    def cleanup (self, message):
        self.calls.append(("cleanup", message))

    def outgoing (self, message):
        self.calls.append(("outgoing", message))

    def stop (self):
        self.calls.append(("stop",))
class EchoApp (MockApp):
    """MockApp that additionally responds to each message by echoing
    "<peer>: <text>" back to its sender."""

    def handle (self, message):
        MockApp.handle(self, message)
        message.respond(message.peer + ": " + message.text)
| ewheeler/rapidsms-core | lib/rapidsms/tests/harness.py | Python | lgpl-3.0 | 2,151 |
from torrentstatus.plugin import iTorrentAction
from torrentstatus.utorrent.connection import Connection
from contextlib import contextmanager
from torrentstatus.bearlang import BearLang
from torrentstatus.settings import config, labels_config
from torrentstatus.utils import intTryParse
@contextmanager
def utorrent_connection(host, username, password):
    """Context manager yielding a ``(connection, error)`` pair.

    On success yields ``(conn, None)``; if connecting fails it yields
    ``(None, err)`` instead of raising, so callers can report the
    problem themselves.
    """
    conn, err = None, None
    try:
        conn = Connection(host, username, password).utorrent(None)
    except Exception as exc:
        err = exc
    if err is not None:
        yield None, err
    else:
        yield conn, None
def get_new_torrent_labels(labels, args):
    """Transforms torrent labels and args passing them into a BearLang Instance

    Parameters:
    labels (Dict) A dict of label and rules for that label
    args  (Dict) A dict of arguments, will be passed to Bearlang

    Returns:
    a list of labels that match the rules defined.
    """
    matched = []
    for label, ruleset in labels.items():
        # multiple rules accepted when configparser uses MultiOrderedDict
        for raw_rule in ruleset.split("\n"):
            rule = raw_rule.strip()
            is_match = BearLang(rule, args).execute()
            print("\nrule:{0}, label:{1}, ismatch: {2}\n".format(rule, label, is_match))
            if is_match:
                matched.append(label)
    return matched
# Module-level snapshot of the configuration, read once at import time.
settings = config.getSettingsAsDict()
class SetLabelsOnStart(iTorrentAction):
    """Torrent-start hook: computes matching labels for the torrent and, if
    any match, pushes them to the uTorrent web UI."""

    def onstart(self, pluginconfig, utorrentargs):
        # Expose the argparse namespace as a plain dict for BearLang rules.
        tempargs = vars(utorrentargs)

        # Use labels definition from config file and match them up against
        # provided input to the main script
        labels = labels_config.getSettingsAsDict()
        new_labels = get_new_torrent_labels(labels, tempargs)
        #only connect to utorrent if we need to do a label change
        if new_labels and intTryParse(settings["webui_enable"]) == 1:
            with utorrent_connection(settings["webui_host"],
                                     settings["webui_username"],
                                     settings["webui_password"]) as (conn, err):
                if err:
                    print("Could not connect to webui, make sure webui_host, "
                          "webui_username and webui_password is correctly "
                          "defined in configuration file. Error:{0}".format(err))
                else:
                    print("Connection to utorrent web ui ok")
                    print ("Got torrent '{0}' with hash {1} and tracker {2}. \n Setting new_labels: {3}"
                           .format(utorrentargs.torrentname, utorrentargs.hash, utorrentargs.tracker, new_labels))
                    if utorrentargs.debug:
                        # Dry-run mode: report but do not touch the client.
                        print("debug mode on, not doing update")
                        return
                    #remove existing label
                    conn.torrent_set_props([{utorrentargs.hash: {'label': ''}}])
                    #set new labels
                    for new_label in new_labels:
                        conn.torrent_set_props([{utorrentargs.hash: {'label': new_label}}])
                    return True
        else:
            print("Not trying to connect to webui")
            return False
| dabear/torrentstatus | torrentstatus/plugins/builtin.torrent.onstart.settorrentlabels.py | Python | lgpl-3.0 | 3,296 |
# tcpserv
#
# Copyright (c) 2015 Christian Sengstock, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
"""
Simple python socket helper library to implement
stateless tcp-servers.
Usage:
# Interface
>>> from tcpserv import listen, request
# Define server logic by a handler function:
# Gets a request string and returns a response string
>>> def my_handler(request): return "".join(reversed(request))
# Start the server
>>> listen("localhost", 55555, my_handler)
# Make requests
>>> for i in xrange(100):
>>> print request("localhost", 55555, "request %d" % i)
"""
import thread
import socket
import struct
# Wire format: every message is a 4-byte length prefix followed by the payload.
DATA_SIZE_TYPE = "!I" # unsigned 4-byte int, network byte-order
# num of bytes; should always be 4;
# don't know if struct ensures this.
DATA_SIZE_LEN = len(struct.pack(DATA_SIZE_TYPE, 0))
if DATA_SIZE_LEN != 4:
    raise ValueError(
        "To work on different machines struct <!I> type should have " + \
        "4 bytes. This is an implementation error!")
# NOTE(review): a 4-byte prefix can encode at most 2**32 - 1, so this bound
# looks off by one — a payload of exactly MAX_DATA bytes would overflow
# struct.pack.  Confirm before relying on the limit.
MAX_DATA = 2**(DATA_SIZE_LEN*8)
def listen(host, port, handler):
    """
    Listens on "host:port" for requests
    and forwards traffic to the handler.
    The handler return value is then send
    to the client socket. A simple
    echo server handler:

    >>> def my_handler(request_string) return request_string

    The function blocks forever. Surround
    with an appropriate signal handler
    to quit the call (e.g., wait for
    a KeyboardInterrupt event):

    >>> try:
    >>>     listen("localhost", 55555, my_handler)
    >>> except KeyboardInterrupt, e:
    >>>     pass

    Args:
        host<str>: Listening host
        port<int>: Listening port
        handler<function>:
            Function 'f(request_string)->response_string'
            processing the request.
    """
    # Taken from
    # http://code.activestate.com/recipes/578247-basic-threaded-python-tcp-server/
    # Starts a new handler-thread for each request.
    # NOTE: Python 2 code (`thread` module).
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow immediate rebinding of the port after a restart.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen(1)
    while 1:
        clientsock, addr = sock.accept()
        thread.start_new_thread(_server, (clientsock, handler))
def request(host, port, data):
    """
    Sends data to server listening on "host:port" and returns
    the response.

    Args:
        host<str>: Server host
        port<int>: Server port
        data<str>: Request data

    Returns<str>:
        The response data
    """
    if type(data) != str:
        raise ValueError("data must be of type <str>")
    if len(data) > MAX_DATA:
        raise ValueError("request data must have len <= %d", MAX_DATA)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    # Send the 4-byte length prefix, then the payload.
    b4 = struct.pack(DATA_SIZE_TYPE, len(data))
    sock.sendall(b4)
    sock.sendall(data)
    # Read the response: length prefix first, then exactly that many bytes.
    b4 = _recvn(sock, DATA_SIZE_LEN)
    n = struct.unpack(DATA_SIZE_TYPE, b4)[0]
    data = _recvn(sock, n)
    sock.close()
    return data
def _recvn(sock, n):
    """
    Reads exactly n bytes from the socket.

    Raises:
        EOFError: if the peer closes the connection before n bytes
            have arrived.  (Previously this case caused an infinite
            busy loop: after EOF, recv() keeps returning the empty
            string, so `m` never advanced.)
    """
    buf = []
    m = 0
    while m < n:
        pack = sock.recv(n-m)
        if not pack:
            raise EOFError(
                "connection closed after %d of %d bytes" % (m, n))
        m += len(pack)
        buf.append(pack)
    return "".join(buf)
def _server(clientsock, handler):
    """
    Reads the request from the client socket
    and calls the handler callback to process the data.
    Sends back the response (return value of the
    handler callback) to the client socket.
    """
    # Read the 4-byte length prefix, then exactly that many payload bytes.
    b4 = _recvn(clientsock, DATA_SIZE_LEN)
    n = struct.unpack(DATA_SIZE_TYPE, b4)[0]
    req = _recvn(clientsock, n)
    resp = handler(req)
    if type(resp) != str:
        raise ValueError("handler return value must be of type <str>")
    if len(resp) > MAX_DATA:
        raise ValueError("handler return value must have len <= %d", MAX_DATA)
    # Mirror the wire format on the way out: length prefix, then payload.
    b4 = struct.pack(DATA_SIZE_TYPE, len(resp))
    clientsock.sendall(b4)
    clientsock.sendall(resp)
def _test():
    """Manual smoke test: echo server + repeated large round-trips.

    NOTE: Python 2 code (print statements, xrange); exercises ~256 MB
    payloads, so this is not a unit test.
    """
    import time
    def echo_handler(data):
        return data
    thread.start_new_thread(listen, ("localhost", 55555, echo_handler))
#    listen("localhost", 55555, echo_handler)
    # Give the listener thread a moment to bind before connecting.
    time.sleep(1)
    print "generating data..."
    data = "1"*(2**28)
    print "starting communication..."
    for i in xrange(1000):
        print "request", i
        resp = request("localhost", 55555, data)
        print "received %.02f KB" % (len(resp)/1000.0)
        print "validation..."
        assert len(resp) == len(data)
        #for j,c in enumerate(data):
        #    assert(resp[j] == c)
# Run the echo smoke test when executed directly.
if __name__ == "__main__":
    _test()
############################ Copyrights and license ############################
# #
# Copyright 2018 bbi-yggy <yossarian@blackbirdinteractive.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
import github.ProjectColumn
from . import Consts
class Project(github.GithubObject.CompletableGithubObject):
"""
This class represents Projects. The reference can be found here http://developer.github.com/v3/projects
"""
def __repr__(self):
return self.get__repr__({"name": self._name.value})
@property
def body(self):
"""
:type: string
"""
self._completeIfNotSet(self._body)
return self._body.value
@property
def columns_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._columns_url)
return self._columns_url.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def creator(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._creator)
return self._creator.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def node_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._node_id)
return self._node_id.value
@property
def number(self):
"""
:type: integer
"""
self._completeIfNotSet(self._number)
return self._number.value
@property
def owner_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._owner_url)
return self._owner_url.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def delete(self):
"""
:calls: `DELETE /projects/:project_id <https://developer.github.com/v3/projects/#delete-a-project>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url, headers={"Accept": Consts.mediaTypeProjectsPreview}
)
def edit(
self,
name=github.GithubObject.NotSet,
body=github.GithubObject.NotSet,
state=github.GithubObject.NotSet,
organization_permission=github.GithubObject.NotSet,
private=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /projects/:project_id <https://developer.github.com/v3/projects/#update-a-project>`_
:param name: string
:param body: string
:param state: string
:param organization_permission: string
:param private: bool
:rtype: None
"""
assert name is github.GithubObject.NotSet or isinstance(name, str), name
assert body is github.GithubObject.NotSet or isinstance(name, str), body
assert state is github.GithubObject.NotSet or isinstance(name, str), state
assert organization_permission is github.GithubObject.NotSet or isinstance(
organization_permission, str
), organization_permission
assert private is github.GithubObject.NotSet or isinstance(
private, bool
), private
patch_parameters = dict()
if name is not github.GithubObject.NotSet:
patch_parameters["name"] = name
if body is not github.GithubObject.NotSet:
patch_parameters["body"] = body
if state is not github.GithubObject.NotSet:
patch_parameters["state"] = state
if organization_permission is not github.GithubObject.NotSet:
patch_parameters["organization_permission"] = organization_permission
if private is not github.GithubObject.NotSet:
patch_parameters["private"] = private
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=patch_parameters,
headers={"Accept": Consts.mediaTypeProjectsPreview},
)
self._useAttributes(data)
def get_columns(self):
"""
:calls: `GET /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#list-project-columns>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.ProjectColumn.ProjectColumn`
"""
return github.PaginatedList.PaginatedList(
github.ProjectColumn.ProjectColumn,
self._requester,
self.columns_url,
None,
{"Accept": Consts.mediaTypeProjectsPreview},
)
def create_column(self, name):
"""
calls: `POST /projects/:project_id/columns <https://developer.github.com/v3/projects/columns/#create-a-project-column>`_
:param name: string
"""
assert isinstance(name, str), name
post_parameters = {"name": name}
import_header = {"Accept": Consts.mediaTypeProjectsPreview}
headers, data = self._requester.requestJsonAndCheck(
"POST", self.url + "/columns", headers=import_header, input=post_parameters
)
return github.ProjectColumn.ProjectColumn(
self._requester, headers, data, completed=True
)
def _initAttributes(self):
self._body = github.GithubObject.NotSet
self._columns_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._creator = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._node_id = github.GithubObject.NotSet
self._number = github.GithubObject.NotSet
self._owner_url = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
    def _useAttributes(self, attributes):
        # Copy the raw REST payload into typed private attributes.
        # Only keys present in the payload are applied; the rest keep
        # whatever value _initAttributes set (NotSet).
        if "body" in attributes:  # pragma no branch
            self._body = self._makeStringAttribute(attributes["body"])
        if "columns_url" in attributes:  # pragma no branch
            self._columns_url = self._makeStringAttribute(attributes["columns_url"])
        if "created_at" in attributes:  # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "creator" in attributes:  # pragma no branch
            # Nested object: wrapped in a NamedUser instance.
            self._creator = self._makeClassAttribute(
                github.NamedUser.NamedUser, attributes["creator"]
            )
        if "html_url" in attributes:  # pragma no branch
            self._html_url = self._makeStringAttribute(attributes["html_url"])
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "name" in attributes:  # pragma no branch
            self._name = self._makeStringAttribute(attributes["name"])
        if "node_id" in attributes:  # pragma no branch
            self._node_id = self._makeStringAttribute(attributes["node_id"])
        if "number" in attributes:  # pragma no branch
            self._number = self._makeIntAttribute(attributes["number"])
        if "owner_url" in attributes:  # pragma no branch
            self._owner_url = self._makeStringAttribute(attributes["owner_url"])
        if "state" in attributes:  # pragma no branch
            self._state = self._makeStringAttribute(attributes["state"])
        if "updated_at" in attributes:  # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
| ahmad88me/PyGithub | github/Project.py | Python | lgpl-3.0 | 10,167 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 by Pablo Martín <goinnn@gmail.com>
#
# This software is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import datetime
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from news.models import NewsItem, NewsItemHistory
class NewsModelHistoryTestCase(TestCase):
    """Verify that every insert/update/delete on NewsItem is mirrored by a
    NewsItemHistory row whose fields match and whose str() names the action."""

    def test_1_insert(self):
        # Both tables start empty.
        self.assertEqual(NewsItem.objects.count(), 0)
        self.assertEqual(NewsItemHistory.objects.count(), 0)
        NewsItem.objects.create(title='My first news item',
                                publish_date=datetime.datetime.now())
        # Creating one item records exactly one history row.
        self.assertEqual(NewsItem.objects.count(), 1)
        self.assertEqual(NewsItemHistory.objects.count(), 1)
        item = NewsItem.objects.get(pk=1)
        history = NewsItemHistory.objects.get(history=item)
        # The snapshot mirrors the item's fields.
        self.assertEqual(item.title, history.title)
        self.assertEqual(item.description, history.description)
        self.assertEqual(item.publish_date, history.publish_date)
        self.assertEqual(str(history), 'insert')

    def test_2_update(self):
        NewsItem.objects.create(title='My first news item',
                                publish_date=datetime.datetime.now())
        self.assertEqual(NewsItem.objects.count(), 1)
        self.assertEqual(NewsItemHistory.objects.count(), 1)
        item = NewsItem.objects.get(pk=1)
        item.title = 'My first news item (updated)'
        item.save()
        # Saving keeps one item but appends a second history row.
        self.assertEqual(NewsItem.objects.count(), 1)
        self.assertEqual(NewsItemHistory.objects.count(), 2)
        item = NewsItem.objects.get(pk=1)
        history = NewsItemHistory.objects.filter(history=item).order_by('pk')[1]
        self.assertEqual(item.title, history.title)
        self.assertEqual(item.description, history.description)
        self.assertEqual(item.publish_date, history.publish_date)
        self.assertEqual(str(history), 'update')

    def test_3_delete(self):
        NewsItem.objects.create(title='My first news item',
                                publish_date=datetime.datetime.now())
        self.assertEqual(NewsItem.objects.count(), 1)
        self.assertEqual(NewsItemHistory.objects.count(), 1)
        NewsItem.objects.get(pk=1).delete()
        # The item is gone but both history rows (insert + delete) remain.
        history = NewsItemHistory.objects.get(pk=2)
        self.assertEqual(NewsItem.objects.count(), 0)
        self.assertEqual(NewsItemHistory.objects.count(), 2)
        self.assertEqual(str(history), 'delete')

    def test_4_adminsite(self):
        # The history change page must render for a logged-in superuser.
        username = 'myuser'
        User.objects.create_superuser(username, 'myuser@example.com', username)
        self.client.login(username=username, password=username)
        news_item = NewsItem.objects.create(title='My first news item',
                                            publish_date=datetime.datetime.now())
        res = self.client.get(reverse('admin:news_newsitemhistory_change',
                                      args=(news_item.pk,)))
        self.assertEqual(res.status_code, 200)
# chat_client.py
import sys, socket, select
def chat_client():
    # Terminal chat client (Python 2 syntax): connects to a chat server and
    # multiplexes stdin and the server socket with select(), so a message
    # from the server can arrive while the user is typing.
    if(len(sys.argv) < 3) :
        print 'Usage : python chat_client.py hostname port'
        sys.exit()
    host = sys.argv[1]
    port = int(sys.argv[2])
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # The 2 second timeout only bounds the connect() below; after that,
    # select() drives all blocking.
    s.settimeout(2)
    # connect to remote host
    try :
        s.connect((host, port))
    except :
        # NOTE(review): bare except also swallows KeyboardInterrupt here.
        print 'Unable to connect'
        sys.exit()
    print 'Connected to remote host. You can start sending messages'
    sys.stdout.write('[Me] '); sys.stdout.flush()
    while 1:
        # Wait until either stdin has a line or the server sent data.
        socket_list = [sys.stdin, s]
        # Get the list sockets which are readable
        read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
        for sock in read_sockets:
            if sock == s:
                # incoming message from remote server, s
                data = sock.recv(4096)
                if not data :
                    # Empty read means the server closed the connection.
                    print '\nDisconnected from chat server'
                    sys.exit()
                else :
                    #print data
                    sys.stdout.write(data)
                    sys.stdout.write('[Me] '); sys.stdout.flush()
            else :
                # user entered a message
                msg = sys.stdin.readline()
                s.send(msg)
                sys.stdout.write('[Me] '); sys.stdout.flush()
if __name__ == "__main__":
    sys.exit(chat_client())
| hantsaniala/KrC | chat_client.py | Python | lgpl-3.0 | 1,445 |
# -*- coding: utf-8 -*-
# Copyright(C) 2017 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module
from weboob.capabilities.weather import CapWeather
from .browser import LameteoagricoleBrowser
__all__ = ['LameteoagricoleModule']
class LameteoagricoleModule(Module, CapWeather):
    """Weboob module exposing lameteoagricole.net through the CapWeather API.

    All work is delegated to LameteoagricoleBrowser; this class only
    declares the module metadata weboob uses for registration.
    """
    NAME = 'lameteoagricole'
    DESCRIPTION = u'lameteoagricole website'
    MAINTAINER = u'Vincent A'
    EMAIL = 'dev@indigo.re'
    LICENSE = 'AGPLv3+'
    VERSION = '2.1'
    BROWSER = LameteoagricoleBrowser
    def iter_city_search(self, pattern):
        """Delegate city lookup for *pattern* to the browser."""
        return self.browser.iter_cities(pattern)
    def get_current(self, city_id):
        """Delegate the current-weather query for *city_id* to the browser."""
        return self.browser.get_current(city_id)
    def iter_forecast(self, city_id):
        """Delegate the forecast query for *city_id* to the browser."""
        return self.browser.iter_forecast(city_id)
| laurentb/weboob | modules/lameteoagricole/module.py | Python | lgpl-3.0 | 1,524 |
# ID-Fits
# Copyright (c) 2015 Institut National de l'Audiovisuel, INA, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import numpy as np
def readPtsLandmarkFile(filename, landmarks_number):
    """Read landmark coordinates from a .pts annotation file.

    The file is expected to have a 3-line header (version, point count,
    opening brace) followed by one "x y" pair per line.

    :param filename: path to the .pts file
    :param landmarks_number: number of landmark rows to read
    :return: numpy array of shape (landmarks_number, 2), dtype float
    """
    # Use a context manager so the handle is closed even on parse errors
    # (the original leaked the file object).  dtype=float replaces the
    # np.float alias, which was removed in NumPy 1.24.
    with open(filename) as f:
        # Skip the 3-line header
        for _ in range(3):
            f.readline()
        # Read landmarks position
        landmarks = np.empty((landmarks_number, 2), dtype=float)
        for i in range(landmarks_number):
            landmarks[i] = [float(x) for x in f.readline().split()]
    return landmarks
| ina-foss/ID-Fits | lib/datasets/landmarks_file.py | Python | lgpl-3.0 | 1,121 |
import os.path
from pyneuroml.lems.LEMSSimulation import LEMSSimulation
import shutil
import os
from pyneuroml.pynml import read_neuroml2_file, get_next_hex_color, print_comment_v, print_comment
import random
def generate_lems_file_for_neuroml(sim_id,
                                   neuroml_file,
                                   target,
                                   duration,
                                   dt,
                                   lems_file_name,
                                   target_dir,
                                   gen_plots_for_all_v = True,
                                   plot_all_segments = False,
                                   gen_plots_for_only = [],   # List of populations
                                   gen_plots_for_quantities = {},   # Dict with displays vs lists of quantity paths
                                   gen_saves_for_all_v = True,
                                   save_all_segments = False,
                                   gen_saves_for_only = [],   # List of populations
                                   gen_saves_for_quantities = {},   # Dict with file names vs lists of quantity paths
                                   copy_neuroml = True,
                                   seed=None):
    """Generate a LEMS simulation file driving the given NeuroML 2 model.

    Writes ``<target_dir>/<lems_file_name>`` referencing (or copying, see
    ``copy_neuroml``) the NeuroML file, and optionally creates displays
    (plots) and output files (saves) for membrane voltages and arbitrary
    quantity paths.

    :return: list of quantity paths that will be saved by the simulation

    NOTE(review): the list/dict defaults above are mutable default
    arguments; they are only read here, but callers share the same objects
    across calls — consider None-defaults.
    """
    if seed:
        random.seed(seed) # To ensure same LEMS file (e.g. colours of plots) are generated every time for the same input
    file_name_full = '%s/%s'%(target_dir,lems_file_name)
    print_comment_v('Creating LEMS file at: %s for NeuroML 2 file: %s'%(file_name_full,neuroml_file))
    ls = LEMSSimulation(sim_id, duration, dt, target)
    nml_doc = read_neuroml2_file(neuroml_file, include_includes=True, verbose=True)
    quantities_saved = []
    if not copy_neuroml:
        # Reference the NeuroML file in place, relative to the target dir.
        rel_nml_file = os.path.relpath(os.path.abspath(neuroml_file), os.path.abspath(target_dir))
        print_comment_v("Including existing NeuroML file (%s) as: %s"%(neuroml_file, rel_nml_file))
        ls.include_neuroml2_file(rel_nml_file, include_included=True, relative_to_dir=os.path.abspath(target_dir))
    else:
        # Copy the NeuroML file and its (two levels of) includes next to
        # the LEMS file so the simulation directory is self-contained.
        print_comment_v("Copying NeuroML file (%s) to: %s (%s)"%(neuroml_file, target_dir, os.path.abspath(target_dir)))
        if os.path.abspath(os.path.dirname(neuroml_file))!=os.path.abspath(target_dir):
            shutil.copy(neuroml_file, target_dir)
        neuroml_file_name = os.path.basename(neuroml_file)
        ls.include_neuroml2_file(neuroml_file_name, include_included=False)
        for include in nml_doc.includes:
            incl_curr = '%s/%s'%(os.path.dirname(neuroml_file),include.href)
            print_comment_v(' - Including %s located at %s'%(include.href, incl_curr))
            shutil.copy(incl_curr, target_dir)
            ls.include_neuroml2_file(include.href, include_included=False)
            sub_doc = read_neuroml2_file(incl_curr)
            # NOTE(review): the inner loop reuses the name `include`,
            # shadowing the outer loop variable — works only because the
            # outer value is not used after this point.
            for include in sub_doc.includes:
                incl_curr = '%s/%s'%(os.path.dirname(neuroml_file),include.href)
                print_comment_v(' -- Including %s located at %s'%(include.href, incl_curr))
                shutil.copy(incl_curr, target_dir)
                ls.include_neuroml2_file(include.href, include_included=False)
    if gen_plots_for_all_v or gen_saves_for_all_v or len(gen_plots_for_only)>0 or len(gen_saves_for_only)>0 :
        for network in nml_doc.networks:
            for population in network.populations:
                quantity_template = "%s[%i]/v"
                component = population.component
                size = population.size
                cell = None
                segment_ids = []
                if plot_all_segments:
                    # Find the cell definition to enumerate its segments.
                    for c in nml_doc.cells:
                        if c.id == component:
                            cell = c
                            for segment in cell.morphology.segments:
                                segment_ids.append(segment.id)
                            segment_ids.sort()
                if population.type and population.type == 'populationList':
                    # populationList uses instance paths, not array indices.
                    quantity_template = "%s/%i/"+component+"/v"
                    size = len(population.instances)
                if gen_plots_for_all_v or population.id in gen_plots_for_only:
                    print_comment('Generating %i plots for %s in population %s'%(size, component, population.id))
                    disp0 = 'DispPop__%s'%population.id
                    ls.create_display(disp0, "Voltages of %s"%disp0, "-90", "50")
                    for i in range(size):
                        if plot_all_segments:
                            quantity_template_seg = "%s/%i/"+component+"/%i/v"
                            for segment_id in segment_ids:
                                quantity = quantity_template_seg%(population.id, i, segment_id)
                                ls.add_line_to_display(disp0, "v in seg %i %s"%(segment_id,safe_variable(quantity)), quantity, "1mV", get_next_hex_color())
                        else:
                            quantity = quantity_template%(population.id, i)
                            ls.add_line_to_display(disp0, "v %s"%safe_variable(quantity), quantity, "1mV", get_next_hex_color())
                if gen_saves_for_all_v or population.id in gen_saves_for_only:
                    print_comment('Saving %i values of v for %s in population %s'%(size, component, population.id))
                    of0 = 'Volts_file__%s'%population.id
                    ls.create_output_file(of0, "%s.%s.v.dat"%(sim_id,population.id))
                    for i in range(size):
                        if save_all_segments:
                            quantity_template_seg = "%s/%i/"+component+"/%i/v"
                            for segment_id in segment_ids:
                                quantity = quantity_template_seg%(population.id, i, segment_id)
                                ls.add_column_to_output_file(of0, 'v_%s'%safe_variable(quantity), quantity)
                                quantities_saved.append(quantity)
                        else:
                            quantity = quantity_template%(population.id, i)
                            ls.add_column_to_output_file(of0, 'v_%s'%safe_variable(quantity), quantity)
                            quantities_saved.append(quantity)
    # Extra user-requested displays and output files for arbitrary paths.
    for display in gen_plots_for_quantities.keys():
        quantities = gen_plots_for_quantities[display]
        ls.create_display(display, "Plots of %s"%display, "-90", "50")
        for q in quantities:
            ls.add_line_to_display(display, safe_variable(q), q, "1", get_next_hex_color())
    for file_name in gen_saves_for_quantities.keys():
        quantities = gen_saves_for_quantities[file_name]
        ls.create_output_file(file_name, file_name)
        for q in quantities:
            ls.add_column_to_output_file(file_name, safe_variable(q), q)
    ls.save_to_file(file_name=file_name_full)
    return quantities_saved
# Mainly for NEURON etc.
def safe_variable(quantity):
    """Return *quantity* with characters that are illegal in variable
    names (space, brackets, slash) replaced by underscores."""
    table = str.maketrans({" ": "_", "[": "_", "]": "_", "/": "_"})
    return quantity.translate(table)
| 34383c/pyNeuroML | pyneuroml/lems/__init__.py | Python | lgpl-3.0 | 7,440 |
# Copyright 2020 by Kurt Rathjen. All Rights Reserved.
#
# This library is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. This library is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import time
import copy
import logging
import webbrowser
from functools import partial
from studiovendor.Qt import QtGui
from studiovendor.Qt import QtCore
from studiovendor.Qt import QtWidgets
import studioqt
import studiolibrary
import studiolibrary.widgets
__all__ = ["LibraryWindow"]
logger = logging.getLogger(__name__)
class PreviewFrame(QtWidgets.QFrame):
    """Empty QFrame subclass; exists so the preview pane can be targeted
    by the stylesheet independently of other frames."""
    pass
class SidebarFrame(QtWidgets.QFrame):
    """Empty QFrame subclass; exists so the sidebar pane can be targeted
    by the stylesheet independently of other frames."""
    pass
class GlobalSignal(QtCore.QObject):
    """
    Triggered for all library instance.
    """
    # Emitted with (libraryWindow, selectedFolderPath) whenever any window's
    # folder selection changes; shared via the LibraryWindow.globalSignal
    # class attribute.
    folderSelectionChanged = QtCore.Signal(object, object)
class LibraryWindow(QtWidgets.QWidget):
    """Main Studio Library window: sidebar (folders), items view and
    preview pane, backed by a studiolibrary.Library dataset.

    Instances are cached per library name — see :meth:`instance`.
    """
    # Cache of name -> LibraryWindow, managed by instance()/destroyInstances().
    _instances = {}
    DEFAULT_NAME = "Default"
    # Fallback settings applied when no saved settings exist.
    DEFAULT_SETTINGS = {
        "library": {
            "sortBy": ["name:asc"],
            "groupBy": ["category:asc"]
        },
        "paneSizes": [130, 280, 180],
        "geometry": [-1, -1, 820, 780],
        "trashFolderVisible": False,
        "sidebarWidgetVisible": True,
        "previewWidgetVisible": True,
        "menuBarWidgetVisible": True,
        "statusBarWidgetVisible": True,
        "recursiveSearchEnabled": True,
        "itemsWidget": {
            "spacing": 2,
            "padding": 6,
            "zoomAmount": 80,
            "textVisible": True,
        },
        "searchWidget": {
            "text": "",
        },
        "filterByMenu": {
            "Folder": False
        },
        "theme": {
            "accentColor": "rgb(70, 160, 210, 255)",
            "backgroundColor": "rgb(60, 64, 79, 255)",
        }
    }
    TRASH_ENABLED = True
    TEMP_PATH_MENU_ENABLED = False
    DPI_ENABLED = studiolibrary.config.get("scaleFactorEnabled", False)
    ICON_COLOR = QtGui.QColor(255, 255, 255, 200)
    ICON_BADGE_COLOR = QtGui.QColor(230, 230, 0)
    # Customize widget classes
    SORTBY_MENU_CLASS = studiolibrary.widgets.SortByMenu
    GROUPBY_MENU_CLASS = studiolibrary.widgets.GroupByMenu
    FILTERBY_MENU_CLASS = studiolibrary.widgets.FilterByMenu
    ITEMS_WIDGET_CLASS = studiolibrary.widgets.ItemsWidget
    SEARCH_WIDGET_CLASS = studiolibrary.widgets.SearchWidget
    STATUS_WIDGET_CLASS = studiolibrary.widgets.StatusWidget
    MENUBAR_WIDGET_CLASS = studiolibrary.widgets.MenuBarWidget
    SIDEBAR_WIDGET_CLASS = studiolibrary.widgets.SidebarWidget
    # Customize library class
    LIBRARY_CLASS = studiolibrary.Library
    # Shared across every LibraryWindow instance (class attribute).
    globalSignal = GlobalSignal()
    # Local signals (per instance)
    loaded = QtCore.Signal()
    lockChanged = QtCore.Signal(object)
    itemRenamed = QtCore.Signal(str, str)
    itemSelectionChanged = QtCore.Signal(object)
    folderRenamed = QtCore.Signal(str, str)
    folderSelectionChanged = QtCore.Signal(object)
    @staticmethod
    def instances():
        """
        Return all the LibraryWindow instances that have been initialised.

        NOTE(review): on Python 3 this returns a dict view, not the
        documented list — callers that index it would break; confirm.

        :rtype: list[LibraryWindow]
        """
        return LibraryWindow._instances.values()
    @staticmethod
    def destroyInstances():
        """Delete all library widget instances and clear the cache."""
        for widget in LibraryWindow.instances():
            widget.destroy()
        LibraryWindow._instances = {}
    @classmethod
    def instance(
        cls,
        name="",
        path="",
        show=True,
        lock=False,
        superusers=None,
        lockRegExp=None,
        unlockRegExp=None,
        **kwargs
    ):
        """
        Return the library widget for the given name.

        Creates and caches the window on first use; subsequent calls with
        the same name re-configure and return the cached instance.

        :type name: str
        :type path: str
        :type show: bool
        :type lock: bool
        :type superusers: list[str]
        :type lockRegExp: str
        :type unlockRegExp: str
        :rtype: LibraryWindow
        """
        name = name or studiolibrary.defaultLibrary()
        libraryWindow = LibraryWindow._instances.get(name)
        if not libraryWindow:
            # First request for this library name: create and cache it.
            studioqt.installFonts(studiolibrary.resource.get("fonts"))
            libraryWindow = cls(name=name)
            LibraryWindow._instances[name] = libraryWindow
        # Remember the arguments so show() can be replayed later.
        kwargs_ = {
            "lock": lock,
            "show": show,
            "superusers": superusers,
            "lockRegExp": lockRegExp,
            "unlockRegExp": unlockRegExp
        }
        libraryWindow.setKwargs(kwargs_)
        libraryWindow.setLocked(lock)
        libraryWindow.setSuperusers(superusers)
        libraryWindow.setLockRegExp(lockRegExp)
        libraryWindow.setUnlockRegExp(unlockRegExp)
        if path:
            libraryWindow.setPath(path)
        if show:
            libraryWindow.show(**kwargs)
        return libraryWindow
    def __init__(self, parent=None, name="", path=""):
        """
        Create a new instance of the Library Widget.

        Builds the library model, all child widgets, the three-pane
        layout (sidebar | items | preview) and wires the signals.

        :type parent: QtWidgets.QWidget or None
        :type name: str
        :type path: str
        """
        QtWidgets.QWidget.__init__(self, parent)
        self.setObjectName("studiolibrary")
        version = studiolibrary.version()
        studiolibrary.sendAnalytics("MainWindow", version=version)
        self.setWindowIcon(studiolibrary.resource.icon("icon_black"))
        # Internal state, populated later by setPath()/settings loading.
        self._dpi = 1.0
        self._path = ""
        self._items = []
        self._name = name or self.DEFAULT_NAME
        self._theme = None
        self._kwargs = {}
        self._isDebug = False
        self._isLocked = False
        self._isLoaded = False
        self._previewWidget = None
        self._currentItem = None
        self._library = None
        self._lightbox = None
        self._refreshEnabled = False
        self._progressBar = None
        self._superusers = None
        self._lockRegExp = None
        self._unlockRegExp = None
        self._settingsWidget = None
        self._checkForUpdateThread = None
        self._trashEnabled = self.TRASH_ENABLED
        self._itemsHiddenCount = 0
        self._itemsVisibleCount = 0
        self._isTrashFolderVisible = False
        self._sidebarWidgetVisible = True
        self._previewWidgetVisible = True
        self._statusBarWidgetVisible = True
        # --------------------------------------------------------------------
        # Create Widgets
        # --------------------------------------------------------------------
        library = self.LIBRARY_CLASS(libraryWindow=self)
        library.dataChanged.connect(self.refresh)
        library.searchTimeFinished.connect(self._searchFinished)
        self._sidebarFrame = SidebarFrame(self)
        self._previewFrame = PreviewFrame(self)
        self._itemsWidget = self.ITEMS_WIDGET_CLASS(self)
        self._itemsWidget.installEventFilter(self)
        self._itemsWidget.keyPressed.connect(self._keyPressed)
        tip = "Search all current items."
        self._searchWidget = self.SEARCH_WIDGET_CLASS(self)
        self._searchWidget.setToolTip(tip)
        self._searchWidget.setStatusTip(tip)
        self._sortByMenu = self.SORTBY_MENU_CLASS(self)
        self._groupByMenu = self.GROUPBY_MENU_CLASS(self)
        self._filterByMenu = self.FILTERBY_MENU_CLASS(self)
        self._statusWidget = self.STATUS_WIDGET_CLASS(self)
        # Add the update available button to the status widget
        self._updateAvailableButton = QtWidgets.QPushButton(self._statusWidget)
        self._updateAvailableButton.setObjectName("updateAvailableButton")
        self._updateAvailableButton.setText("Update Available")
        self._updateAvailableButton.hide()
        self._updateAvailableButton.clicked.connect(self.openReleasesUrl)
        self.statusWidget().layout().addWidget(self._updateAvailableButton)
        self._menuBarWidget = self.MENUBAR_WIDGET_CLASS(self)
        self._sidebarWidget = self.SIDEBAR_WIDGET_CLASS(self)
        # All views share the same library dataset.
        self._sortByMenu.setDataset(library)
        self._groupByMenu.setDataset(library)
        self._filterByMenu.setDataset(library)
        self._itemsWidget.setDataset(library)
        self._searchWidget.setDataset(library)
        self._sidebarWidget.setDataset(library)
        self.setLibrary(library)
        # --------------------------------------------------------------------
        # Setup the menu bar buttons
        # --------------------------------------------------------------------
        iconColor = self.iconColor()
        name = "New Item"
        icon = studiolibrary.resource.icon("add_28")
        icon.setColor(iconColor)
        tip = "Add a new item to the selected folder"
        self.addMenuBarAction(name, icon, tip, callback=self.showNewMenu)
        self._menuBarWidget.addWidget(self._searchWidget)
        name = "Filters"
        icon = studiolibrary.resource.icon("filter")
        icon.setColor(iconColor)
        tip = "Filter the current results by type.\n" \
              "CTRL + Click will hide the others and show the selected one."
        self.addMenuBarAction(name, icon, tip, callback=self.showFilterByMenu)
        name = "Item View"
        icon = studiolibrary.resource.icon("view_settings")
        icon.setColor(iconColor)
        tip = "Change the style of the item view"
        self.addMenuBarAction(name, icon, tip, callback=self.showItemViewMenu)
        name = "Group By"
        icon = studiolibrary.resource.icon("groupby")
        icon.setColor(iconColor)
        tip = "Group the current items in the view by column"
        self.addMenuBarAction(name, icon, tip, callback=self.showGroupByMenu)
        name = "Sort By"
        icon = studiolibrary.resource.icon("sortby")
        icon.setColor(iconColor)
        tip = "Sort the current items in the view by column"
        self.addMenuBarAction(name, icon, tip, callback=self.showSortByMenu)
        name = "View"
        icon = studiolibrary.resource.icon("view")
        icon.setColor(iconColor)
        tip = "Choose to show/hide both the preview and navigation pane.\n" \
              "CTRL + Click will hide the menu bar as well."
        self.addMenuBarAction(name, icon, tip, callback=self.toggleView)
        name = "Sync items"
        icon = studiolibrary.resource.icon("sync")
        icon.setColor(iconColor)
        tip = "Sync with the filesystem"
        self.addMenuBarAction(name, icon, tip, callback=self.sync)
        name = "Settings"
        icon = studiolibrary.resource.icon("settings")
        icon.setColor(iconColor)
        tip = "Settings menu"
        self.addMenuBarAction(name, icon, tip, callback=self.showSettingsMenu)
        # -------------------------------------------------------------------
        # Setup Layout
        # -------------------------------------------------------------------
        layout = QtWidgets.QVBoxLayout(self)
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 1, 0, 0)
        # NOTE(review): _previewFrame gets setLayout() called again with
        # `vbox` further below; Qt ignores the second call with a warning —
        # one of the two assignments looks redundant. Confirm which layout
        # is intended.
        self._previewFrame.setLayout(layout)
        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 1, 0, 0)
        self._sidebarFrame.setLayout(layout)
        self._sidebarFrame.layout().addWidget(self._sidebarWidget)
        self._splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal, self)
        self._splitter.setSizePolicy(QtWidgets.QSizePolicy.Ignored,
                                     QtWidgets.QSizePolicy.Expanding)
        self._splitter.setHandleWidth(2)
        self._splitter.setChildrenCollapsible(False)
        self._splitter.insertWidget(0, self._sidebarFrame)
        self._splitter.insertWidget(1, self._itemsWidget)
        self._splitter.insertWidget(2, self._previewFrame)
        self._splitter.setStretchFactor(0, False)
        self._splitter.setStretchFactor(1, True)
        self._splitter.setStretchFactor(2, False)
        self.layout().addWidget(self._menuBarWidget)
        self.layout().addWidget(self._splitter)
        self.layout().addWidget(self._statusWidget)
        vbox = QtWidgets.QVBoxLayout()
        self._previewFrame.setLayout(vbox)
        self._previewFrame.layout().setSpacing(0)
        self._previewFrame.layout().setContentsMargins(0, 0, 0, 0)
        self._previewFrame.setMinimumWidth(5)
        # -------------------------------------------------------------------
        # Setup Connections
        # -------------------------------------------------------------------
        itemsWidget = self.itemsWidget()
        itemsWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        itemsWidget.itemMoved.connect(self._itemMoved)
        itemsWidget.itemDropped.connect(self._itemDropped)
        itemsWidget.itemSelectionChanged.connect(self._itemSelectionChanged)
        itemsWidget.customContextMenuRequested.connect(self.showItemsContextMenu)
        sidebarWidget = self.sidebarWidget()
        sidebarWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        sidebarWidget.itemDropped.connect(self._itemDropped)
        sidebarWidget.itemSelectionChanged.connect(self._folderSelectionChanged)
        sidebarWidget.customContextMenuRequested.connect(self.showFolderMenu)
        sidebarWidget.settingsMenuRequested.connect(self._foldersMenuRequested)
        self.folderSelectionChanged.connect(self.updateLock)
        self.updateViewButton()
        self.updateFiltersButton()
        self.updatePreviewWidget()
        self.checkForUpdate()
        if path:
            self.setPath(path)
def _keyPressed(self, event):
"""
Triggered from the items widget on key press event.
:type event: QKeyEvent
"""
text = event.text().strip()
if not text.isalpha() and not text.isdigit():
return
if text and not self.searchWidget().hasFocus():
self.searchWidget().setFocus()
self.searchWidget().setText(text)
    def _searchFinished(self):
        # Triggered by the library when a timed search completes; surface
        # the result summary in the status bar.
        self.showRefreshMessage()
    def _foldersMenuRequested(self, menu):
        """
        Triggered when the folders settings menu has been requested.

        Prepends a "Change Path" action to the sidebar's settings menu.

        :type menu: QtWidgets.QMenu
        """
        action = QtWidgets.QAction("Change Path", menu)
        action.triggered.connect(self.showChangePathDialog)
        menu.addAction(action)
        menu.addSeparator()
    def _itemMoved(self, item):
        """
        Triggered when the custom order has changed.

        :type item: studiolibrary.LibraryItem
        :rtype: None
        """
        # Persist the user's manual ordering.
        self.saveCustomOrder()
    def _itemSelectionChanged(self):
        """
        Triggered when an item is selected or deselected.

        :rtype: None
        """
        # Mirror the selection into the preview pane and re-emit it.
        item = self.itemsWidget().selectedItem()
        self.setPreviewWidgetFromItem(item)
        self.itemSelectionChanged.emit(item)
    def _itemDropped(self, event):
        """
        Triggered when items are dropped on the items widget or sidebar widget.

        :type event: QtCore.QEvent
        :rtype: None
        """
        mimeData = event.mimeData()
        if mimeData.hasUrls():
            # External file drop: offer to move the files into the
            # currently selected folder.
            urls = mimeData.urls()
            path = self.selectedFolderPath()
            items = self.createItemsFromUrls(urls)
            if self.isMoveItemsEnabled():
                self.showMoveItemsDialog(items, dst=path)
            elif not self.isCustomOrderEnabled():
                msg = 'Please sort by "Custom Order" to reorder items!'
                self.showInfoMessage(msg)
    def _folderSelectionChanged(self):
        """
        Triggered when a folder is selected or deselected.

        :rtype: None
        """
        # Re-emit locally and on the cross-window global signal.
        path = self.selectedFolderPath()
        self.folderSelectionChanged.emit(path)
        self.globalSignal.folderSelectionChanged.emit(self, path)
    def checkForUpdate(self):
        """Check if there are any new versions available.

        Runs studiolibrary.isLatestRelease() on a QThread so the UI does
        not block on the network call; the result is consumed by
        checkForUpdateFinished().
        """
        class _Thread(QtCore.QThread):
            # Minimal worker thread: runs `func` once and stores its result.
            def __init__(self, parent, func):
                super(_Thread, self).__init__(parent)
                self._func = func
                self._result = None
            def run(self):
                try:
                    self._result = self._func()
                except Exception as error:
                    # Network/parse failures are logged, result stays None.
                    logger.exception(error)
            def result(self):
                return self._result
        if studiolibrary.config.get("checkForUpdateEnabled"):
            self._checkForUpdateThread = _Thread(self, studiolibrary.isLatestRelease)
            self._checkForUpdateThread.finished.connect(self.checkForUpdateFinished)
            self._checkForUpdateThread.start()
    def checkForUpdateFinished(self):
        """Triggered when the check for update thread has finished.

        NOTE(review): a truthy result shows the "Update Available" button —
        verify isLatestRelease() returns truthy when an update EXISTS rather
        than when the install is already up to date.
        """
        if self._checkForUpdateThread.result():
            self._updateAvailableButton.show()
        else:
            self._updateAvailableButton.hide()
    def destroy(self):
        """Destroy the current library window instance.

        Hides and closes the window, then clears the items view and the
        library dataset before delegating to QWidget.destroy().
        """
        self.hide()
        self.closePreviewWidget()
        self.close()
        self.itemsWidget().clear()
        self.library().clear()
        super(LibraryWindow, self).destroy()
    def setKwargs(self, kwargs):
        """
        Set the key word arguments used to open the window.

        :type kwargs: dict
        """
        # Merged, not replaced, so repeated instance() calls accumulate.
        self._kwargs.update(kwargs)
    def library(self):
        """
        Return the library model object.

        :rtype: studiolibrary.Library
        """
        return self._library
    def setLibrary(self, library):
        """
        Set the library model.

        :type library: studiolibrary.Library
        :rtype: None
        """
        self._library = library
    def statusWidget(self):
        """
        Return the status widget.

        :rtype: studioqt.StatusWidget
        """
        return self._statusWidget
    def searchWidget(self):
        """
        Return the search widget.

        :rtype: studioqt.SearchWidget
        """
        return self._searchWidget
    def menuBarWidget(self):
        """
        Return the menu bar widget.

        :rtype: MenuBarWidget
        """
        return self._menuBarWidget
    def name(self):
        """
        Return the name of the library.

        :rtype: str
        """
        return self._name
    def path(self):
        """
        Return the root path for the library.

        :rtype: str
        """
        return self._path
    def setPath(self, path):
        """
        Set the root path for the library.

        Triggers a filesystem sync when no database file exists yet at the
        new location, then refreshes all views.

        :type path: str
        :rtype: None
        """
        path = studiolibrary.realPath(path)
        if path == self.path():
            logger.debug("The root path is already set.")
            return
        self._path = path
        library = self.library()
        library.setPath(path)
        # A missing database means this library has never been indexed.
        if not os.path.exists(library.databasePath()):
            self.sync()
        self.refresh()
        self.library().search()
        self.updatePreviewWidget()
    @studioqt.showArrowCursor
    def showPathErrorDialog(self):
        """
        Called when the current root path doesn't exist on refresh.

        Shows a red-header message box; accepting it opens the
        change-path file dialog.

        :rtype: None
        """
        path = self.path()
        title = "Path Error"
        text = 'The current root path does not exist "{path}". ' \
               'Please select a new root path to continue.'
        text = text.format(path=path)
        dialog = studiolibrary.widgets.createMessageBox(self, title, text)
        dialog.setHeaderColor("rgb(230, 80, 80)")
        dialog.show()
        dialog.accepted.connect(self.showChangePathDialog)
    @studioqt.showArrowCursor
    def showWelcomeDialog(self):
        """
        Called when there is no root path set for the library.

        :rtype: None
        """
        name = self.name()
        title = "Welcome"
        # NOTE(review): "Welcome" has no {} placeholders, so this format()
        # call is a no-op and `name`/version are never shown — looks like a
        # leftover from a templated title string.
        title = title.format(studiolibrary.version(), name)
        icon = studiolibrary.resource.get('icons/icon_white.png')
        text = "Before you get started please choose a folder " \
               "location for storing the data. A network folder is " \
               "recommended for sharing within a studio."
        dialog = studiolibrary.widgets.createMessageBox(self, title, text, headerIcon=icon)
        dialog.show()
        dialog.accepted.connect(self.showChangePathDialog)
    def showChangePathDialog(self):
        """
        Show a file browser dialog for changing the root path.

        Falls back to a plain refresh when the user cancels the dialog.

        :rtype: None
        """
        path = self._showChangePathDialog()
        if path:
            self.setPath(path)
        else:
            self.refresh()
    @studioqt.showArrowCursor
    def _showChangePathDialog(self):
        """
        Open the file dialog for setting a new root path.

        Returns an empty string when the user cancels.

        :rtype: str
        """
        path = self.path()
        if not path:
            # No current library path: start browsing from the home dir.
            path = os.path.expanduser("~")
        path = QtWidgets.QFileDialog.getExistingDirectory(
            None,
            "Choose a root folder location",
            path
        )
        return studiolibrary.normPath(path)
def isRefreshEnabled(self):
"""
Return True if the LibraryWindow can be refreshed.
:rtype: bool
"""
return self._refreshEnabled
def setRefreshEnabled(self, enable):
"""
If enable is False, all updates will be ignored.
:rtype: bool
"""
self.library().setSearchEnabled(enable)
self._refreshEnabled = enable
    @studioqt.showWaitCursor
    def sync(self):
        """
        Sync any data that might be out of date with the model.

        The actual sync work runs in the `_sync` closure, which is invoked
        as the `onFinished` callback of the progress bar's fade-in so that
        the bar is visible before the (potentially long) sync starts.

        :rtype: None
        """
        progressBar = self.statusWidget().progressBar()
        @studioqt.showWaitCursor
        def _sync():
            # Time the sync so the duration can be reported in the status bar.
            elapsedTime = time.time()
            self.library().sync(progressCallback=self.setProgressBarValue)
            elapsedTime = time.time() - elapsedTime
            msg = "Synced items in {0:.3f} seconds."
            msg = msg.format(elapsedTime)
            self.statusWidget().showInfoMessage(msg)
            self.setProgressBarValue("Done")
            # Fade the progress bar out and close it once the animation ends.
            studioqt.fadeOut(progressBar, duration=500, onFinished=progressBar.close)
        self.setProgressBarValue("Syncing")
        # duration=1 makes the fade-in effectively immediate; _sync runs after.
        studioqt.fadeIn(progressBar, duration=1, onFinished=_sync)
        progressBar.show()
def setProgressBarValue(self, label, value=-1):
"""Set the progress bar label and value"""
progressBar = self.statusWidget().progressBar()
if value == -1:
self.statusWidget().progressBar().reset()
value = 100
progressBar.setValue(value)
progressBar.setText(label)
def refresh(self):
"""
Refresh all sidebar items and library items.
:rtype: None
"""
if self.isRefreshEnabled():
self.update()
def update(self):
"""Update the library widget and the data. """
self.refreshSidebar()
self.updateWindowTitle()
# -----------------------------------------------------------------
# Methods for the sidebar widget
# -----------------------------------------------------------------
def sidebarWidget(self):
"""
Return the sidebar widget.
:rtype: studioqt.SidebarWidget
"""
return self._sidebarWidget
def selectedFolderPath(self):
"""
Return the selected folder items.
:rtype: str or None
"""
return self.sidebarWidget().selectedPath()
def selectedFolderPaths(self):
"""
Return the selected folder items.
:rtype: list[str]
"""
return self.sidebarWidget().selectedPaths()
def selectFolderPath(self, path):
"""
Select the given folder paths.
:type path: str
:rtype: None
"""
self.selectFolderPaths([path])
def selectFolderPaths(self, paths):
"""
Select the given folder paths.
:type paths: list[str]
:rtype: None
"""
self.sidebarWidget().selectPaths(paths)
@studioqt.showWaitCursor
def refreshSidebar(self):
"""
Refresh the state of the sidebar widget.
:rtype: None
"""
path = self.path()
if not path:
return self.showWelcomeDialog()
elif not os.path.exists(path):
return self.showPathErrorDialog()
self.updateSidebar()
def updateSidebar(self):
"""
Update the folders to be shown in the folders widget.
:rtype: None
"""
data = {}
root = self.path()
queries = [{'filters': [('type', 'is', 'Folder')]}]
items = self.library().findItems(queries)
for item in items:
data[item.path()] = item.itemData()
self.sidebarWidget().setData(data, root=root)
def setFolderData(self, path, data):
"""
Convenience method for setting folder data.
:type path: str
:type data: dict
"""
self.sidebarWidget().setItemData(path, data)
def createFolderContextMenu(self):
"""
Return the folder menu for the selected folders.
:rtype: QtWidgets.QMenu
"""
path = self.selectedFolderPath()
items = []
if path:
queries = [{"filters": [("path", "is", path)]}]
items = self.library().findItems(queries)
if items:
self._items_ = items
return self.createItemContextMenu(items)
# -----------------------------------------------------------------
# Methods for the items widget
# -----------------------------------------------------------------
def itemsWidget(self):
"""
Return the widget the contains all the items.
:rtype: studiolibrary.widgets.ItemsWidget
"""
return self._itemsWidget
def selectPath(self, path):
"""
Select the item with the given path.
:type path: str
:rtype: None
"""
self.selectPaths([path])
def selectPaths(self, paths):
"""
Select items with the given paths.
:type paths: list[str]
:rtype: None
"""
selection = self.selectedItems()
self.clearPreviewWidget()
self.itemsWidget().clearSelection()
self.itemsWidget().selectPaths(paths)
if self.selectedItems() != selection:
self._itemSelectionChanged()
def selectItems(self, items):
"""
Select the given items.
:type items: list[studiolibrary.LibraryItem]
:rtype: None
"""
paths = [item.path() for item in items]
self.selectPaths(paths)
def scrollToSelectedItem(self):
"""
Scroll the item widget to the selected item.
:rtype: None
"""
self.itemsWidget().scrollToSelectedItem()
def refreshSelection(self):
"""
Refresh the current item selection.
:rtype: None
"""
items = self.selectedItems()
self.itemsWidget().clearSelection()
self.selectItems(items)
def selectedItems(self):
"""
Return the selected items.
:rtype: list[studiolibrary.LibraryItem]
"""
return self._itemsWidget.selectedItems()
def clearItems(self):
"""
Remove all the loaded items.
:rtype: list[studiolibrary.LibraryItem]
"""
self.itemsWidget().clear()
def items(self):
"""
Return all the loaded items.
:rtype: list[studiolibrary.LibraryItem]
"""
return self._items
def addItem(self, item, select=False):
"""
Add the given item to the itemsWidget.
:type item: studiolibrary.LibraryItem
:type select: bool
:rtype: None
"""
self.addItems([item], select=select)
def addItems(self, items, select=False):
"""
Add the given items to the itemsWidget.
:type items: list[studiolibrary.LibraryItem]
:type select: bool
:rtype: None
"""
self.itemsWidget().addItems(items)
self._items.extend(items)
if select:
self.selectItems(items)
self.scrollToSelectedItem()
def createItemsFromUrls(self, urls):
"""
Return a new list of items from the given urls.
:rtype: list[studiolibrary.LibraryItem]
"""
return self.library().itemsFromUrls(urls, libraryWindow=self)
# -----------------------------------------------------------------
# Support for custom context menus
# -----------------------------------------------------------------
def addMenuBarAction(self, name, icon, tip, callback=None):
"""
Add a button/action to menu bar widget.
:type name: str
:type icon: QtWidget.QIcon
:type tip: str
:type callback: func
:type: QtWidget.QAction
"""
# The method below is needed to fix an issue with PySide2.
def _callback():
callback()
action = self.menuBarWidget().addAction(name)
if icon:
action.setIcon(icon)
if tip:
action.setToolTip(tip)
action.setStatusTip(tip)
if callback:
action.triggered.connect(_callback)
return action
def showFilterByMenu(self):
"""
Show the filters menu.
:rtype: None
"""
widget = self.menuBarWidget().findToolButton("Filters")
point = widget.mapToGlobal(QtCore.QPoint(0, widget.height()))
self._filterByMenu.show(point)
self.updateFiltersButton()
def showGroupByMenu(self):
"""
Show the group by menu at the group button.
:rtype: None
"""
widget = self.menuBarWidget().findToolButton("Group By")
point = widget.mapToGlobal(QtCore.QPoint(0, widget.height()))
self._groupByMenu.show(point)
def showSortByMenu(self):
"""
Show the sort by menu at the sort button.
:rtype: None
"""
widget = self.menuBarWidget().findToolButton("Sort By")
point = widget.mapToGlobal(QtCore.QPoint(0, widget.height()))
self._sortByMenu.show(point)
def showItemViewMenu(self):
"""
Show the item settings menu.
:rtype: None
"""
menu = self.itemsWidget().createSettingsMenu()
widget = self.menuBarWidget().findToolButton("Item View")
point = widget.mapToGlobal(QtCore.QPoint(0, widget.height()))
menu.exec_(point)
def createNewItemMenu(self):
"""
Return the create new item menu for adding new folders and items.
:rtype: QtWidgets.QMenu
"""
color = self.iconColor()
icon = studiolibrary.resource.icon("add", color=color)
menu = QtWidgets.QMenu(self)
menu.setIcon(icon)
menu.setTitle("New")
def _sortKey(item):
return item.MENU_ORDER
for cls in sorted(studiolibrary.registeredItems(), key=_sortKey):
action = cls.createAction(menu, self)
if action:
icon = studioqt.Icon(action.icon())
icon.setColor(self.iconColor())
action.setIcon(icon)
menu.addAction(action)
return menu
def settingsValidator(self, **kwargs):
"""
The validator used for the settings dialog.
:type kwargs: dict
"""
fields = []
color = kwargs.get("accentColor")
if color and self.theme().accentColor().toString() != color:
self.theme().setAccentColor(color)
color = kwargs.get("backgroundColor")
if color and self.theme().backgroundColor().toString() != color:
self.theme().setBackgroundColor(color)
path = kwargs.get("path", "")
if not os.path.exists(path):
fields.append(
{
"name": "path",
"value": path,
"error": "Path does not exists!"
}
)
scaleFactor = kwargs.get("scaleFactor")
scaleFactorMap = {"Small": 1.0, "Large": 1.5}
value = scaleFactorMap.get(scaleFactor, 1.0)
if value != self.dpi():
self.setDpi(value)
return fields
def settingsAccepted(self, **kwargs):
"""
Called when the user has accepted the changes in the settings dialog.
:type kwargs: dict
"""
path = kwargs.get("path")
if path and path != self.path():
self.setPath(path)
self.saveSettings()
def showSettingDialog(self):
"""Show the settings dialog."""
accentColor = self.theme().accentColor().toString()
backgroundColor = self.theme().backgroundColor().toString()
form = {
"title": "Settings",
"description": "Your local settings",
"layout": "vertical",
"schema": [
# {"name": "name", "type": "string", "default": self.name()},
{"name": "path", "type": "path", "value": self.path()},
{
"name": "accentColor",
"type": "color",
"value": accentColor,
"colors": [
"rgb(225, 110, 110, 255)",
# "rgb(220, 135, 100, 255)",
"rgb(225, 150, 70, 255)",
"rgb(225, 180, 35, 255)",
"rgb(90, 175, 130, 255)",
"rgb(100, 175, 160, 255)",
"rgb(70, 160, 210, 255)",
"rgb(110, 125, 220, 255)",
"rgb(100, 120, 150, 255)",
]
},
{
"name": "backgroundColor",
"type": "color",
"value": backgroundColor,
"colors": [
"rgb(45, 45, 48, 255)",
"rgb(55, 55, 60, 255)",
"rgb(68, 68, 70, 255)",
"rgb(80, 60, 80, 255)",
"rgb(85, 60, 60, 255)",
"rgb(60, 75, 75, 255)",
"rgb(60, 64, 79, 255)",
"rgb(245, 245, 255, 255)",
]
},
],
"validator": self.settingsValidator,
"accepted": self.settingsAccepted,
}
if self.DPI_ENABLED:
value = 'Large' if self.dpi() > 1 else "Small"
form["schema"].append(
{
"name": "scaleFactor",
"type": "buttonGroup",
"title": "Scale Factor (DPI)",
"value": value,
"items": [
"Small",
"Large",
]
},
)
self._settingsWidget = studiolibrary.widgets.FormDialog(form=form)
self._settingsWidget.setObjectName("settingsDialog")
self._settingsWidget.acceptButton().setText("Save")
self._lightbox = studiolibrary.widgets.Lightbox(self)
self._lightbox.setWidget(self._settingsWidget)
self._lightbox.show()
def createSettingsMenu(self):
"""
Return the settings menu for changing the library widget.
:rtype: studioqt.Menu
"""
menu = studioqt.Menu("", self)
menu.setTitle("Settings")
action = menu.addAction("Sync")
action.triggered.connect(self.sync)
menu.addSeparator()
action = menu.addAction("Settings")
action.triggered.connect(self.showSettingDialog)
menu.addSeparator()
librariesMenu = studiolibrary.widgets.LibrariesMenu(libraryWindow=self)
menu.addMenu(librariesMenu)
menu.addSeparator()
action = QtWidgets.QAction("Show Menu", menu)
action.setCheckable(True)
action.setChecked(self.isMenuBarWidgetVisible())
action.triggered[bool].connect(self.setMenuBarWidgetVisible)
menu.addAction(action)
action = QtWidgets.QAction("Show Sidebar", menu)
action.setCheckable(True)
action.setChecked(self.isFoldersWidgetVisible())
action.triggered[bool].connect(self.setFoldersWidgetVisible)
menu.addAction(action)
action = QtWidgets.QAction("Show Preview", menu)
action.setCheckable(True)
action.setChecked(self.isPreviewWidgetVisible())
action.triggered[bool].connect(self.setPreviewWidgetVisible)
menu.addAction(action)
action = QtWidgets.QAction("Show Status", menu)
action.setCheckable(True)
action.setChecked(self.isStatusBarWidgetVisible())
action.triggered[bool].connect(self.setStatusBarWidgetVisible)
menu.addAction(action)
menu.addSeparator()
action = QtWidgets.QAction("Save Settings", menu)
action.triggered.connect(self.saveSettings)
menu.addAction(action)
action = QtWidgets.QAction("Reset Settings", menu)
action.triggered.connect(self.resetSettings)
menu.addAction(action)
action = QtWidgets.QAction("Open Settings", menu)
action.triggered.connect(self.openSettings)
menu.addAction(action)
if self.TEMP_PATH_MENU_ENABLED:
action = QtWidgets.QAction("Open Temp Path", menu)
action.triggered.connect(self.openTempPath)
menu.addAction(action)
menu.addSeparator()
if self.trashEnabled():
action = QtWidgets.QAction("Show Trash Folder", menu)
action.setEnabled(self.trashFolderExists())
action.setCheckable(True)
action.setChecked(self.isTrashFolderVisible())
action.triggered[bool].connect(self.setTrashFolderVisible)
menu.addAction(action)
menu.addSeparator()
action = QtWidgets.QAction("Enable Recursive Search", menu)
action.setCheckable(True)
action.setChecked(self.isRecursiveSearchEnabled())
action.triggered[bool].connect(self.setRecursiveSearchEnabled)
menu.addAction(action)
menu.addSeparator()
viewMenu = self.itemsWidget().createSettingsMenu()
menu.addMenu(viewMenu)
menu.addSeparator()
action = QtWidgets.QAction("Debug Mode", menu)
action.setCheckable(True)
action.setChecked(self.isDebug())
action.triggered[bool].connect(self.setDebugMode)
menu.addAction(action)
action = QtWidgets.QAction("Report Issue", menu)
action.triggered.connect(self.reportIssue)
menu.addAction(action)
action = QtWidgets.QAction("Help", menu)
action.triggered.connect(self.help)
menu.addAction(action)
return menu
def showNewMenu(self):
"""
Creates and shows the new menu at the new action button.
:rtype: QtWidgets.QAction
"""
menu = self.createNewItemMenu()
point = self.menuBarWidget().rect().bottomLeft()
point = self.menuBarWidget().mapToGlobal(point)
return menu.exec_(point)
def showSettingsMenu(self):
"""
Show the settings menu at the current cursor position.
:rtype: QtWidgets.QAction
"""
menu = self.createSettingsMenu()
point = self.menuBarWidget().rect().bottomRight()
point = self.menuBarWidget().mapToGlobal(point)
# Align menu to the left of the cursor.
menu.move(-1000, -1000) # Fix display bug on linux
menu.show()
x = point.x() - menu.width()
point.setX(x)
return menu.exec_(point)
def showFolderMenu(self, pos=None):
"""
Show the folder context menu at the current cursor position.
:type pos: None or QtCore.QPoint
:rtype: QtWidgets.QAction
"""
menu = self.createFolderContextMenu()
point = QtGui.QCursor.pos()
point.setX(point.x() + 3)
point.setY(point.y() + 3)
action = menu.exec_(point)
menu.close()
return action
def showItemsContextMenu(self, pos=None):
"""
Show the item context menu at the current cursor position.
:type pos: QtGui.QPoint
:rtype QtWidgets.QAction
"""
items = self.itemsWidget().selectedItems()
menu = self.createItemContextMenu(items)
point = QtGui.QCursor.pos()
point.setX(point.x() + 3)
point.setY(point.y() + 3)
action = menu.exec_(point)
menu.close()
return action
def createItemContextMenu(self, items):
"""
Return the item context menu for the given items.
:type items: list[studiolibrary.LibraryItem]
:rtype: studiolibrary.ContextMenu
"""
menu = studioqt.Menu(self)
item = None
if items:
item = items[-1]
item.contextMenu(menu)
if not self.isLocked():
menu.addMenu(self.createNewItemMenu())
if item:
editMenu = studioqt.Menu(menu)
editMenu.setTitle("Edit")
menu.addMenu(editMenu)
item.contextEditMenu(editMenu)
if self.trashEnabled():
editMenu.addSeparator()
callback = partial(self.showMoveItemsToTrashDialog, items)
action = QtWidgets.QAction("Move to Trash", editMenu)
action.setEnabled(not self.isTrashSelected())
action.triggered.connect(callback)
editMenu.addAction(action)
menu.addSeparator()
menu.addMenu(self.createSettingsMenu())
return menu
def saveCustomOrder(self):
"""
Convenience method for saving the custom order.
:rtype: None
"""
self.library().saveItemData(self.library()._items, emitDataChanged=True)
# -------------------------------------------------------------------
# Support for moving items with drag and drop
# -------------------------------------------------------------------
def isCustomOrderEnabled(self):
"""
Return True if sorting by "Custom Order" is enabled.
:rtype: bool
"""
return 'Custom Order' in str(self.library().sortBy())
def isMoveItemsEnabled(self):
"""
Return True if moving items via drag and drop is enabled.
:rtype: bool
"""
paths = self.selectedFolderPaths()
if len(paths) != 1:
return False
if self.selectedItems():
return False
return True
def createMoveItemsDialog(self):
"""
Create and return a dialog for moving items.
:rtype: studiolibrary.widgets.MessageBox
"""
text = 'Would you like to copy or move the selected item/s?'
dialog = studiolibrary.widgets.createMessageBox(self, "Move or Copy items?", text)
dialog.buttonBox().clear()
dialog.addButton(u'Copy', QtWidgets.QDialogButtonBox.AcceptRole)
dialog.addButton(u'Move', QtWidgets.QDialogButtonBox.AcceptRole)
dialog.addButton(u'Cancel', QtWidgets.QDialogButtonBox.RejectRole)
return dialog
    def showMoveItemsDialog(self, items, dst):
        """
        Show the move items dialog for the given items.

        Asks the user whether to copy or move, then delegates to
        :meth:`moveItems`. Does nothing if any item is already in `dst`.

        :type items: list[studiolibrary.LibraryItem]
        :type dst: str
        :rtype: None
        """
        # NOTE(review): these indices depend on the insertion order of the
        # Copy/Move/Cancel buttons in createMoveItemsDialog (0=Copy, 1=Move,
        # 2=Cancel) — keep the two in sync.
        Copy = 0
        Cancel = 2
        # Check if the items are moving to another folder.
        for item in items:
            if os.path.dirname(item.path()) == dst:
                return
        dialog = self.createMoveItemsDialog()
        action = dialog.exec_()
        dialog.close()
        if action == Cancel:
            return
        copy = action == Copy
        self.moveItems(items, dst, copy=copy)
    def moveItems(self, items, dst, copy=False, force=False):
        """
        Move (or copy) the given items to the destination folder path.

        Library signals are blocked for the duration of the move so the
        model does not refresh per-item; a single refresh runs afterwards.
        On error an exception dialog is shown and the exception re-raised,
        but the finally block still unblocks signals and refreshes.

        :type items: list[studiolibrary.LibraryItem]
        :type dst: str
        :type copy: bool  # copy instead of rename/move
        :type force: bool  # generate a unique path on name collision
        :rtype: None
        """
        self.itemsWidget().clearSelection()
        movedItems = []
        try:
            self.library().blockSignals(True)
            for item in items:
                path = dst + "/" + os.path.basename(item.path())
                if force:
                    path = studiolibrary.generateUniquePath(path)
                if copy:
                    item.copy(path)
                else:
                    item.rename(path)
                # Only items processed before any failure are re-selected below.
                movedItems.append(item)
        except Exception as error:
            self.showExceptionDialog("Move Error", error)
            raise
        finally:
            self.library().blockSignals(False)
        self.refresh()
        self.selectItems(movedItems)
        self.scrollToSelectedItem()
# -----------------------------------------------------------------------
# Support for search
# -----------------------------------------------------------------------
def isPreviewWidgetVisible(self):
"""
Return True if the PreviewWidget is visible, otherwise return False.
:rtype: bool
"""
return self._previewWidgetVisible
def isFoldersWidgetVisible(self):
"""
Return True if the FoldersWidget is visible, otherwise return False.
:rtype: bool
"""
return self._sidebarWidgetVisible
def isStatusBarWidgetVisible(self):
"""
Return True if the StatusWidget is visible, otherwise return False.
:rtype: bool
"""
return self._statusBarWidgetVisible
def isMenuBarWidgetVisible(self):
"""
Return True if the MenuBarWidget is visible, otherwise return False.
:rtype: bool
"""
return self.menuBarWidget().isExpanded()
def setPreviewWidgetVisible(self, value):
"""
If the value is True then show the PreviewWidget, otherwise hide.
:type value: bool
"""
value = bool(value)
self._previewWidgetVisible = value
if value:
self._previewFrame.show()
else:
self._previewFrame.hide()
self.updateViewButton()
def setFoldersWidgetVisible(self, value):
"""
If the value is True then show the FoldersWidget, otherwise hide.
:type value: bool
"""
value = bool(value)
self._sidebarWidgetVisible = value
if value:
self._sidebarFrame.show()
else:
self._sidebarFrame.hide()
self.updateViewButton()
def setMenuBarWidgetVisible(self, value):
"""
If the value is True then show the tMenuBarWidget, otherwise hide.
:type value: bool
"""
value = bool(value)
if value:
self.menuBarWidget().expand()
else:
self.menuBarWidget().collapse()
def setStatusBarWidgetVisible(self, value):
"""
If the value is True then show the StatusBarWidget, otherwise hide.
:type value: bool
"""
value = bool(value)
self._statusBarWidgetVisible = value
if value:
self.statusWidget().show()
else:
self.statusWidget().hide()
# -----------------------------------------------------------------------
# Support for search
# -----------------------------------------------------------------------
def itemsVisibleCount(self):
"""
Return the number of items visible.
:rtype: int
"""
return self._itemsVisibleCount
def itemsHiddenCount(self):
"""
Return the number of items hidden.
:rtype: int
"""
return self._itemsHiddenCount
def setSearchText(self, text):
"""
Set the search widget text..
:type text: str
:rtype: None
"""
self.searchWidget().setText(text)
# -----------------------------------------------------------------------
# Support for custom preview widgets
# -----------------------------------------------------------------------
def setCreateWidget(self, widget):
"""
:type widget: QtWidgets.QWidget
:rtype: None
"""
self.setPreviewWidgetVisible(True)
self.itemsWidget().clearSelection()
# Force the preview pane to expand when creating a new item.
fsize, rsize, psize = self._splitter.sizes()
if psize < 150:
self.setSizes((fsize, rsize, 180))
self.setPreviewWidget(widget)
def clearPreviewWidget(self):
"""
Set the default preview widget.
"""
self._previewWidget = None
widget = studiolibrary.widgets.PlaceholderWidget()
self.setPreviewWidget(widget)
def updatePreviewWidget(self):
"""Update the current preview widget."""
self.setPreviewWidgetFromItem(self._currentItem, force=True)
    def setPreviewWidgetFromItem(self, item, force=False):
        """
        Show the preview widget for the given item.

        If the item is already current and `force` is False, nothing is
        done. A falsy item clears the preview back to the placeholder.

        :type item: studiolibrary.LibraryItem
        :type force: bool
        :rtype: None
        """
        if not force and self._currentItem == item:
            logger.debug("The current item preview widget is already set.")
            return
        self._currentItem = item
        if item:
            self.closePreviewWidget()
            try:
                item.showPreviewWidget(self)
            except Exception as error:
                # Surface the error, fall back to the placeholder widget,
                # then re-raise for the caller.
                self.showErrorMessage(error)
                self.clearPreviewWidget()
                raise
        else:
            self.clearPreviewWidget()
def previewWidget(self):
"""
Return the current preview widget.
:rtype: QtWidgets.QWidget
"""
return self._previewWidget
def setPreviewWidget(self, widget):
"""
Set the preview widget.
:type widget: QtWidgets.QWidget
:rtype: None
"""
if self._previewWidget == widget:
msg = 'Preview widget already contains widget "{0}"'
msg.format(widget)
logger.debug(msg)
else:
self.closePreviewWidget()
self._previewWidget = widget
if self._previewWidget:
self._previewFrame.layout().addWidget(self._previewWidget)
self._previewWidget.show()
def closePreviewWidget(self):
"""
Close and delete the preview widget.
:rtype: None
"""
layout = self._previewFrame.layout()
while layout.count():
item = layout.takeAt(0)
item.widget().hide()
item.widget().close()
item.widget().deleteLater()
self._previewWidget = None
# -----------------------------------------------------------------------
# Support for saving and loading the widget state
# -----------------------------------------------------------------------
def resetSettings(self):
"""
Reset the settings to the default settings.
:rtype: str
"""
self.setSettings(self.DEFAULT_SETTINGS)
def geometrySettings(self):
"""
Return the geometry values as a list.
:rtype: list[int]
"""
settings = (
self.window().geometry().x(),
self.window().geometry().y(),
self.window().geometry().width(),
self.window().geometry().height()
)
return settings
def setGeometrySettings(self, settings):
"""
Set the geometry of the widget with the given values.
:type settings: list[int]
:rtype: None
"""
x, y, width, height = settings
screenGeometry = QtWidgets.QApplication.desktop().screenGeometry()
screenWidth = screenGeometry.width()
screenHeight = screenGeometry.height()
if x <= 0 or y <= 0 or x >= screenWidth or y >= screenHeight:
self.centerWindow(width, height)
else:
self.window().setGeometry(x, y, width, height)
def settings(self):
"""
Return a dictionary with the widget settings.
:rtype: dict
"""
settings = {}
settings['dpi'] = self.dpi()
settings['kwargs'] = self._kwargs
settings['geometry'] = self.geometrySettings()
settings['paneSizes'] = self._splitter.sizes()
if self.theme():
settings['theme'] = self.theme().settings()
settings["library"] = self.library().settings()
settings["trashFolderVisible"] = self.isTrashFolderVisible()
settings["sidebarWidgetVisible"] = self.isFoldersWidgetVisible()
settings["previewWidgetVisible"] = self.isPreviewWidgetVisible()
settings["menuBarWidgetVisible"] = self.isMenuBarWidgetVisible()
settings["statusBarWidgetVisible"] = self.isStatusBarWidgetVisible()
settings['itemsWidget'] = self.itemsWidget().settings()
settings['searchWidget'] = self.searchWidget().settings()
settings['sidebarWidget'] = self.sidebarWidget().settings()
settings["recursiveSearchEnabled"] = self.isRecursiveSearchEnabled()
settings['filterByMenu'] = self._filterByMenu.settings()
settings["path"] = self.path()
return settings
def setSettings(self, settings):
"""
Set the widget settings from the given dictionary.
:type settings: dict
"""
defaults = copy.deepcopy(self.DEFAULT_SETTINGS)
settings = studiolibrary.update(defaults, settings)
isRefreshEnabled = self.isRefreshEnabled()
try:
self.setRefreshEnabled(False)
self.itemsWidget().setToastEnabled(False)
geometry = settings.get("geometry")
if geometry:
self.setGeometrySettings(geometry)
themeSettings = settings.get("theme")
if themeSettings:
self.setThemeSettings(themeSettings)
if not self.path():
path = settings.get("path")
if path and os.path.exists(path):
self.setPath(path)
dpi = settings.get("dpi", 1.0)
self.setDpi(dpi)
sizes = settings.get('paneSizes')
if sizes and len(sizes) == 3:
self.setSizes(sizes)
value = settings.get("sidebarWidgetVisible")
if value is not None:
self.setFoldersWidgetVisible(value)
value = settings.get("menuBarWidgetVisible")
if value is not None:
self.setMenuBarWidgetVisible(value)
value = settings.get("previewWidgetVisible")
if value is not None:
self.setPreviewWidgetVisible(value)
value = settings.get("statusBarWidgetVisible")
if value is not None:
self.setStatusBarWidgetVisible(value)
value = settings.get('searchWidget')
if value is not None:
self.searchWidget().setSettings(value)
value = settings.get("recursiveSearchEnabled")
if value is not None:
self.setRecursiveSearchEnabled(value)
value = settings.get('filterByMenu')
if value is not None:
self._filterByMenu.setSettings(value)
finally:
self.reloadStyleSheet()
self.setRefreshEnabled(isRefreshEnabled)
self.refresh()
value = settings.get('library')
if value is not None:
self.library().setSettings(value)
value = settings.get('trashFolderVisible')
if value is not None:
self.setTrashFolderVisible(value)
value = settings.get('sidebarWidget', {})
self.sidebarWidget().setSettings(value)
value = settings.get('itemsWidget', {})
self.itemsWidget().setSettings(value)
self.itemsWidget().setToastEnabled(True)
self.updateFiltersButton()
def updateSettings(self, settings):
"""
Save the given path to the settings on disc.
:type settings: dict
:rtype: None
"""
data = self.readSettings()
data.update(settings)
self.saveSettings(data)
def openTempPath(self):
"""Launch the system explorer to the temp directory."""
path = studiolibrary.tempPath()
studiolibrary.showInFolder(path)
def openSettings(self):
"""Launch the system explorer to the open directory."""
path = studiolibrary.settingsPath()
studiolibrary.showInFolder(path)
def saveSettings(self, data=None):
"""
Save the settings to the settings path set in the config.
:type data: dict or None
:rtype: None
"""
settings = studiolibrary.readSettings()
settings.setdefault(self.name(), {})
settings[self.name()].update(data or self.settings())
studiolibrary.saveSettings(settings)
self.showToastMessage("Saved")
@studioqt.showWaitCursor
def loadSettings(self):
"""
Load the user settings from disc.
:rtype: None
"""
self.reloadStyleSheet()
settings = self.readSettings()
self.setSettings(settings)
def readSettings(self):
"""
Get the user settings from disc.
:rtype: dict
"""
key = self.name()
data = studiolibrary.readSettings()
return data.get(key, {})
def isLoaded(self):
"""
Return True if the Studio Library has been shown
:rtype: bool
"""
return self._isLoaded
def setLoaded(self, loaded):
"""
Set if the widget has been shown.
:type loaded: bool
:rtype: None
"""
self._isLoaded = loaded
def setSizes(self, sizes):
"""
:type sizes: (int, int, int)
:rtype: None
"""
fSize, cSize, pSize = sizes
if pSize == 0:
pSize = 200
if fSize == 0:
fSize = 120
self._splitter.setSizes([fSize, cSize, pSize])
self._splitter.setStretchFactor(1, 1)
def centerWindow(self, width=None, height=None):
"""
Center the widget to the center of the desktop.
:rtype: None
"""
geometry = self.frameGeometry()
if width:
geometry.setWidth(width)
if height:
geometry.setHeight(height)
desktop = QtWidgets.QApplication.desktop()
pos = desktop.cursor().pos()
screen = desktop.screenNumber(pos)
centerPoint = desktop.screenGeometry(screen).center()
geometry.moveCenter(centerPoint)
self.window().setGeometry(geometry)
# -----------------------------------------------------------------------
# Overloading events
# -----------------------------------------------------------------------
def event(self, event):
"""
:type event: QtWidgets.QEvent
:rtype: QtWidgets.QEvent
"""
if isinstance(event, QtGui.QKeyEvent):
if studioqt.isControlModifier() and event.key() == QtCore.Qt.Key_F:
self.searchWidget().setFocus()
if isinstance(event, QtGui.QStatusTipEvent):
self.statusWidget().showInfoMessage(event.tip())
return QtWidgets.QWidget.event(self, event)
def keyReleaseEvent(self, event):
"""
:type event: QtGui.QKeyEvent
:rtype: None
"""
for item in self.selectedItems():
item.keyReleaseEvent(event)
QtWidgets.QWidget.keyReleaseEvent(self, event)
def closeEvent(self, event):
"""
:type event: QtWidgets.QEvent
:rtype: None
"""
self.saveSettings()
QtWidgets.QWidget.closeEvent(self, event)
def show(self, **kwargs):
"""
Overriding this method to always raise_ the widget on show.
Developers can use the kwargs to set platform dependent show options used in subclasses.
:rtype: None
"""
QtWidgets.QWidget.show(self)
self.setWindowState(QtCore.Qt.WindowNoState)
self.raise_()
def showEvent(self, event):
"""
:type event: QtWidgets.QEvent
:rtype: None
"""
QtWidgets.QWidget.showEvent(self, event)
if not self.isLoaded():
self.setLoaded(True)
self.setRefreshEnabled(True)
self.loadSettings()
# -----------------------------------------------------------------------
# Support for themes and custom style sheets
# -----------------------------------------------------------------------
def dpi(self):
"""
Return the current dpi for the library widget.
:rtype: float
"""
if not self.DPI_ENABLED:
return 1.0
return float(self._dpi)
    def setDpi(self, dpi):
        """
        Set the current dpi (scale factor) for the library widget.

        Propagates the scale factor to the child widgets, shows a toast
        with the new percentage, and reloads the style sheet.

        :type dpi: float
        :rtype: None
        """
        # Force 1.0 when DPI scaling is disabled for this class.
        if not self.DPI_ENABLED:
            dpi = 1.0
        self._dpi = dpi
        self.itemsWidget().setDpi(dpi)
        self.menuBarWidget().setDpi(dpi)
        self.sidebarWidget().setDpi(dpi)
        self.statusWidget().setFixedHeight(20 * dpi)
        self._splitter.setHandleWidth(2 * dpi)
        self.showToastMessage("DPI: {0}".format(int(dpi * 100)))
        self.reloadStyleSheet()
def iconColor(self):
"""
Return the icon color.
:rtype: studioqt.Color
"""
return self.ICON_COLOR
def setThemeSettings(self, settings):
"""
Set the theme from the given settings.
:type settings: dict
:rtype: None
"""
theme = studiolibrary.widgets.Theme()
theme.setSettings(settings)
self.setTheme(theme)
def setTheme(self, theme):
"""
Set the theme.
:type theme: studioqt.Theme
:rtype: None
"""
self._theme = theme
self._theme.updated.connect(self.reloadStyleSheet)
self.reloadStyleSheet()
def theme(self):
"""
Return the current theme.
:rtype: studioqt.Theme
"""
if not self._theme:
self._theme = studiolibrary.widgets.Theme()
return self._theme
    def reloadStyleSheet(self):
        """
        Reload the style sheet for the current theme.

        Pulls the color options from the theme, pushes them to each child
        widget, then applies the generated Qt style sheet.

        :rtype: None
        """
        theme = self.theme()
        theme.setDpi(self.dpi())
        options = theme.options()
        styleSheet = theme.styleSheet()
        color = studioqt.Color.fromString(options["ITEM_TEXT_COLOR"])
        self.itemsWidget().setTextColor(color)
        color = studioqt.Color.fromString(options["ITEM_TEXT_SELECTED_COLOR"])
        self.itemsWidget().setTextSelectedColor(color)
        color = studioqt.Color.fromString(options["ITEM_BACKGROUND_COLOR"])
        self.itemsWidget().setItemBackgroundColor(color)
        color = studioqt.Color.fromString(options["BACKGROUND_COLOR"])
        self.itemsWidget().setBackgroundColor(color)
        color = studioqt.Color.fromString(
            options["ITEM_BACKGROUND_HOVER_COLOR"])
        self.itemsWidget().setBackgroundHoverColor(color)
        color = studioqt.Color.fromString(
            options["ITEM_BACKGROUND_SELECTED_COLOR"])
        self.itemsWidget().setBackgroundSelectedColor(color)
        self.setStyleSheet(styleSheet)
        # Reloading the style sheets is needed for OSX
        self.itemsWidget().setStyleSheet(self.itemsWidget().styleSheet())
        self.searchWidget().setStyleSheet(self.searchWidget().styleSheet())
        self.menuBarWidget().setStyleSheet(self.menuBarWidget().styleSheet())
        self.sidebarWidget().setStyleSheet(self.sidebarWidget().styleSheet())
        self.previewWidget().setStyleSheet(self.previewWidget().styleSheet())
        if self._settingsWidget:
            self._settingsWidget.setStyleSheet(self._settingsWidget.styleSheet())
        self.searchWidget().update()
        self.menuBarWidget().update()
        self.sidebarWidget().update()
# -----------------------------------------------------------------------
# Support for the Trash folder.
# -----------------------------------------------------------------------
    def trashEnabled(self):
        """
        Return True if deleted items are moved to the trash folder.

        :rtype: bool
        """
        return self._trashEnabled
    def setTrashEnabled(self, enable):
        """
        Enable items to be moved to the trash folder instead of deleted.

        :type enable: bool
        :rtype: None
        """
        self._trashEnabled = enable
def isPathInTrash(self, path):
"""
Return True if the given path is in the Trash path.
:rtype: bool
"""
return "trash" in path.lower()
def trashPath(self):
"""
Return the trash path for the library.
:rtype: str
"""
path = self.path()
return u'{0}/{1}'.format(path, "Trash")
    def trashFolderExists(self):
        """
        Return True if the trash folder exists on disk.

        :rtype: bool
        """
        return os.path.exists(self.trashPath())
def createTrashFolder(self):
"""
Create the trash folder if it does not exist.
:rtype: None
"""
path = self.trashPath()
if not os.path.exists(path):
os.makedirs(path)
    def isTrashFolderVisible(self):
        """
        Return True if the trash folder is visible to the user.

        :rtype: bool
        """
        return self._isTrashFolderVisible
def setTrashFolderVisible(self, visible):
"""
Enable the trash folder to be visible to the user.
:type visible: str
:rtype: None
"""
self._isTrashFolderVisible = visible
if visible:
query = {
'name': 'trash_query',
'filters': []
}
else:
query = {
'name': 'trash_query',
'filters': [('path', 'not_contains', 'Trash')]
}
self.library().addGlobalQuery(query)
self.updateSidebar()
self.library().search()
def isTrashSelected(self):
"""
Return True if the selected folders is in the trash.
:rtype: bool
"""
folders = self.selectedFolderPaths()
for folder in folders:
if self.isPathInTrash(folder):
return True
items = self.selectedItems()
for item in items:
if self.isPathInTrash(item.path()):
return True
return False
    def moveItemsToTrash(self, items):
        """
        Move the given items to the trash path, creating it if needed.

        :type items: list[studiolibrary.LibraryItem]
        :rtype: None
        """
        self.createTrashFolder()
        self.moveItems(items, dst=self.trashPath(), force=True)
def showMoveItemsToTrashDialog(self, items=None):
"""
Show the "Move to trash" dialog for the selected items.
:type items: list[studiolibrary.LibraryItem] or None
:rtype: None
"""
items = items or self.selectedItems()
if items:
title = "Move to trash?"
text = "Are you sure you want to move the selected" \
"item/s to the trash?"
result = self.showQuestionDialog(title, text)
if result == QtWidgets.QDialogButtonBox.Yes:
self.moveItemsToTrash(items)
# -----------------------------------------------------------------------
# Support for message boxes
# -----------------------------------------------------------------------
    def showToastMessage(self, text, duration=1000):
        """
        A convenience method for showing the toast widget with the given text.

        :type text: str
        :type duration: int  # milliseconds
        :rtype: None
        """
        self.itemsWidget().showToastMessage(text, duration)
    def showInfoMessage(self, text):
        """
        A convenience method for showing an info message to the user.

        :type text: str
        :rtype: None
        """
        self.statusWidget().showInfoMessage(text)
    def showErrorMessage(self, text):
        """
        A convenience method for showing an error message to the user.

        The status bar is forced visible so the message cannot be missed.

        :type text: str
        :rtype: None
        """
        self.statusWidget().showErrorMessage(text)
        self.setStatusBarWidgetVisible(True)
    def showWarningMessage(self, text):
        """
        A convenience method for showing a warning message to the user.

        The status bar is forced visible so the message cannot be missed.

        :type text: str
        :rtype: None
        """
        self.statusWidget().showWarningMessage(text)
        self.setStatusBarWidgetVisible(True)
def showRefreshMessage(self):
"""Show how long the current refresh took."""
itemCount = len(self.library().results())
elapsedTime = self.library().searchTime()
plural = ""
if itemCount > 1:
plural = "s"
msg = "Found {0} item{1} in {2:.3f} seconds."
msg = msg.format(itemCount, plural, elapsedTime)
self.statusWidget().showInfoMessage(msg)
logger.debug(msg)
    def showInfoDialog(self, title, text):
        """
        A convenience method for showing an information dialog to the user.

        :type title: str
        :type text: str
        :rtype: QMessageBox.StandardButton
        """
        buttons = QtWidgets.QDialogButtonBox.Ok
        return studiolibrary.widgets.MessageBox.question(self, title, text, buttons=buttons)
    def showErrorDialog(self, title, text):
        """
        A convenience method for showing an error dialog to the user.

        The error is also echoed to the status bar.

        :type title: str
        :type text: str
        :rtype: QMessageBox.StandardButton
        """
        self.showErrorMessage(text)
        return studiolibrary.widgets.MessageBox.critical(self, title, text)
    def showExceptionDialog(self, title, error):
        """
        A convenience method for showing an exception dialog to the user.

        The full traceback is logged before the dialog is shown.

        :type title: str
        :type error: Exception
        :rtype: QMessageBox.StandardButton
        """
        logger.exception(error)
        self.showErrorDialog(title, error)
def showQuestionDialog(self, title, text, buttons=None):
"""
A convenience method for showing a question dialog to the user.
:type title: str
:type text: str
:type buttons: list[QMessageBox.StandardButton]
:rtype: QMessageBox.StandardButton
"""
buttons = buttons or \
QtWidgets.QDialogButtonBox.Yes | \
QtWidgets.QDialogButtonBox.No | \
QtWidgets.QDialogButtonBox.Cancel
return studiolibrary.widgets.MessageBox.question(self, title, text, buttons=buttons)
def updateWindowTitle(self):
"""
Update the window title with the version and lock status.
:rtype: None
"""
title = "Studio Library - "
title += studiolibrary.version() + " - " + self.name()
if self.isLocked():
title += " (Locked)"
self.setWindowTitle(title)
# -----------------------------------------------------------------------
# Support for locking via regex
# -----------------------------------------------------------------------
def updateCreateItemButton(self):
"""
Update the plus icon depending on if the library widget is locked.
:rtype: None
"""
action = self.menuBarWidget().findAction("New Item")
if self.isLocked():
icon = studiolibrary.resource.icon("lock")
action.setEnabled(False)
else:
icon = studiolibrary.resource.icon("add_28")
action.setEnabled(True)
icon.setColor(self.iconColor())
action.setIcon(icon)
    def isLocked(self):
        """
        Return the lock state of the library.

        :rtype: bool
        """
        return self._isLocked
    def setLocked(self, value):
        """
        Set the state of the widget to not editable.

        Propagates the lock to the sidebar and items widgets, refreshes the
        create button and window title, then emits lockChanged.

        :type value: bool
        :rtype: None
        """
        self._isLocked = value
        self.sidebarWidget().setLocked(value)
        self.itemsWidget().setLocked(value)
        self.updateCreateItemButton()
        self.updateWindowTitle()
        self.lockChanged.emit(value)
    def superusers(self):
        """
        Return the superusers for the widget.

        :rtype: list[str]
        """
        return self._superusers
    def setSuperusers(self, superusers):
        """
        Set the valid superusers for the library widget.

        This will lock all folders unless the current user is a superuser.

        :type superusers: list[str]
        :rtype: None
        """
        self._superusers = superusers
        self.updateLock()
    def lockRegExp(self):
        """
        Return the lock regexp used for locking the widget.

        :rtype: str
        """
        return self._lockRegExp
    def setLockRegExp(self, regExp):
        """
        Set the lock regexp used for locking the widget.

        Lock only folders that contain the given regExp in their path.

        :type regExp: str
        :rtype: None
        """
        self._lockRegExp = regExp
        self.updateLock()
    def unlockRegExp(self):
        """
        Return the unlock regexp used for unlocking the widget.

        :rtype: str
        """
        return self._unlockRegExp
    def setUnlockRegExp(self, regExp):
        """
        Set the unlock regexp used for unlocking the widget.

        Unlock only folders that contain the given regExp in their path.

        :type regExp: str
        :rtype: None
        """
        self._unlockRegExp = regExp
        self.updateLock()
def isLockRegExpEnabled(self):
"""
Return True if either the lockRegExp or unlockRegExp has been set.
:rtype: bool
"""
return not (
self.superusers() is None
and self.lockRegExp() is None
and self.unlockRegExp() is None
)
    def updateLock(self):
        """
        Update the lock state for the library.

        This is triggered when the user clicks on a folder.

        Rules, in order: superusers are never locked; when both regexps are
        unset the superusers argument alone decides; otherwise the selected
        folder paths are tested against the lock/unlock patterns.

        :rtype: None
        """
        if not self.isLockRegExpEnabled():
            return
        superusers = self.superusers() or []
        # An unset regexp compiles to the empty pattern; "matches the empty
        # string" is used below as the sentinel for "not configured".
        reLocked = re.compile(self.lockRegExp() or "")
        reUnlocked = re.compile(self.unlockRegExp() or "")
        if studiolibrary.user() in superusers:
            self.setLocked(False)
        elif reLocked.match("") and reUnlocked.match(""):
            if superusers:
                # Lock if only the superusers arg is used
                self.setLocked(True)
            else:
                # Unlock if no keyword arguments are used
                self.setLocked(False)
        else:
            folders = self.selectedFolderPaths()
            # Lock the selected folders that match the reLocked regx
            if not reLocked.match(""):
                for folder in folders:
                    if reLocked.search(folder):
                        self.setLocked(True)
                        return
                self.setLocked(False)
            # Unlock the selected folders that match the reUnlocked regx
            if not reUnlocked.match(""):
                for folder in folders:
                    if reUnlocked.search(folder):
                        self.setLocked(False)
                        return
                self.setLocked(True)
# -----------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------
def isCompactView(self):
"""
Return True if both the folder and preview widget are hidden
:rtype: bool
"""
return not self.isFoldersWidgetVisible() and not self.isPreviewWidgetVisible()
def toggleView(self):
"""
Toggle the preview widget and folder widget visible.
:rtype: None
"""
compact = self.isCompactView()
if studioqt.isControlModifier():
compact = False
self.setMenuBarWidgetVisible(compact)
self.setPreviewWidgetVisible(compact)
self.setFoldersWidgetVisible(compact)
def updateViewButton(self):
"""
Update the icon for the view action.
:rtype: None
"""
compact = self.isCompactView()
action = self.menuBarWidget().findAction("View")
if not compact:
icon = studiolibrary.resource.icon("view_all")
else:
icon = studiolibrary.resource.icon("view_compact")
icon.setColor(self.iconColor())
action.setIcon(icon)
def updateFiltersButton(self):
"""Update the icon for the filters menu."""
action = self.menuBarWidget().findAction("Filters")
if self._filterByMenu.isActive():
icon = studiolibrary.resource.icon("filter")
icon.setColor(self.iconColor())
icon.setBadge(18, 1, 9, 9, color=self.ICON_BADGE_COLOR)
else:
icon = studiolibrary.resource.icon("filter")
icon.setColor(self.iconColor())
action.setIcon(icon)
    def isRecursiveSearchEnabled(self):
        """
        Return True if recursive search is enabled.

        :rtype: bool
        """
        return self.sidebarWidget().isRecursive()
    def setRecursiveSearchEnabled(self, value):
        """
        Enable recursive search for searching sub folders.

        :type value: bool
        :rtype: None
        """
        self.sidebarWidget().setRecursive(value)
    @staticmethod
    def help():
        """Open the help url from the config file in a web browser."""
        webbrowser.open(studiolibrary.config.get("helpUrl"))
    @staticmethod
    def reportIssue():
        """Open the report-issue url from the config file to submit a new issue."""
        webbrowser.open(studiolibrary.config.get("reportIssueUrl"))
    @staticmethod
    def openReleasesUrl():
        """Open the releases url from the config file in a web browser."""
        webbrowser.open(studiolibrary.config.get("releasesUrl"))
    def setDebugMode(self, value):
        """
        Enable or disable debug mode for the widget and the library module.

        :type value: bool
        :rtype: None
        """
        self._isDebug = value
        studiolibrary.setDebugMode(value)
    def isDebug(self):
        """
        Return True if debug mode is enabled.

        :rtype: bool
        """
        return self._isDebug
| krathjen/studiolibrary | src/studiolibrary/librarywindow.py | Python | lgpl-3.0 | 80,070 |
"""
5.1 - Probabilistic Modeling: Error Propagation
================================================
In this example we will show how easy we can propagate uncertainty from
GemPy parameters to final structural models.
"""
# %%
import sys, os
sys.path.append("../../gempy")
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cuda"
import gempy as gp
from gempy.bayesian.fields import compute_prob, calculate_ie_masked
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1234)
# %%
# Model definition
# ----------------
#
# In the previous example we assume constant thickness to be able to
# reduce the problem to one dimension. This keeps the probabilistic model
# fairly simple since we do not need to deel with complex geometric
# structures. Unfortunaly, geology is all about dealing with complex three
# dimensional structures. In the moment data spread across the physical
# space, the probabilistic model will have to expand to relate data from
# different locations. In other words, the model will need to include
# either interpolations, regressions or some other sort of spatial
# functions. In this paper, we use an advance universal co-kriging
# interpolator. Further implications of using this method will be discuss
# below but for this lets treat is a simple spatial interpolation in order
# to keep the focus on the constraction of the probabilistic model.
#
# %%
# Build a simple two-layer model: two horizontal surfaces plus basement.
geo_model = gp.create_model('2-layers')
gp.init_data(geo_model, extent=[0, 12e3, -2e3, 2e3, 0, 4e3], resolution=[100, 10, 200])
# %%
geo_model.add_surfaces('surface 1')
geo_model.add_surfaces('surface 2')
geo_model.add_surfaces('basement')
# Attach per-surface values: the cell height (for later volume-type
# computations) and a density per unit.
dz = geo_model._grid.regular_grid.dz
geo_model.add_surface_values([dz, 0, 0], ['dz'])
geo_model.add_surface_values(np.array([2.6, 2.4, 3.2]), ['density'])
# %%
# Two interface points per surface (one per "borehole") and a single
# vertical orientation are enough to interpolate flat layers.
geo_model.add_surface_points(3e3, 0, 3.05e3, 'surface 1')
geo_model.add_surface_points(9e3, 0, 3.05e3, 'surface 1')
geo_model.add_surface_points(3e3, 0, 1.02e3, 'surface 2')
geo_model.add_surface_points(9e3, 0, 1.02e3, 'surface 2')
geo_model.add_orientations(6e3, 0, 4e3, 'surface 1', [0, 0, 1])
# %%
gp.plot_2d(geo_model)
plt.show()
# %%
# Adding topography
# -----------------
# %%
geo_model.set_topography()
# %%
# Setting up our area
# -------------------
# Lets imagine we have two boreholes and 1 gravity device. From the boreholes
# we can estimate the location of the interfaces of our layers. That will be
# enough to create the first model.
# %%
def plot_geo_setting():
    """Plot the current model section with the two wells and the gravity device.

    Uses the module-level ``geo_model``; saves the figure to ``model.svg``
    before showing it.
    """
    device_loc = np.array([[6e3, 0, 3700]])
    p2d = gp.plot_2d(geo_model, show_topography=True)
    # Depths of the two well heads.
    well_1 = 3.5e3
    well_2 = 3.62e3
    # Well markers (triangles), gravity device (cross), and well tracks.
    p2d.axes[0].scatter([3e3], [well_1], marker='^', s=400, c='#71a4b3', zorder=10)
    p2d.axes[0].scatter([9e3], [well_2], marker='^', s=400, c='#71a4b3', zorder=10)
    p2d.axes[0].scatter(device_loc[:, 0], device_loc[:, 2], marker='x', s=400, c='#DA8886', zorder=10)
    p2d.axes[0].vlines(3e3, .5e3, well_1, linewidth=4, color='gray')
    p2d.axes[0].vlines(9e3, .5e3, well_2, linewidth=4, color='gray')
    p2d.axes[0].vlines(3e3, .5e3, well_1)
    p2d.axes[0].vlines(9e3, .5e3, well_2)
    plt.savefig('model.svg')
    plt.show()
# %%
plot_geo_setting()
# %%
# Computing model
# ---------------
# %%
gp.set_interpolator(geo_model)
# %%
gp.compute_model(geo_model)
plot_geo_setting()
# %%
# Adding Random variables
# -----------------------
# Although that can work as a good approximation, the truth is that modelling
# hundreds of meters underground is not specially precise. That's why in many
# cases we would like to model our input data as probability distributions
# instead deterministic values. GemPy is specially efficiency for these type of
# tasks:
# %%
# Perturb the layer-interface depths and recompute the model.
geo_model.modify_surface_points(2, Z=500)
gp.compute_model(geo_model)
plot_geo_setting()
Z = np.random.normal(1000, 500, size=2)
geo_model.modify_surface_points([2, 3], Z=Z)
gp.compute_model(geo_model)
plot_geo_setting()
# %%
# Now we just sample from a random variable and loop it as much as we want:
# Monte Carlo loop: draw interface depths, recompute, and stack the
# resulting lithology blocks (one row per realization).
lith_blocks = np.array([])
n_iterations = 50
for i in range(n_iterations):
    Z = np.random.normal(1000, 500, size=2)
    geo_model.modify_surface_points([2, 3], Z=Z)
    gp.compute_model(geo_model)
    lith_blocks = np.append(lith_blocks, geo_model.solutions.lith_block)
lith_blocks = lith_blocks.reshape(n_iterations, -1)
# %%
# Per-cell probability of each lithology across the realizations.
prob_block = compute_prob(lith_blocks)
# %%
p2dp = gp.plot_2d(geo_model,
                  show_lith=False, show_boundaries=False, show_data=False,
                  regular_grid=prob_block[2],
                  kwargs_regular_grid={'cmap': 'viridis',
                                       'norm': None}
                  )
plt.show()
# %%
# Information entropy highlights the cells where the model is uncertain.
entropy_block = calculate_ie_masked(prob_block)
# %%
# sphinx_gallery_thumbnail_number = 6
p2dp = gp.plot_2d(geo_model,
                  show_lith=False, show_boundaries=False, show_data=False,
                  regular_grid=entropy_block,
                  kwargs_regular_grid={'cmap': 'viridis',
                                       'norm': None}
                  )
| cgre-aachen/gempy | examples/tutorials/ch5_probabilistic_modeling_DEP/ch5_1.py | Python | lgpl-3.0 | 5,072 |
# pylint: disable=R0904,R0902,E1101,E1103,C0111,C0302,C0103,W0101
from six import string_types
import numpy as np
from numpy.linalg import norm
from pyNastran.utils import integer_types
from pyNastran.bdf.cards.elements.bars import CBAR, LineElement
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double_or_blank, integer_double_string_or_blank)
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
class CBEAM(CBAR):
    """
    +-------+-----+-----+-----+-----+-----+-----+-----+----------+
    |   1   |  2  |  3  |  4  |  5  |  6  |  7  |  8  |    9     |
    +=======+=====+=====+=====+=====+=====+=====+=====+==========+
    | CBEAM | EID | PID | GA  | GB  | X1  | X2  | X3  | OFFT/BIT |
    +-------+-----+-----+-----+-----+-----+-----+-----+----------+
    |       | PA  | PB  | W1A | W2A | W3A | W1B | W2B | W3B      |
    +-------+-----+-----+-----+-----+-----+-----+-----+----------+
    |       | SA  | SB  |     |     |     |     |     |          |
    +-------+-----+-----+-----+-----+-----+-----+-----+----------+

    or

    +-------+-----+-----+-----+-----+-----+-----+-----+----------+
    |   1   |  2  |  3  |  4  |  5  |  6  |  7  |  8  |    9     |
    +=======+=====+=====+=====+=====+=====+=====+=====+==========+
    | CBEAM | EID | PID | GA  | GB  | G0  |     |     | OFFT/BIT |
    +-------+-----+-----+-----+-----+-----+-----+-----+----------+
    |       | PA  | PB  | W1A | W2A | W3A | W1B | W2B | W3B      |
    +-------+-----+-----+-----+-----+-----+-----+-----+----------+
    |       | SA  | SB  |     |     |     |     |     |          |
    +-------+-----+-----+-----+-----+-----+-----+-----+----------+

    offt/bit are MSC specific fields
    """
    type = 'CBEAM'
    _field_map = {
        1: 'eid', 2:'pid', 3:'ga', 4:'gb', #5:'x_g0', 6:'g1', 7:'g2',
        #8:'offt',
        9:'pa', 10:'pb',
        17:'sa', 18:'sb',
    }

    def _update_field_helper(self, n, value):
        """Update a field by its index; fields 5-7 depend on the x/g0 form."""
        if n == 11:
            self.wa[0] = value
        elif n == 12:
            self.wa[1] = value
        elif n == 13:
            self.wa[2] = value
        elif n == 14:
            self.wb[0] = value
        elif n == 15:
            self.wb[1] = value
        elif n == 16:
            self.wb[2] = value
        else:
            if self.g0 is not None:
                if n == 5:
                    self.g0 = value
                else:  # offt
                    msg = 'Field %r=%r is an invalid %s entry or is unsupported.' % (
                        n, value, self.type)
                    raise KeyError(msg)
            else:
                if n == 5:
                    self.x[0] = value
                elif n == 6:
                    self.x[1] = value
                elif n == 7:
                    self.x[2] = value
                else:
                    msg = 'Field %r=%r is an invalid %s entry or is unsupported.' % (
                        n, value, self.type)
                    raise KeyError(msg)

    def __init__(self, eid, pid, ga, gb, x, g0, offt, bit,
                 pa=0, pb=0, wa=None, wb=None, sa=0, sb=0, comment=''):
        """
        Adds a CBEAM card

        Parameters
        ----------
        pid : int
            property id
        mid : int
            material id
        ga / gb : int
            grid point at End A/B
        x : List[float, float, float]
            Components of orientation vector, from GA, in the displacement
            coordinate system at GA (default), or in the basic coordinate system
        g0 : int
            Alternate method to supply the orientation vector using grid
            point G0. Direction of is from GA to G0. is then transferred
            to End A
        offt : str; default='GGG'
            Offset vector interpretation flag
            None : bit is active
        bit : float; default=None
            Built-in twist of the cross-sectional axes about the beam axis
            at end B relative to end A.
            For beam p-elements ONLY!
            None : offt is active
        pa / pb : int; default=0
            Pin Flag at End A/B.  Releases the specified DOFs
        wa / wb : List[float, float, float]
            Components of offset vectors from the grid points to the end
            points of the axis of the shear center
        sa / sb : int; default=0
            Scalar or grid point identification numbers for the ends A and B,
            respectively. The degrees-of-freedom at these points are the
            warping variables . SA and SB cannot be specified for
            beam p-elements
        comment : str; default=''
            a comment for the card

        offt/bit are MSC specific fields
        """
        LineElement.__init__(self)
        if comment:
            self.comment = comment
        if wa is None:
            wa = np.zeros(3, dtype='float64')
        else:
            wa = np.asarray(wa)
        if wb is None:
            wb = np.zeros(3, dtype='float64')
        else:
            wb = np.asarray(wb)
        self.eid = eid
        self.pid = pid
        self.ga = ga
        self.gb = gb
        self.x = x
        self.g0 = g0
        self.offt = offt
        self.bit = bit
        self.pa = pa
        self.pb = pb
        self.wa = wa
        self.wb = wb
        self.sa = sa
        self.sb = sb
        self._validate_input()

    @classmethod
    def add_card(cls, card, comment=''):
        """Create a CBEAM from a BDF card object."""
        eid = integer(card, 1, 'eid')
        pid = integer_or_blank(card, 2, 'pid', eid)
        ga = integer(card, 3, 'ga')
        gb = integer(card, 4, 'gb')
        x, g0 = cls._init_x_g0(card, eid)
        offt, bit = cls._init_offt_bit(card, eid)  # offt doesn't exist in NX nastran
        pa = integer_or_blank(card, 9, 'pa', 0)
        pb = integer_or_blank(card, 10, 'pb', 0)
        wa = np.array([double_or_blank(card, 11, 'w1a', 0.0),
                       double_or_blank(card, 12, 'w2a', 0.0),
                       double_or_blank(card, 13, 'w3a', 0.0)], 'float64')
        wb = np.array([double_or_blank(card, 14, 'w1b', 0.0),
                       double_or_blank(card, 15, 'w2b', 0.0),
                       double_or_blank(card, 16, 'w3b', 0.0)], 'float64')
        sa = integer_or_blank(card, 17, 'sa', 0)
        sb = integer_or_blank(card, 18, 'sb', 0)
        assert len(card) <= 19, 'len(CBEAM card) = %i\ncard=%s' % (len(card), card)
        return CBEAM(eid, pid, ga, gb, x, g0, offt, bit,
                     pa=pa, pb=pb, wa=wa, wb=wb, sa=sa, sb=sb, comment=comment)

    @classmethod
    def add_op2_data(cls, data, f, comment=''):
        """Create a CBEAM from OP2 data; *f* selects the orientation form."""
        #: .. todo:: verify
        assert len(data) == 2, 'data=%s len(data)=%s' % (data, len(data))
        #data = [[eid,pid,ga,gb,sa,sb, pa,pb,w1a,w2a,w3a,w1b,w2b,w3b],
        #        [f,g0]]
        #data = [[eid,pid,ga,gb,sa,sb, pa,pb,w1a,w2a,w3a,w1b,w2b,w3b],
        #        [f,x1,x2,x3]]
        main, aft = data
        flag = aft[0]
        assert f == flag, 'f=%s flag=%s' % (f, flag)
        if flag == 0:
            # basic cid
            assert len(aft) == 4, 'f=%s aft=%s len(aft)=%s' % (f, aft, len(aft))
            x1, x2, x3 = aft[1:]
            g0 = None
            x = np.array([x1, x2, x3], dtype='float64')
        elif flag == 1:
            # global cid
            assert len(aft) == 4, 'f=%s aft=%s len(aft)=%s' % (f, aft, len(aft))
            g0 = None
            x1, x2, x3 = aft[1:]
            x = np.array([x1, x2, x3], dtype='float64')
        elif flag == 2:
            # grid option
            assert len(aft) == 2, 'f=%s aft=%s len(aft)=%s' % (f, aft, len(aft))
            g0 = data[1][1]
            x = None
        else:
            raise NotImplementedError()
        eid = main[0]
        pid = main[1]
        ga = main[2]
        gb = main[3]
        sa = main[4]
        sb = main[5]
        #offt = str(data[6]) # GGG
        bit = None  # ???
        offt = 'GGG'  #: .. todo:: is this correct???
        pa = main[6]
        pb = main[7]
        wa = np.array([main[8], main[9], main[10]], 'float64')
        wb = np.array([main[11], main[12], main[13]], 'float64')
        return CBEAM(eid, pid, ga, gb, x, g0, offt, bit,
                     pa=pa, pb=pb, wa=wa, wb=wb, sa=sa, sb=sb, comment=comment)

    def _validate_input(self):
        """Sanity checks that cannot wait for cross referencing."""
        if self.g0 in [self.ga, self.gb]:
            msg = 'G0=%s cannot be GA=%s or GB=%s' % (self.g0, self.ga, self.gb)
            raise RuntimeError(msg)

    def Nodes(self):
        """Return the two end nodes [GA, GB]."""
        return [self.ga, self.gb]

    @classmethod
    def _init_offt_bit(cls, card, eid):
        """
        Parse field 8, which is either offt (str) or bit (float).

        offt doesn't exist in NX nastran
        """
        field8 = integer_double_string_or_blank(card, 8, 'field8')
        if isinstance(field8, float):
            offt = None
            bit = field8
        elif field8 is None:
            offt = 'GGG'  # default
            bit = None
        elif isinstance(field8, string_types):
            bit = None
            offt = field8
            msg = 'invalid offt parameter of CBEAM...offt=%s' % offt
            assert offt[0] in ['G', 'B', 'O', 'E'], msg
            assert offt[1] in ['G', 'B', 'O', 'E'], msg
            assert offt[2] in ['G', 'B', 'O', 'E'], msg
        else:
            msg = ('field8 on %s card is not a string(offt) or bit '
                   '(float)...field8=%s\n' % (cls.type, field8))
            raise RuntimeError("Card Instantiation: %s" % msg)
        return offt, bit

    def Mid(self):
        """Return the material id (requires cross referencing)."""
        if isinstance(self.pid, integer_types):
            raise RuntimeError('Element eid=%i has not been '
                               'cross referenced.\n%s' % (self.eid, str(self)))
        return self.pid_ref.Mid()

    def Area(self):
        """Return the cross-sectional area (requires cross referencing)."""
        if isinstance(self.pid, integer_types):
            raise RuntimeError('Element eid=%i has not been '
                               'cross referenced.\n%s' % (self.eid, str(self)))
        return self.pid_ref.Area()

    def Nsm(self):
        """Return the non-structural mass (requires cross referencing)."""
        if isinstance(self.pid, integer_types):
            raise RuntimeError('Element eid=%i has not been '
                               'cross referenced.\n%s' % (self.eid, str(self)))
        return self.pid_ref.Nsm()

    @property
    def is_offt(self):
        """is the offt flag active?"""
        if isinstance(self.offt, string_types):
            return True
        assert isinstance(self.bit, float), 'bit=%s type=%s' % (self.bit, type(self.bit))
        return False

    @property
    def is_bit(self):
        """is the bit flag active?"""
        return not self.is_offt

    def get_offt_bit_defaults(self):
        """
        Return field 8 with its default blanked out.

        offt doesn't exist in NX nastran
        """
        if self.is_offt:
            field8 = set_blank_if_default(self.offt, 'GGG')
        else:
            field8 = set_blank_if_default(self.bit, 0.0)
        return field8

    def cross_reference(self, model):
        """
        Cross links the card so referenced cards can be extracted directly

        Parameters
        ----------
        model : BDF()
            the BDF object
        """
        msg = ' which is required by CBEAM eid=%s' % (self.eid)
        self.ga = model.Node(self.ga, msg=msg)
        self.ga_ref = self.ga
        self.gb = model.Node(self.gb, msg=msg)
        self.gb_ref = self.gb
        self.nodes = model.Nodes([self.ga.nid, self.gb.nid], msg=msg)
        self.nodes_ref = self.nodes
        self.pid = model.Property(self.pid, msg=msg)
        self.pid_ref = self.pid
        if self.g0:
            g0 = model.nodes[self.g0]
            self.g0_vector = g0.get_position() - self.ga.get_position()
        else:
            self.g0_vector = self.x
        if model.is_nx:
            assert self.offt == 'GGG', 'NX only support offt=GGG; offt=%r' % self.offt

    def safe_cross_reference(self, model):
        """Like cross_reference, but logs missing references instead of raising."""
        msg = ' which is required by CBEAM eid=%s' % (self.eid)
        self.ga = model.Node(self.ga, msg=msg)
        self.gb = model.Node(self.gb, msg=msg)
        self.ga_ref = self.ga
        self.gb_ref = self.gb
        try:
            self.pid = model.Property(self.pid, msg=msg)
            self.pid_ref = self.pid
        except KeyError:
            model.log.warning('pid=%s%s' % (self.pid, msg))
        if self.g0:
            try:
                g0 = model.nodes[self.g0]
                self.g0_vector = g0.get_position() - self.ga.get_position()
            except KeyError:
                model.log.warning('Node=%s%s' % (self.g0, msg))
        else:
            self.g0_vector = self.x

    def uncross_reference(self):
        """Revert the referenced objects back to raw ids."""
        self.pid = self.Pid()
        self.ga = self.Ga()
        self.gb = self.Gb()
        del self.ga_ref, self.gb_ref, self.pid_ref

    def raw_fields(self):
        """Return the fields that define the card."""
        (x1, x2, x3) = self.getX_G0_defaults()
        # Bug fix: this previously called self.getOfft_Bit_defaults(), which
        # is not defined on this class; use the same helper as repr_fields().
        offt = self.get_offt_bit_defaults()
        ga, gb = self.node_ids
        list_fields = ['CBEAM', self.eid, self.Pid(), ga, gb, x1, x2, x3, offt,
                       self.pa, self.pb] + list(self.wa) + list(self.wb) + [self.sa, self.sb]
        return list_fields

    def repr_fields(self):
        """Return the fields with defaults blanked out."""
        w1a = set_blank_if_default(self.wa[0], 0.0)
        w2a = set_blank_if_default(self.wa[1], 0.0)
        w3a = set_blank_if_default(self.wa[2], 0.0)
        w1b = set_blank_if_default(self.wb[0], 0.0)
        w2b = set_blank_if_default(self.wb[1], 0.0)
        w3b = set_blank_if_default(self.wb[2], 0.0)
        sa = set_blank_if_default(self.sa, 0)
        sb = set_blank_if_default(self.sb, 0)
        (x1, x2, x3) = self.getX_G0_defaults()
        offt = self.get_offt_bit_defaults()
        ga, gb = self.node_ids
        list_fields = ['CBEAM', self.eid, self.Pid(), ga, gb, x1, x2, x3, offt,
                       self.pa, self.pb, w1a, w2a, w3a,
                       w1b, w2b, w3b, sa, sb]
        return list_fields

    def write_card(self, size=8, is_double=False):
        """Write the card in 8- or 16-field format."""
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)

    def write_card_16(self, is_double=False):
        """Write the card in 16-field format."""
        card = self.repr_fields()
        return self.comment + print_card_16(card)
| saullocastro/pyNastran | pyNastran/bdf/cards/elements/beam.py | Python | lgpl-3.0 | 15,046 |
"""
Identity Based Signature
| From: "David Pointcheval and Olivier Sanders. Short Randomizable Signatures"
| Published in: 2015
| Available from: https://eprint.iacr.org/2015/525.pdf
* type: signature (identity-based)
* setting: bilinear groups (asymmetric)
:Authors: Lovesh Harchandani
:Date: 6/2018
"""
from functools import reduce
from charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, pair
debug = False
class PS01:
    """
    Signatures over committed messages, section 6.1 of the paper

    Pointcheval-Sanders randomizable signatures over an asymmetric pairing
    group: the scheme signs a commitment to the messages, which the holder
    later unblinds and can re-randomize.
    """
    def __init__(self, groupObj):
        # The pairing group is kept in a module-level global so the static
        # methods below can reach it.
        global group
        group = groupObj
    @staticmethod
    def keygen(num_messages=1):
        # sk holds the secret exponent x (and X1 = g1^x); pk exposes the
        # generators and the per-message Y-values in both groups.
        x = group.random(ZR)
        g1 = group.random(G1)
        sk = {'x': x, 'X1': g1 ** x}
        g2 = group.random(G2)
        ys = [group.random(ZR) for _ in range(num_messages)]
        X2 = g2 ** x
        y1s = [g1 ** y for y in ys]
        y2s = [g2 ** y for y in ys]
        pk = {'X2': X2, 'Y2': y2s, 'Y1': y1s, 'g2': g2, 'g1': g1}
        return pk, sk
    def commitment(self, pk, *messages):
        # Commitment g1^t * prod(Y1_i^{H(m_i)}); t is the blinding factor
        # needed later to unblind the signature.
        t = group.random(ZR)
        return t, (pk['g1'] ** t) * self.product([y1 ** group.hash(m, ZR) for (y1, m) in zip(pk['Y1'], messages)])
    def sign(self, sk, pk, commitment):
        # Blind signature on the commitment: (g1^u, (X1 * C)^u).
        u = group.random(ZR)
        return pk['g1'] ** u, (sk['X1'] * commitment) ** u
    @staticmethod
    def unblind_signature(t, sig):
        # Strip the blinding factor t from the second component.
        s1, s2 = sig
        return s1, (s2 / (s1 ** t))
    def verify(self, pk, sig, *messages):
        # Check e(s1, X2 * prod(Y2_i^{H(m_i)})) == e(g2, s2), rejecting the
        # identity element for s1.
        ms = [group.hash(m, ZR) for m in messages]
        s1, s2 = sig
        if group.init(G1) == s1:
            return False
        l2 = pk['X2'] * self.product([pk['Y2'][i] ** ms[i] for i in range(len(messages))])
        return pair(s1, l2) == pair(pk['g2'], s2)
    def randomize_sig(self, sig):
        # Re-randomize: (s1^t, s2^t) still verifies for the same messages.
        s1, s2 = sig
        t = group.random(ZR)
        return s1 ** t, s2 ** t
    @staticmethod
    def product(seq):
        # Multiply a non-empty sequence of group elements together.
        return reduce(lambda x, y: x * y, seq)
def main():
    """Demo: sign a commitment, unblind, verify, then re-randomize and verify again."""
    grp = PairingGroup('MNT224')
    ps = PS01(grp)
    messages = ['Hi there', 'Not there', 'Some message ................', 'Dont know .............']
    (pk, sk) = ps.keygen(len(messages))
    if debug:
        print("Keygen...")
        print("pk :=", pk)
        print("sk :=", sk)
    t, commitment = ps.commitment(pk, *messages)
    sig = ps.sign(sk, pk, commitment)
    if debug:
        print("Signature: ", sig)
    sig = ps.unblind_signature(t, sig)
    result = ps.verify(pk, sig, *messages)
    assert result, "INVALID signature!"
    if debug:
        print("Successful Verification!!!")
    rand_sig = ps.randomize_sig(sig)
    assert sig != rand_sig
    if debug:
        print("Randomized Signature: ", rand_sig)
    result = ps.verify(pk, rand_sig, *messages)
    assert result, "INVALID signature!"
    if debug:
        print("Successful Verification!!!")
if __name__ == "__main__":
    # Enable verbose output when run directly as a script.
    debug = True
    main()
| JHUISI/charm | charm/schemes/pksig/pksig_ps03.py | Python | lgpl-3.0 | 2,946 |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
from edi.lib.configurationparser import ConfigurationParser
from edi.lib.playbookrunner import PlaybookRunner
from tests.libtesting.fixtures.configfiles import config_files
from tests.libtesting.helpers import get_command_parameter
from edi.lib import mockablerun
import shutil
import subprocess
from codecs import open
import yaml
def verify_inventory(file):
    """Assert that the generated Ansible inventory file mentions the
    fake container used by the test."""
    with open(file, encoding='utf-8') as inventory:
        content = inventory.read()
    assert 'fake-container' in content
def verify_extra_vars(file):
    """Check the generated Ansible extra-vars YAML file.

    Asserts that the config management user and the two expected shared
    folder mountpoints are present.

    :param file: path of the file passed to ansible-playbook via --extra-vars
    """
    with open(file, encoding='utf-8') as f:
        # safe_load avoids arbitrary object construction; plain yaml.load()
        # without an explicit Loader is deprecated (PyYAML >= 5.1) and
        # rejected by PyYAML >= 6.
        extra_vars = yaml.safe_load(f)
    assert extra_vars['edi_config_management_user_name'] == 'edicfgmgmt'
    mountpoints = extra_vars['edi_shared_folder_mountpoints']
    assert len(mountpoints) == 2
    assert mountpoints[0] == '/foo/bar/target_mountpoint'
def test_lxd_connection(config_files, monkeypatch):
    """End-to-end check that PlaybookRunner invokes ansible-playbook with an
    lxd connection, a generated inventory and the expected extra variables.

    The actual ansible-playbook execution is intercepted; everything else
    runs normally.
    """
    def fake_ansible_playbook_run(*popenargs, **kwargs):
        # Only intercept the ansible-playbook call itself; other commands
        # issued through mockablerun are executed for real.
        command = popenargs[0]
        if command[0] == 'ansible-playbook':
            assert 'lxd' == get_command_parameter(command, '--connection')
            verify_inventory(get_command_parameter(command, '--inventory'))
            # --extra-vars is passed as '@<file>', hence the lstrip('@')
            verify_extra_vars(get_command_parameter(command, '--extra-vars').lstrip('@'))
            # TODO: verify --user for ssh connection
            return subprocess.CompletedProcess("fakerun", 0, '')
        else:
            return subprocess.run(*popenargs, **kwargs)
    monkeypatch.setattr(mockablerun, 'run_mockable', fake_ansible_playbook_run)
    def fakechown(*_):
        # chown of generated files needs privileges the test environment
        # does not have, so make it a no-op.
        pass
    monkeypatch.setattr(shutil, 'chown', fakechown)
    with open(config_files, "r") as main_file:
        parser = ConfigurationParser(main_file)
        runner = PlaybookRunner(parser, "fake-container", "lxd")
        playbooks = runner.run_all()
        expected_playbooks = ['10_base_system', '20_networking', '30_foo']
        assert playbooks == expected_playbooks
| erickeller/edi | tests/lib/test_playbookrunner.py | Python | lgpl-3.0 | 2,684 |
from reportlab.lib.colors import black
from reportlab.lib.units import cm
from base import BAND_WIDTH, BAND_HEIGHT, Element
class Graphic(Element):
    """Common base for drawable graphic elements.

    Any keyword arguments passed to the constructor are stored verbatim as
    instance attributes, so elements can be configured declaratively.
    """
    visible = True
    stroke = True
    stroke_color = black
    stroke_width = 1
    fill = False
    fill_color = black
    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
    def set_rect(self, **kwargs):
        """Fit this element into a rectangle.

        Accepts 'left'/'top' plus either absolute sizes ('width'/'height')
        or opposite edges ('right'/'bottom'), which are converted to sizes.
        """
        self.left = kwargs.get('left', self.left)
        self.top = kwargs.get('top', self.top)
        if 'width' in kwargs:
            self.width = kwargs['width']
        elif 'right' in kwargs:
            self.width = kwargs['right'] - self.left
        if 'height' in kwargs:
            self.height = kwargs['height']
        elif 'bottom' in kwargs:
            self.height = kwargs['bottom'] - self.top
class Rect(Graphic):
    """A simple rectangle, drawn with the inherited stroke/fill attributes."""
    pass
class RoundRect(Rect):
    """A rectangle graphic element that is possible set its radius and have
    round corners"""
    # Corner radius of the rounded rectangle.
    radius = 0.5
class Fixed(Graphic):
    """A fixed graphic is based on right and bottom coordinates instead of
    width and height.

    It is just a reference class and shouldn't be used directly in reports."""
    left = None
    top = None
    right = None
    bottom = None
    def set_rect(self, **kwargs):
        # Unlike Graphic.set_rect, sizes ('width'/'height') are converted to
        # absolute edge coordinates ('right'/'bottom') rather than stored.
        self.left = kwargs.get('left', self.left)
        self.top = kwargs.get('top', self.top)
        if 'right' in kwargs:
            self.right = kwargs['right']
        elif 'width' in kwargs:
            self.right = kwargs['width'] + self.left
        if 'bottom' in kwargs:
            self.bottom = kwargs['bottom']
        elif 'height' in kwargs:
            self.bottom = kwargs['height'] + self.top
class Line(Fixed):
    """A straight line between (left, top) and (right, bottom)."""
    @property
    def width(self):
        """Horizontal span of the line."""
        return self.right - self.left
    @property
    def height(self):
        """Vertical span of the line."""
        return self.bottom - self.top
class Circle(Graphic):
    """A simple circle"""
    # Center coordinates and radius; must be set before rendering.
    left_center = None
    top_center = None
    radius = None
class Arc(Fixed):
    """An arc inside the bounding box, starting at *start_angle* and
    sweeping *extent* degrees."""
    start_angle = 0
    extent = 90
class Ellipse(Fixed):
    """An ellipse inscribed in the box defined by left/top/right/bottom."""
    pass
class Image(Graphic):
    """An image element, loaded lazily via PIL from *filename* or supplied
    through the *get_image* hook / the *image* property."""
    left = None
    top = None
    _width = None
    _height = None
    filename = None
    _image = None # PIL image object is stored here
    get_image = None # Optional callable hook to supply the image; to be overrided
    def _get_image(self):
        """Uses Python Imaging Library to load an image and get its
        informations"""
        if self.get_image:
            self._image = self.get_image(self)
        if not self._image and self.filename:
            # Support both the legacy standalone PIL package and Pillow.
            try:
                import Image as PILImage
            except ImportError:
                from PIL import Image as PILImage
            self._image = PILImage.open(self.filename)
        return self._image
    def _set_image(self, value):
        self._image = value
    image = property(_get_image, _set_image)
    def _get_height(self):
        # Fall back to the intrinsic pixel height when no explicit height set.
        ret = self._height or (self.image and self.image.size[1] or 0)
        # 0.02*cm per unit — presumably a pixel-to-page-size conversion;
        # NOTE(review): confirm the intended DPI assumption.
        return ret * 0.02*cm
    def _set_height(self, value):
        self._height = value
    height = property(_get_height, _set_height)
    def _get_width(self):
        # Fall back to the intrinsic pixel width when no explicit width set.
        ret = self._width or (self.image and self.image.size[0] or 0)
        return ret * 0.02*cm
    def _set_width(self, value):
        self._width = value
    width = property(_get_width, _set_width)
| joerabelo/geraldo | geraldo/graphics.py | Python | lgpl-3.0 | 3,556 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lazagne.config.module_info import ModuleInfo
try:
from ConfigParser import RawConfigParser # Python 2.7
except ImportError:
from configparser import RawConfigParser # Python 3
from collections import OrderedDict
class Wifi(ModuleInfo):
    """Recover Wi-Fi pre-shared keys stored by NetworkManager."""
    def __init__(self):
        ModuleInfo.__init__(self, 'wifi', 'wifi')
    def run(self):
        """Parse every NetworkManager connection file and return the
        SSID/PSK pairs found. Requires root to read the directory."""
        pwd_found = []
        directory = u'/etc/NetworkManager/system-connections'
        if not os.path.exists(directory):
            return pwd_found
        if os.getuid() != 0:
            self.info('You need sudo privileges')
            return pwd_found
        for entry in os.listdir(directory):
            path = os.path.join(directory, entry)
            if not os.path.isfile(path):
                continue
            parser = RawConfigParser()
            parser.read(path)
            try:
                credentials = OrderedDict()
                credentials['SSID'] = parser.get('wifi', 'ssid')
                credentials['Password'] = parser.get('wifi-security', 'psk')
                pwd_found.append(credentials)
            except Exception:
                # Connection files without a stored PSK are skipped silently.
                pass
        return pwd_found
| AlessandroZ/LaZagne | Linux/lazagne/softwares/wifi/wifi.py | Python | lgpl-3.0 | 1,246 |
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import List, Union, Dict, Optional, Any
from cura.PrinterOutput.PrinterOutputController import PrinterOutputController
from cura.PrinterOutput.Models.PrinterOutputModel import PrinterOutputModel
from .CloudClusterBuildPlate import CloudClusterBuildPlate
from .CloudClusterPrintCoreConfiguration import CloudClusterPrintCoreConfiguration
from .BaseCloudModel import BaseCloudModel
## Class representing a cluster printer
#  Spec: https://api-staging.ultimaker.com/connect/v1/spec
class CloudClusterPrinterStatus(BaseCloudModel):
    ## Creates a new cluster printer status
    #  \param enabled: A printer can be disabled if it should not receive new jobs. By default every printer is enabled.
    #  \param firmware_version: Firmware version installed on the printer. Can differ for each printer in a cluster.
    #  \param friendly_name: Human readable name of the printer. Can be used for identification purposes.
    #  \param ip_address: The IP address of the printer in the local network.
    #  \param machine_variant: The type of printer. Can be 'Ultimaker 3' or 'Ultimaker 3ext'.
    #  \param status: The status of the printer.
    #  \param unique_name: The unique name of the printer in the network.
    #  \param uuid: The unique ID of the printer, also known as GUID.
    #  \param configuration: The active print core configurations of this printer.
    #  \param reserved_by: A printer can be claimed by a specific print job.
    #  \param maintenance_required: Indicates if maintenance is necessary
    #  \param firmware_update_status: Whether the printer's firmware is up-to-date, value is one of: "up_to_date",
    #       "pending_update", "update_available", "update_in_progress", "update_failed", "update_impossible"
    #  \param latest_available_firmware: The version of the latest firmware that is available
    #  \param build_plate: The build plate that is on the printer
    def __init__(self, enabled: bool, firmware_version: str, friendly_name: str, ip_address: str, machine_variant: str,
                 status: str, unique_name: str, uuid: str,
                 configuration: List[Union[Dict[str, Any], CloudClusterPrintCoreConfiguration]],
                 reserved_by: Optional[str] = None, maintenance_required: Optional[bool] = None,
                 firmware_update_status: Optional[str] = None, latest_available_firmware: Optional[str] = None,
                 build_plate: Optional[Union[Dict[str, Any], CloudClusterBuildPlate]] = None, **kwargs) -> None:
        self.configuration = self.parseModels(CloudClusterPrintCoreConfiguration, configuration)
        self.enabled = enabled
        self.firmware_version = firmware_version
        self.friendly_name = friendly_name
        self.ip_address = ip_address
        self.machine_variant = machine_variant
        self.status = status
        self.unique_name = unique_name
        self.uuid = uuid
        self.reserved_by = reserved_by
        self.maintenance_required = maintenance_required
        self.firmware_update_status = firmware_update_status
        self.latest_available_firmware = latest_available_firmware
        self.build_plate = self.parseModel(CloudClusterBuildPlate, build_plate) if build_plate else None
        super().__init__(**kwargs)
    ## Creates a new output model.
    #  \param controller - The controller of the model.
    def createOutputModel(self, controller: PrinterOutputController) -> PrinterOutputModel:
        model = PrinterOutputModel(controller, len(self.configuration), firmware_version = self.firmware_version)
        self.updateOutputModel(model)
        return model
    ## Updates the given output model.
    #  \param model - The output model to update.
    def updateOutputModel(self, model: PrinterOutputModel) -> None:
        model.updateKey(self.uuid)
        model.updateName(self.friendly_name)
        model.updateType(self.machine_variant)
        # A disabled printer is shown as "disabled" regardless of its status.
        model.updateState(self.status if self.enabled else "disabled")
        # Default to a glass build plate when the cluster reports none.
        model.updateBuildplate(self.build_plate.type if self.build_plate else "glass")
        for configuration, extruder_output, extruder_config in \
                zip(self.configuration, model.extruders, model.printerConfiguration.extruderConfigurations):
            configuration.updateOutputModel(extruder_output)
            configuration.updateConfigurationModel(extruder_config)
| Patola/Cura | plugins/UM3NetworkPrinting/src/Cloud/Models/CloudClusterPrinterStatus.py | Python | lgpl-3.0 | 4,467 |
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.const.eos import EffectBuildStatus
from eos.const.eos import ModAffecteeFilter
from eos.const.eos import ModDomain
from eos.const.eos import ModOperator
from tests.mod_builder.testcase import ModBuilderTestCase
class TestBuilderModinfoAffecteeDomGrp(ModBuilderTestCase):
    """Checks that LocationGroupModifier YAML builds domain_group modifiers
    with the affectee domain mapped correctly from the YAML domain field."""

    def _make_yaml(self, domain):
        """Return modifierInfo YAML for a LocationGroupModifier with *domain*."""
        yaml = (
            '- domain: {}\n func: LocationGroupModifier\n groupID: 55\n'
            ' modifiedAttributeID: 22\n modifyingAttributeID: 11\n'
            ' operator: 6\n')
        return yaml.format(domain)

    def _assert_built(self, yaml_domain, expected_domain):
        """Build an effect for *yaml_domain* and verify the single resulting
        modifier carries *expected_domain* and the fixed attribute wiring."""
        effect_row = {'modifierInfo': self._make_yaml(yaml_domain)}
        modifiers, status = self.run_builder(effect_row)
        self.assertEqual(status, EffectBuildStatus.success)
        self.assertEqual(len(modifiers), 1)
        modifier = modifiers[0]
        self.assertEqual(
            modifier.affectee_filter, ModAffecteeFilter.domain_group)
        self.assertEqual(modifier.affectee_domain, expected_domain)
        self.assertEqual(modifier.affectee_filter_extra_arg, 55)
        self.assertEqual(modifier.affectee_attr_id, 22)
        self.assertEqual(modifier.operator, ModOperator.post_percent)
        self.assertEqual(modifier.affector_attr_id, 11)
        self.assert_log_entries(0)

    def test_domain_none(self):
        # A null domain resolves to the item itself.
        self._assert_built('null', ModDomain.self)

    def test_domain_item(self):
        self._assert_built('itemID', ModDomain.self)

    def test_domain_char(self):
        self._assert_built('charID', ModDomain.character)

    def test_domain_ship(self):
        self._assert_built('shipID', ModDomain.ship)

    def test_domain_target(self):
        self._assert_built('targetID', ModDomain.target)

    def test_domain_other(self):
        # otherID is not a valid domain for location group filters, so the
        # build fails, yields no modifiers, and logs one entry.
        effect_row = {'modifierInfo': self._make_yaml('otherID')}
        modifiers, status = self.run_builder(effect_row)
        self.assertEqual(status, EffectBuildStatus.error)
        self.assertEqual(len(modifiers), 0)
        self.assert_log_entries(1)
| pyfa-org/eos | tests/mod_builder/modinfo/affectee_filter/test_affectee_dom_grp.py | Python | lgpl-3.0 | 5,426 |
from __future__ import annotations
from typing import (
Any,
Awaitable, Callable, Iterable,
AsyncIterator,
Tuple,
Mapping,
)
from aiohttp import web
import aiohttp_cors
# An aiohttp request handler: takes a request, returns a streamed response.
WebRequestHandler = Callable[
    [web.Request],
    Awaitable[web.StreamResponse]
]
# aiohttp-style middleware: wraps a handler and may short-circuit/augment it.
WebMiddleware = Callable[
    [web.Request, WebRequestHandler],
    Awaitable[web.StreamResponse]
]
# Per-origin CORS configuration mapping passed to aiohttp_cors.
CORSOptions = Mapping[str, aiohttp_cors.ResourceOptions]
# Factory building a sub-application plus the middlewares it needs.
AppCreator = Callable[
    [CORSOptions],
    Tuple[web.Application, Iterable[WebMiddleware]]
]
# Same as AppCreator but also receives the plugin's configuration mapping.
PluginAppCreator = Callable[
    [Mapping[str, Any], CORSOptions],
    Tuple[web.Application, Iterable[WebMiddleware]]
]
# Async generator used as an application cleanup context (setup/teardown).
CleanupContext = Callable[[web.Application], AsyncIterator[None]]
| lablup/sorna-manager | src/ai/backend/gateway/types.py | Python | lgpl-3.0 | 720 |
# Copyright (c) 2013 Alon Swartz <alon@turnkeylinux.org>
#
# This file is part of OctoHub.
#
# OctoHub is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
import simplejson as json
class ResponseError(Exception):
    """Raised when the API returns an error response.

    Accessible attributes: error
        error (AttrDict): Parsed error response
    """
    def __init__(self, error):
        Exception.__init__(self, error)
        self.error = error
    def __str__(self):
        # Pretty-print the parsed error payload for readable tracebacks.
        return json.dumps(self.error, indent=1)
class OctoHubError(Exception):
    """Generic OctoHub failure not tied to a parsed API error response."""
    pass
| bmya/odoo-support | adhoc_modules_server/octohub/exceptions.py | Python | lgpl-3.0 | 701 |
#!/usr/bin/python
#===============================================================================
#
# conversion script to create a mbstestlib readable file containing test specifications
# out of an testset file in XML format
#
#===============================================================================
# Input can be given via optional command line parameters.
#
#
# TODO: add check for joint count
# TODO: add model description to output (as comment)
import sys # for io
import xml.dom.minidom # for xml parsing
from glob import glob # for expanding wildcards in cmd line arguements
class _config:
    """Static configuration: default file names, per-case default values and
    the order in which case values are written to the output file."""
    default_input_file = 'testset-example.xml'
    output_file_ext = '.txt'
    # shorthands for the common default values below
    empty_vecn = ""
    zero_vec = "0 0 0"
    unity_mat = "1 0 0 0 1 0 0 0 1"
    # defaults used when a tag is missing from an XML test case;
    # joints_* default to an empty n-vector and must usually be given explicitly
    case_defaults = { 'delta': "0.001",
                     'base_r': zero_vec,
                     'base_R': unity_mat,
                     'base_v': zero_vec,
                     'base_omega': zero_vec,
                     'base_vdot': zero_vec,
                     'base_omegadot': zero_vec,
                     'gravitiy': zero_vec,
                     'joints_q': empty_vecn,
                     'joints_qdot': empty_vecn,
                     'joints_qdotdot': empty_vecn,
                     'joints_tau': empty_vecn,
                     'tcp_r': zero_vec,
                     'tcp_R': unity_mat,
                     'tcp_v': zero_vec,
                     'tcp_omega': zero_vec,
                     'tcp_vdot': zero_vec,
                     'tcp_omegadot': zero_vec,
                     'f_ext': zero_vec,
                     'n_ext': zero_vec
                     }
    # order in which the values of one case are emitted to the raw test file
    case_output_order = [
                     'delta',
                     'base_r',
                     'base_R',
                     'base_v',
                     'base_omega',
                     'base_vdot',
                     'base_omegadot',
                     'gravitiy',
                     'joints_q',
                     'joints_qdot',
                     'joints_qdotdot',
                     'joints_tau',
                     'tcp_r',
                     'tcp_R',
                     'tcp_v',
                     'tcp_omega',
                     'tcp_vdot',
                     'tcp_omegadot',
                     'f_ext',
                     'n_ext'
                     ]
class _state:
    """Mutable script-global state."""
    # set to True as soon as any XML validation/processing error is detected
    error_occured_while_processing_xml = False
    # input file name(s) to process, filled in from the command line
    input_file = ''
def getText(nodelist):
    """Concatenate the data of all direct TEXT_NODE children in *nodelist*.

    Element children (and their text) are ignored; only immediate text
    nodes contribute.
    """
    return ''.join(node.data for node in nodelist
                   if node.nodeType == node.TEXT_NODE)
# inspired by http://code.activestate.com/recipes/52306-to-sort-a-dictionary/
def sortedDict(adict):
    """Return the dictionary's values ordered by their sorted keys."""
    return [adict[key] for key in sorted(adict)]
# parses a specific node and either stores it's value in a dict or the default value
# may set the error bit
def parse_opt(nodename, valuetype, current_case, current_case_value_dict):
    """Parse the optional element *nodename* of *current_case*.

    Stores the value of the attribute *valuetype* (or the default from
    _config.case_defaults when the element is absent) into
    *current_case_value_dict*. Sets the global error flag on duplicate
    elements or on a missing/empty attribute value.
    """
    nodelist = current_case.getElementsByTagName(nodename)
    if nodelist.length == 0:
        # element absent -> fall back to the configured default value
        current_case_value_dict[nodename] = _config.case_defaults.get(nodename)
    elif nodelist.length > 1:
        _state.error_occured_while_processing_xml = True
        print("'" + nodename + "' defined more than once.")
    else:
        # we have one single node to parse
        value = nodelist[0].getAttribute(valuetype)
        # getAttribute() returns '' (never None) for a missing attribute, so
        # a falsy check is needed to actually detect missing/empty values.
        if not value:
            # TODO: more advanced checks with regexp
            _state.error_occured_while_processing_xml = True
            print("'" + nodename + "' has an empty value or wrong type ('" + valuetype + "').")
        else:
            current_case_value_dict[nodename] = value
    return
def convert_xml_testset_2_raw_testset(mbs_test_set):
    """Convert the parsed XML testset root element into raw testset texts.

    Returns a dict mapping output file name -> raw testset content, or an
    empty dict (with the global error flag set) on any validation error.
    """
    raw_testsets = dict([]) # filename:content dict
    for mbs in mbs_test_set.getElementsByTagName('mbs'): # for every file
        file = mbs.getAttribute('file')
        raw_testset = []
        # exactly one model definition is required per file
        if mbs.getElementsByTagName('model').length != 1:
            _state.error_occured_while_processing_xml = True
            print("Only one model allowed per file!")
            return dict([])
        # extract model (description as a '%' comment, then the model text)
        raw_testset.append("% " + mbs.getElementsByTagName('model')[0].getAttribute('desc'))
        raw_testset.append(getText(mbs.getElementsByTagName('model')[0].childNodes))
        # insert separation marker
        raw_testset.append("\nendmodel")
        # now process the cases
        if mbs.getElementsByTagName('case').length == 0:
            _state.error_occured_while_processing_xml = True
            print("No cases defined!")
            return dict([])
        cases = dict([])
        for case in mbs.getElementsByTagName('case'):
            # TODO: sanity check -> number collisions
            # parse case
            case_nr = case.getAttribute('nr')
            case_desc = case.getAttribute('desc')
            case_value_dict = dict([])
            # everything but joints does not have to be defined explicitly
            # TODO: unify these calls in a generic way (e.g. add type to case_output_order and iterate over it)
            parse_opt('delta', 'scalar', case, case_value_dict)
            parse_opt('base_r', 'vector3', case, case_value_dict)
            parse_opt('base_R', 'matrix3x3', case, case_value_dict)
            parse_opt('base_v', 'vector3', case, case_value_dict)
            parse_opt('base_omega', 'vector3', case, case_value_dict)
            parse_opt('base_vdot', 'vector3', case, case_value_dict)
            parse_opt('base_omegadot', 'vector3', case, case_value_dict)
            parse_opt('gravitiy', 'vector3', case, case_value_dict)
            # TODO: checks with n (the number of joints)
            parse_opt('joints_q', 'vector_n', case, case_value_dict)
            parse_opt('joints_qdot', 'vector_n', case, case_value_dict)
            parse_opt('joints_qdotdot', 'vector_n', case, case_value_dict)
            parse_opt('joints_tau', 'vector_n', case, case_value_dict)
            parse_opt('tcp_r', 'vector3', case, case_value_dict)
            parse_opt('tcp_R', 'matrix3x3', case, case_value_dict)
            parse_opt('tcp_v', 'vector3', case, case_value_dict)
            parse_opt('tcp_omega', 'vector3', case, case_value_dict)
            parse_opt('tcp_vdot', 'vector3', case, case_value_dict)
            parse_opt('tcp_omegadot', 'vector3', case, case_value_dict)
            parse_opt('f_ext', 'vector3', case, case_value_dict)
            parse_opt('n_ext', 'vector3', case, case_value_dict)
            if _state.error_occured_while_processing_xml: return dict([])
            # compile raw case output in the configured order
            case_content = ["\n" + case_desc]
            for value_name in _config.case_output_order:
                if case_value_dict.get(value_name) is None :
                    _state.error_occured_while_processing_xml = True
                    print("Not all values defined in one testcase!")
                    return dict([])
                case_content.append(case_value_dict.get(value_name))
            cases.update({case_nr : "\n".join(case_content)})
        # flatten cases (and sort by case number)
        raw_testset.append("\n".join(sortedDict(cases)))
        # update file:testset dict
        raw_testsets.update({file : "\n".join(raw_testset)})
    # return the dict of files:testsets
    return raw_testsets
#===============================================================================
# process command line arguments (i.e. file i/o)
#===============================================================================
# strip a Windows-style directory prefix from the script path for usage output
script_name = sys.argv[0][sys.argv[0].rfind("\\")+1:]
if len(sys.argv) == 1:
    # Keep the file list a list: the processing loop below iterates over
    # _state.input_file, and a bare string would be iterated char by char.
    _state.input_file = [_config.default_input_file]
    print("No command line arguments were given. Defaulting to:")
    print("Input '" + _config.default_input_file + "'")
    print("Usage hint: " + script_name + " [INPUTFILE(s)]\n")
elif len(sys.argv) == 2:
    if sys.argv[1] == "--help":
        print("Usage: " + script_name + " [INPUTFILE(s)]")
        sys.exit()
    else:
        # expand shell wildcards ourselves (needed on Windows)
        _state.input_file = glob(sys.argv[1])
#===============================================================================
# run the conversion
#===============================================================================
for inputfile in _state.input_file :
    xmldom = xml.dom.minidom.parse(inputfile)
    raw_testsets = convert_xml_testset_2_raw_testset(xmldom.firstChild)
    # only write output files if the whole testset parsed cleanly
    if not _state.error_occured_while_processing_xml :
        for k in raw_testsets.keys():
            with open(k, 'w') as raw_testset_file:
                raw_testset_file.write(raw_testsets.get(k))
            print("File '" + k + "' written.")
#===============================================================================
# concluding housekeeping
#===============================================================================
if not _state.error_occured_while_processing_xml:
    print("Conversion successful.")
else:
    print("The xml file could not be processed properly. It most likely contains errors.")
# exit code: the boolean error flag becomes 0 (success) or 1 (failure)
sys.exit(_state.error_occured_while_processing_xml)
| SIM-TU-Darmstadt/mbslib | dependencies/mbstestlib/src/testsetXML2intermediateConverter.py | Python | lgpl-3.0 | 9,479 |
#!/usr/bin/env python3
"""tbar - Terminal Bar
Number to bar in terminal.
"""
__version__ = "0.1"
import sys
from tbar.tbar import TBar
from tbar.reader import Reader
def main(infile, comment, sep, field, regexp,
         max, length, vertical):
    """Read numeric data from *infile* (or stdin when not given) and print
    it as a bar chart in the terminal."""
    source = infile or sys.stdin
    reader = Reader(infile=source, comment=comment, sep=sep, field=field,
                    regexp=regexp)
    bar = TBar(_max=max, length=length, vertical=vertical)
    bar.add_data_itr(reader.data)
    rendered = str(bar)
    print(rendered if rendered else "No data.")
    return
| 10sr/tbar | tbar/__init__.py | Python | unlicense | 557 |