| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
import unittest
from mock import MagicMock
import sys
sys.path.insert(0, "..")
from pysmear.smear_utils import SmearUtils as utils
from pysmear.card_counting import CardCounting
import pydealer
class TestSafeToPlay(unittest.TestCase):
def setUp(self):
self.cc = CardCounting(num_players = 4)
self.trump = "Spades"
self.current_trick = MagicMock()
self.current_trick.trump = self.trump
self.current_trick.lead_suit = "Trump"
self.jack_hearts = pydealer.Card(value="Jack", suit="Hearts")
self.jack_spades = pydealer.Card(value="Jack", suit="Spades")
self.jack_diamonds = pydealer.Card(value="Jack", suit="Diamonds")
self.jack_clubs = pydealer.Card(value="Jack", suit="Clubs")
self.ace_spades = pydealer.Card("Ace", "Spades")
self.king_spades = pydealer.Card("King", "Spades")
self.queen_spades = pydealer.Card("Queen", "Spades")
self.ten_spades = pydealer.Card("10", "Spades")
self.two_diamonds = pydealer.Card("2", "Diamonds")
self.queen_hearts = pydealer.Card("Queen", "Hearts")
self.seven_clubs = pydealer.Card("7", "Clubs")
self.two_spades = pydealer.Card("2", "Spades")
self.cards = [self.ace_spades, self.two_diamonds, self.queen_hearts,
self.seven_clubs, self.jack_clubs, self.two_spades]
self.current_trick.cards = self.cards
def test_jick_is_safe_after_a_k_q_j(self):
self.cc.cards_played["Trump"].append(self.ace_spades)
self.cc.cards_played["Trump"].append(self.king_spades)
self.cc.cards_played["Trump"].append(self.queen_spades)
self.cc.cards_played["Trump"].append(self.jack_spades)
self.current_trick.cards = [ self.two_diamonds ]
safe = self.cc.safe_to_play(0, self.jack_clubs, self.current_trick, [])
self.assertEqual(safe, True)
def test_two_is_safe_after_everyone_is_out_of_trump(self):
self.cc.card_was_played(0, self.ace_spades, self.current_trick)
self.cc.card_was_played(1, self.two_diamonds, self.current_trick)
self.cc.card_was_played(2, self.queen_hearts, self.current_trick)
self.cc.card_was_played(3, self.seven_clubs, self.current_trick)
self.current_trick.cards = []
safe = self.cc.safe_to_play(0, self.two_spades, self.current_trick, [])
self.assertEqual(safe, True)
def test_jick_is_safe_if_only_player_behind_me_is_out(self):
self.cc.player_out_of_cards[1]["Trump"] = True
self.current_trick.cards = [ self.two_diamonds, self.seven_clubs ]
safe = self.cc.safe_to_play(0, self.jack_clubs, self.current_trick, [])
self.assertEqual(safe, True)
class TestHighestCardStillOut(unittest.TestCase):
def setUp(self):
self.cc = CardCounting(num_players = 4)
self.trump = "Spades"
self.current_trick = MagicMock()
self.current_trick.trump = self.trump
self.current_trick.lead_suit = self.trump
self.jack_hearts = pydealer.Card(value="Jack", suit="Hearts")
self.jack_spades = pydealer.Card(value="Jack", suit="Spades")
self.jack_diamonds = pydealer.Card(value="Jack", suit="Diamonds")
self.jack_clubs = pydealer.Card(value="Jack", suit="Clubs")
self.ace_spades = pydealer.Card("Ace", "Spades")
self.king_spades = pydealer.Card("King", "Spades")
self.queen_spades = pydealer.Card("Queen", "Spades")
self.ten_spades = pydealer.Card("10", "Spades")
self.two_diamonds = pydealer.Card("2", "Diamonds")
self.queen_hearts = pydealer.Card("Queen", "Hearts")
self.seven_clubs = pydealer.Card("7", "Clubs")
self.two_spades = pydealer.Card("2", "Spades")
self.cards = [self.ace_spades, self.two_diamonds, self.queen_hearts,
self.seven_clubs, self.jack_clubs, self.two_spades]
self.current_trick.cards = self.cards
def test_jick_is_highest_after_a_k_q_j(self):
self.cc.cards_played["Trump"].append(self.ace_spades)
self.cc.cards_played["Trump"].append(self.king_spades)
self.cc.cards_played["Trump"].append(self.queen_spades)
self.cc.cards_played["Trump"].append(self.jack_spades)
card = self.cc.highest_card_still_out("Spades", True)
self.assertEqual(card, self.jack_clubs)
def test_10_is_highest_after_a_k_q_j_j(self):
self.cc.cards_played["Trump"].append(self.ace_spades)
self.cc.cards_played["Trump"].append(self.king_spades)
self.cc.cards_played["Trump"].append(self.queen_spades)
self.cc.cards_played["Trump"].append(self.jack_spades)
self.cc.cards_played["Trump"].append(self.jack_clubs)
card = self.cc.highest_card_still_out("Spades", True)
self.assertEqual(card, self.ten_spades)
def test_ace_is_highest(self):
card = self.cc.highest_card_still_out("Spades", True)
self.assertEqual(card, self.ace_spades)
def test_ace_is_still_highest_after_played_if_ignored(self):
card = self.cc.highest_card_still_out("Spades", True, self.ace_spades)
self.assertEqual(card, self.ace_spades)
| mkokotovich/pysmear | tests/test_card_counting.py | Python | gpl-3.0 | 5,165 |
import itertools
import logging
import urllib.parse
from datetime import timedelta, datetime, date
import requests
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.utils.translation import gettext_lazy as _
from django.views.generic import FormView
from zds.tutorialv2.forms import ContentCompareStatsURLForm
from zds.tutorialv2.mixins import SingleOnlineContentDetailViewMixin
from zds.tutorialv2.utils import NamedUrl
class ContentStatisticsView(SingleOnlineContentDetailViewMixin, FormView):
template_name = "tutorialv2/stats/index.html"
form_class = ContentCompareStatsURLForm
urls = []
matomo_token_auth = settings.ZDS_APP["site"]["matomo_token_auth"]
matomo_api_url = "{}/index.php?token_auth={}".format(settings.ZDS_APP["site"]["matomo_url"], matomo_token_auth)
matomo_site_id = settings.ZDS_APP["site"]["matomo_site_id"]
logger = logging.getLogger(__name__)
def post(self, request, *args, **kwargs):
self.public_content_object = self.get_public_object()
self.object = self.get_object()
self.versioned_object = self.get_versioned_object()
return super().post(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["urls"] = [(named_url.url, named_url.name) for named_url in self.get_urls_to_render()]
return kwargs
def form_valid(self, form):
self.urls = form.cleaned_data["urls"]
return super().get(self.request)
def get_urls_to_render(self):
all_named_urls = self.get_content_urls()
base_list = self.request.GET.getlist("urls", None) or self.urls
if base_list:
return [named_url for named_url in all_named_urls if named_url.url in base_list]
else:
return all_named_urls
def get_content_urls(self):
content = self.versioned_object
urls = [NamedUrl(content.title, content.get_absolute_url_online(), 0)]
if content.has_extracts():
return urls
for child in content.children:
urls.append(NamedUrl(child.title, child.get_absolute_url_online(), 1))
if not child.has_extracts():
for subchild in child.children:
urls.append(NamedUrl(subchild.title, subchild.get_absolute_url_online(), 2))
return urls
def get_all_statistics(self, urls, start, end, methods):
date_ranges = "{},{}".format(start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d"))
data_request = {"module": "API", "method": "API.getBulkRequest", "format": "json", "filter_limit": -1}
data_structured = {}
for method in methods:
data_structured[method] = []
for index, method_url in enumerate(itertools.product(methods, urls)):
method = method_url[0]
url = method_url[1]
absolute_url = f"{self.request.scheme}://{self.request.get_host()}{url.url}"
param_url = f"pageUrl=={urllib.parse.quote_plus(absolute_url)}"
request_params = {"method": method, "idSite": self.matomo_site_id, "date": date_ranges, "period": "day"}
if method.startswith("Referrers"): # referrers requests use segment for define url
request_params["segment"] = ",".join([param_url])
elif method == "Actions.getPageUrl":
request_params["pageUrl"] = absolute_url
data_request.update({f"urls[{index}]": urllib.parse.urlencode(request_params)})
try:
response_matomo = requests.post(url=self.matomo_api_url, data=data_request)
data = response_matomo.json()
if isinstance(data, dict) and data.get("result", "") == "error":
data = {}
self.logger.error(data.get("message", "Something failed with Matomo reporting system."))
messages.error(
self.request, data.get("message", _(f"Impossible de récupérer les statistiques du site."))
)
for index, method_url in enumerate(itertools.product(methods, urls)):
method = method_url[0]
data_structured[method].append(data[index])
return data_structured
except Exception:
data = {}
self.logger.exception(f"Something failed with Matomo reporting system.")
messages.error(self.request, _(f"Impossible de récupérer les statistiques du site."))
return data
@staticmethod
def get_stat_metrics(data, metric_name):
x = []
y = []
for key, val in data.items():
x.append(key)
if len(val) == 0:
y.append(0)
else:
y.append(val[0].get(metric_name, 0))
return (x, y)
@staticmethod
def get_ref_metrics(data):
refs = {}
for key, val in data.items():
for item in val:
if item["label"] in refs:
refs[item["label"]] += item["nb_visits"]
else:
refs[item["label"]] = item["nb_visits"]
return refs
def get_start_and_end_dates(self):
end_date = self.request.GET.get("end_date", None)
try:
end_date = datetime.strptime(end_date, "%Y-%m-%d").date()
except TypeError:
end_date = date.today()
except ValueError:
end_date = date.today()
messages.error(self.request, _("La date de fin fournie est invalide."))
start_date = self.request.GET.get("start_date", None)
try:
start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
except TypeError:
start_date = end_date - timedelta(days=7)
except ValueError:
start_date = end_date - timedelta(days=7)
messages.error(self.request, _("La date de début fournie est invalide."))
if start_date > end_date:
end_date, start_date = start_date, end_date
return start_date, end_date
def get_display_mode(self, urls):
# TODO make display_mode an enum ?
# Good idea, but not straightforward for the template integration
if len(urls) == 1:
return "details"
if len(urls) == len(self.get_content_urls()):
return "global"
return "comparison"
@staticmethod
def get_cumulative(stats):
cumul = {"total": 0}
for info_date, infos_stat in stats.items():
cumul["total"] += len(infos_stat)
for info_stat in infos_stat:
for key, val in info_stat.items():
if type(val) == str:
continue
if key in cumul:
cumul[key] += int(val)
else:
cumul[key] = int(val)
return cumul
@staticmethod
def merge_ref_to_data(metrics, refs):
for key, item in refs.items():
if key in metrics:
metrics[key] += item
else:
metrics[key] = item
return metrics
@staticmethod
def merge_report_to_global(reports, fields):
metrics = {}
for key, item in reports.items():
for field, is_avg in fields:
if field in metrics:
metrics[field] = (
metrics[field][0],
[i + j for (i, j) in zip(metrics[field][1], item.get(field)[1])],
)
else:
metrics[field] = item.get(field)
return metrics
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if not (self.is_author or self.is_staff):
raise PermissionDenied
urls = self.get_urls_to_render()
start_date, end_date = self.get_start_and_end_dates()
display_mode = self.get_display_mode(urls)
reports = {}
cumulative_stats = {}
referrers = {}
type_referrers = {}
keywords = {}
report_field = [("nb_uniq_visitors", False), ("nb_hits", False), ("avg_time_on_page", True)]
# Each function sends only one bulk request for all the urls
# Each variable is a list of dictionaries (one for each url)
all = self.get_all_statistics(
urls,
start_date,
end_date,
["Referrers.getReferrerType", "Referrers.getWebsites", "Referrers.getKeywords", "Actions.getPageUrl"],
)
all_stats = all["Actions.getPageUrl"]
all_ref_websites = all["Referrers.getWebsites"]
all_ref_types = all["Referrers.getReferrerType"]
all_ref_keyword = all["Referrers.getKeywords"]
for index, url in enumerate(urls):
cumul_stats = self.get_cumulative(all_stats[index])
reports[url] = {}
cumulative_stats[url] = {}
for item, is_avg in report_field:
reports[url][item] = self.get_stat_metrics(all_stats[index], item)
if is_avg:
cumulative_stats[url][item] = 0
if cumul_stats.get("total") > 0:
cumulative_stats[url][item] = cumul_stats.get(item, 0) / cumul_stats.get("total")
else:
cumulative_stats[url][item] = cumul_stats.get(item, 0)
referrers = self.merge_ref_to_data(referrers, self.get_ref_metrics(all_ref_websites[index]))
type_referrers = self.merge_ref_to_data(type_referrers, self.get_ref_metrics(all_ref_types[index]))
keywords = self.merge_ref_to_data(keywords, self.get_ref_metrics(all_ref_keyword[index]))
if display_mode.lower() == "global":
reports = {NamedUrl(display_mode, "", 0): self.merge_report_to_global(reports, report_field)}
context.update(
{
"display": display_mode,
"urls": urls,
"reports": reports,
"cumulative_stats": cumulative_stats,
"referrers": referrers,
"type_referrers": type_referrers,
"keywords": keywords,
}
)
return context
| ChantyTaguan/zds-site | zds/tutorialv2/views/statistics.py | Python | gpl-3.0 | 10,353 |
from twisted.internet import defer
from moira.api.request import delayed
from moira.api.resources.redis import RedisResource
class Login(RedisResource):
def __init__(self, db):
RedisResource.__init__(self, db)
self.putChild("settings", Settings(db))
@delayed
def render_GET(self, request):
login = request.login
self.write_json(request, {'login': login})
class Settings(RedisResource):
def __init__(self, db):
RedisResource.__init__(self, db)
@delayed
@defer.inlineCallbacks
def render_GET(self, request):
login = request.login
settings = {"login": login,
"subscriptions": [],
"contacts": []}
subs = yield self.db.getUserSubscriptions(login)
contacts = yield self.db.getUserContacts(login)
yield self.db.join(contacts, self.db.getContact, settings["contacts"])
yield self.db.join(subs, self.db.getSubscription, settings["subscriptions"])
self.write_json(request, settings)
| moira-alert/worker | moira/api/resources/user.py | Python | gpl-3.0 | 1,046 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from unittest import TestCase
from lib import unidades
from lib.meos import MEoS
class R152a(MEoS):
"""Multiparameter equation of state for R152a"""
name = "1,1-difluoroethane"
CASNumber = "75-37-6"
formula = "CHF2CH3"
synonym = "R152a"
_refPropName = "R152A"
_coolPropName = "R152A"
rhoc = unidades.Density(368.)
Tc = unidades.Temperature(386.411)
Pc = unidades.Pressure(4516.75, "kPa")
M = 66.051 # g/mol
Tt = unidades.Temperature(154.56)
Tb = unidades.Temperature(249.127)
f_acent = 0.27521
momentoDipolar = unidades.DipoleMoment(2.262, "Debye")
id = 245
CP1 = {
# Cp/R relation in paper
# Tr terms in polynomial, so the resulting terms are:
# a0 = c0
# a1 = c1/Tc
# a2 = c2/Tc**2
# a3 = c3/Tc**3
"ao": 3.354951,
"an": [4.245301/Tc, 3.735248/Tc**2, -1.608254/Tc**3],
"pow": [1, 2, 3]}
Fi1 = {"R": 8.314471,
"ao_log": [1, -1],
"pow": [0, 1, -0.25, -2, -4],
"ao_pow": [10.87227, 6.839515, -20.78887, -0.6539092, 0.03342831]}
Fi2 = {"ao_log": [1, -1],
"pow": [0, 1, -0.5, 0.25],
"ao_pow": [-9.508135074, 6.812068779, -7.285916044, 6.741130104],
"ao_exp": [1.978152028, 5.880826311],
"titao": [1.753741145, 4.360150337]}
Fi3 = {"ao_log": [1, 0.0434935],
"pow": [0, 1, -1, -2, -3],
"ao_pow": [-5.969835, 7.421932, -5.56713, 0.436101, -0.0196281]}
MBWR = {
"__type__": "MBWR",
"__name__": "MBWR equation of state for R-152a of Outcalt (1996)",
"__doi__": {"autor": "Outcalt, S.L., McLinden, M.O.",
"title": "A modified Benedict-Webb-Rubin Equation of "
"State for the Thermodynamic Properties of R152a "
"(1,1-difluoroethane)",
"ref": "J. Phys. Chem. Ref. Data 25(2) (1996) 605-636",
"doi": "10.1063/1.555979"},
"R": 8.314471,
"Tc": 386.411, "Pc": 4516.75, "rhoc": 5.57145,
"cp": CP1,
"ref": "IIR",
"Tmin": Tt, "Tmax": 520.0, "Pmax": 60000.0, "rhomax": 18.07,
"b": [None, -0.101623317192e-1, 0.215677129618e1, -0.648581254334e2,
0.122535596303e5, -0.206805988259e7, -0.379836507323e-3,
-0.441333232984, 0.158248874708e3, 0.564062216256e6,
-0.124115350431e-3, 0.494972178825, -0.208058039834e3,
-0.131403187106e-1, 0.212083848812, -0.151263785082e3,
0.311108025395e-1, -0.115280979645e-2, 0.437040025765,
-0.965596535032e-2, -0.242705525346e6, -0.518042519989e8,
-0.119070545681e5, 0.459333195257e9, -0.719317286511e2,
-0.840102861460e4, -0.102910957390e1, -0.325913880841e5,
-0.412362182230e-2, 0.175102808144e1, -0.198636624640e-4,
-0.421363036104e-2, -0.198696760653e1]}
outcalt = {
"__type__": "Helmholtz",
"__name__": "Helmholtz transform of MBWR EOS for R-152a of Outcalt "
"and McLinden (1996).",
"__doi__": {"autor": "Outcalt, S.L., McLinden, M.O.",
"title": "A modified Benedict-Webb-Rubin Equation of "
"State for the Thermodynamic Properties of R152a "
"(1,1-difluoroethane)",
"ref": "J. Phys. Chem. Ref. Data 25(2) (1996) 605-636",
"doi": "10.1063/1.555979"},
"R": 8.314471,
"cp": CP1,
"ref": "IIR",
"Tmin": Tt, "Tmax": 500.0, "Pmax": 60000.0, "rhomax": 18.07,
"nr1": [-0.354657949982e1, -0.364631280620, 0.333233335558e-1,
-0.6809684351170, 0.735212646801e1, -0.112473063838e2,
0.549916715657e1, -0.240186327322e1, -0.709036447042e-1,
-0.213200886814, 0.197839736368, 0.182494769909e1,
-0.860546479693e-1, 0.888137366540, -0.966127346370,
-0.985223479324e-1, 0.183419368472e-1, -0.338550204252e-1,
0.124921101016e-1, -0.221056706423e-2, 0.216879133161e-2,
-0.233597690478e-3],
"d1": [0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 7, 7,
8],
"t1": [3, 4, 5, 0, 0.5, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 1, 2, 3, 2, 2, 3,
3],
"nr2": [0.354657949982e1, 0.364631280620, -0.333233335558e-1,
0.276133830254e1, -0.691185711880e-1, -0.333233335558e-1,
0.782761327717, -0.345592855940e-1, 0.137813531906,
0.186173126153, -0.341119393297e-1, 0.459378439687e-1,
0.216470012607e-1, -0.852798483242e-2, 0.620394038634e-2,
0.185210290813e-2, 0.101674662734e-2, 0.124078807727e-2],
"d2": [0, 0, 0, 2, 2, 2, 4, 4, 4, 6, 6, 6, 8, 8, 8, 10, 10, 10],
"t2": [3, 4, 5, 3, 4, 5, 3, 4, 5, 3, 4, 5, 3, 4, 5, 3, 4, 5],
"c2": [2]*18,
"gamma2": [1]*18}
kim = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for R-152a of Kim (1997).",
"__doi__": {"autor": "Kim, Y., Borgnakke, C., Sonntag, R.E.",
"title": "Equation of State for 1,1-difluoroethane "
" (R152a)",
"ref": "International Journal of Energy Research 21 (7)"
"(1997) 575-589",
"doi": "10.1002/(sici)1099-114x(19970610)21:7<575::"
"aid-er272>3.0.co;2-f"},
"R": 8.314471,
"cp": Fi3,
"ref": "IIR",
"Tc": 386.4, "Pc": 4519., "rhoc": 368/M,
"Tmin": 213, "Tmax": 433, "Pmax": 20000.0, "rhomax": 18.07,
"nr1": [3.27282477979913, -5.25887189160385, 0.849951067158520,
-0.326056649432851, 0.147973856820353, 0.463200609308586e-2],
"d1": [1, 1, 1, 1, 2, 5],
"t1": [1, 1.5, 3, 5, 0.5, 1],
"nr2": [-0.184693035421790e-1, -0.529265795606284, 1.39788588805247,
-0.826528289800619, 0.603238985406408, 0.184020254678691e-9,
0.198000633690890e-1, 0.385227997762326e-1,
-0.354915684935072e-1, -0.146266261800962e-3,
0.385244461907819e-4, -0.930695615881793e-7,
0.792443305748410e-2, -0.117773096693244e-1,
0.780856661432880e-2, -0.335895387327679e-2,
-0.905744836093298e-4, 0.348630546773750e-3,
0.167579895771929e-1, -0.159255383659542e-1],
"d2": [1.9, 2.2, 2.2, 2.5, 3.0, 3.3, 4.4, 4.9, 5.3, 6.6, 9.7, 13.1,
4.6, 4.0, 5.2, 5.3, 13.3, 11.9, 4.1, 4.1],
"t2": [8.0, 1.4, 3.1, 5.0, 5.5, 25.5, 5.2, 3.3, 3.5, 6.7, 5.3, 5.7,
6.4, 30.0, 28.4, 8.2, 5.9, 20.3, 27.1, 29.3],
"c2": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 5, 6, 6],
"gamma2": [1]*20}
tillner = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for R-152a of Tillner-Roth "
"(1995).",
"__doi__": {"autor": "Tillner-Roth, R.",
"title": "A Fundamental Equation of State for "
"1,1-Difluoroethane (HFC-152a)",
"ref": "Int. J. Thermophys., 16(1) (1995) 91-100",
"doi": "10.1007/BF01438960"},
"R": 8.314471,
"M": 66.051, "Tc": 386.41,
"cp": Fi1,
"ref": "IIR",
"Tmin": Tt, "Tmax": 435.0, "Pmax": 30000.0, "rhomax": 18.03,
"nr1": [0.3552260, -0.1425660e1, -0.4631621e-1, 0.6903546e-1,
0.1975710e-1, 0.7486977e-3, 0.4642204e-3],
"d1": [1, 1, 1, 1.5, 3, 6, 6],
"t1": [0, 1.5, 3, -0.5, -0.5, -0.5, 1.5],
"nr2": [-0.2603396, -0.7624212e-1, 0.2233522, 0.1992515e-1, 0.3449040,
-0.4963849, 0.1290719, 0.9760790e-3, 0.5066545e-2,
-0.1402020e-1, 0.5169918e-2, 0.2679087e-3],
"d2": [1, 1, 3, 4, 1, 1, 1, 8, 2, 3, 5, 6],
"t2": [3, 4, 3, 2, 4, 5, 6, 5, 12.5, 25, 20, 25],
"c2": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
"gamma2": [1]*12}
shortSpan = {
"__type__": "Helmholtz",
"__name__": "short Helmholtz equation of state for R-152a of Span and "
"Wagner (2003)",
"__doi__": {"autor": "Span, R., Wagner, W.",
"title": "Equations of State for Technical Applications. "
"III. Results for Polar Fluids",
"ref": "Int. J. Thermophys., 24(1) (2003) 111-162",
"doi": "10.1023/A:1022362231796"},
"R": 8.31451,
"cp": Fi1,
"ref": "IIR",
"Tmin": Tt, "Tmax": 600.0, "Pmax": 100000.0, "rhomax": 18.1,
"nr1": [0.95702326, -2.3707196, 0.18748463, 0.063800843, 1.6625977e-4],
"d1": [1, 1, 1, 3, 7],
"t1": [0.25, 1.25, 1.5, 0.25, 0.875],
"nr2": [0.82208165e-1, 0.57243518, 0.39476701e-2, -0.23848654,
-0.80711618e-1, -0.73103558e-1, -0.15538724e-1],
"d2": [1, 2, 5, 1, 1, 4, 2],
"t2": [2.375, 2, 2.125, 3.5, 6.5, 4.75, 12.5],
"c2": [1, 1, 1, 2, 2, 2, 3],
"gamma2": [1]*12}
astina = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for R-152a of Astina (2004)",
"__doi__": {"autor": "Astina, I.M., Sato, H.",
"title": "A Rigorous Thermodynamic Property Model for "
"Fluid-Phase 1,1-Difluoroethane (R-152a)",
"ref": "Int. J. Thermophys., 25(6) (2004) 1713-1733",
"doi": "10.1007/s10765-004-7731-8"},
"R": 8.314472,
"cp": Fi2,
"ref": "IIR",
"M": 66.05, "Tc": 386.41, "Pc": 4516, "rhoc": 368/66.05,
"Tmin": Tt, "Tmax": 450.0, "Pmax": 60000.0, "rhomax": 18.04,
"nr1": [1.753847317, -4.049760759, -2.277389257e-1, 7.087751950e-1,
-0.5528619502, -3.025046686e-2, 0.1396289974, 1.121238954e-4],
"d1": [1, 1, 1, 2, 2, 3, 3, 4],
"t1": [0.5, 1.125, 2.875, 0.875, 1.875, 0.5, 1.875, 4],
"nr2": [1.181005890, 1.535785579, 7.468363045e-1, -1.252266405e-1,
-3.898223986e-2, -7.260588801e-2, -2.659302250e-3,
4.210849329e-3, 2.015953966e-4],
"d2": [1, 2, 3, 1, 2, 3, 3, 4, 5],
"t2": [1.25, 2, 2.75, 6, 9, 6, 22, 20, 32],
"c2": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"gamma2": [1]*9}
eq = MBWR, outcalt, kim, tillner, shortSpan, astina
_PR = [0.1115, -23.1484]
_surface = {"sigma": [0.05808], "exp": [1.2115]}
_vapor_Pressure = {
"eq": 3,
"n": [-0.74821e1, 0.21105e1, -0.20761e1, -0.35539e1, 0.58004],
"t": [1.0, 1.5, 2.2, 4.8, 6.2]}
_liquid_Density = {
"eq": 1,
"n": [0.19914e2, -0.68624e2, 0.99821e2, -0.77984e2, 0.29913e2],
"t": [0.56, 0.76, 0.95, 1.2, 1.4]}
_vapor_Density = {
"eq": 2,
"n": [-.33621e1, -.85985e1, -.2683e1, -.2414e2, -.43159e2, -.28045e2],
"t": [0.406, 1.42, 3.6, 3.9, 8.0, 9.0]}
visco0 = {"__name__": "Krauss (1996)",
"__doi__": {
"autor": "Krauss, R., Weiss, V.C., Edison, T.A., Sengers, "
"J.V., Stephan, K.",
"title": "Transport Properties of 1,1-Difluoroethane "
"(R152a)",
"ref": "Int. J. Thermophysics 17:731-757, 1996.",
"doi": "10.1007/BF01439187"},
"eq": 1, "omega": 1,
"M": 66.05, "ek": 354.84, "sigma": 0.46115,
"n_chapman": 0.2169614/M**0.5,
"collision": [0.4425728, -0.5138403, 0.1547566, -0.02821844,
0.001578286],
"rhoref_res": 368, "muref_res": 51.12,
"nr": [-0.139986563, -0.0737927, 0.517924, -0.308875, 0.108049],
"tr": [0, 0, 0, 0, 0],
"dr": [0, 1, 2, 3, 4],
"nr_num": [-0.408387],
"tr_num": [0],
"dr_num": [0],
"nr_den": [1, -2.91733],
"tr_den": [0, 0],
"dr_den": [1, 0]}
_viscosity = visco0,
thermo0 = {"__name__": "Krauss (1996)",
"__doi__": {
"autor": "Krauss, R., Weiss, V.C., Edison, T.A., Sengers, "
"J.V., Stephan, K.",
"title": "Transport Properties of 1,1-Difluoroethane "
"(R152a)",
"ref": "Int. J. Thermophysics 17:731-757, 1996.",
"doi": "10.1007/BF01439187"},
"eq": 1,
"M": 66.05, "Tc": 386.411, "Pc": 4520,
"Toref": 1., "koref": 1e-3,
"no": [-14.942, 0.0973283],
"to": [0, 1],
"Tref_res": 1., "rhoref_res": 368, "kref_res": 1.155e-3,
"nr": [9.1809, 11.8577, -5.4473, 1.71379],
"tr": [0, 0, 0, 0],
"dr": [1, 2, 3, 4],
"critical": 3,
"gnu": 0.63, "gamma": 1.239, "R0": 1.03, "Xio": 1.894e-10,
"gam0": 0.0487, "qd": 4.37e-10, "Tcref": 579.6165}
_thermal = thermo0,
class Test(TestCase):
def test_Outcalt(self):
# Selected point from Table 6, Pag 616, saturation states
st = R152a(T=-118+273.15, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 0.000069)
self.assertEqual(round(st.Liquido.rho, 1), 1191.8)
self.assertEqual(round(st.Gas.rho, 4), 0.0036)
self.assertEqual(round(st.Liquido.h.kJkg, 2), 14.67)
self.assertEqual(round(st.Gas.h.kJkg, 2), 419.73)
self.assertEqual(round(st.Liquido.s.kJkgK, 4), 0.1176)
self.assertEqual(round(st.Gas.s.kJkgK, 4), 2.7284)
self.assertEqual(round(st.Liquido.cv.kJkgK, 3), 0.998)
self.assertEqual(round(st.Gas.cv.kJkgK, 3), 0.574)
self.assertEqual(round(st.Liquido.cp.kJkgK, 3), 1.480)
self.assertEqual(round(st.Gas.cp.kJkgK, 3), 0.700)
self.assertEqual(round(st.Liquido.w, 1), 1396.1)
self.assertEqual(round(st.Gas.w, 1), 154.3)
st = R152a(T=-100+273.15, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 0.000579)
self.assertEqual(round(st.Liquido.rho, 1), 1158.7)
self.assertEqual(round(st.Gas.rho, 4), 0.0266)
self.assertEqual(round(st.Liquido.h.kJkg, 2), 41.75)
self.assertEqual(round(st.Gas.h.kJkg, 2), 432.59)
self.assertEqual(round(st.Liquido.s.kJkgK, 4), 0.2827)
self.assertEqual(round(st.Gas.s.kJkgK, 4), 2.5399)
self.assertEqual(round(st.Liquido.cv.kJkgK, 3), 1.030)
self.assertEqual(round(st.Gas.cv.kJkgK, 3), 0.613)
self.assertEqual(round(st.Liquido.cp.kJkgK, 3), 1.518)
self.assertEqual(round(st.Gas.cp.kJkgK, 3), 0.740)
self.assertEqual(round(st.Liquido.w, 1), 1274.9)
self.assertEqual(round(st.Gas.w, 1), 162.0)
st = R152a(T=-50+273.15, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 0.027425)
self.assertEqual(round(st.Liquido.rho, 1), 1063.7)
self.assertEqual(round(st.Gas.rho, 4), 0.9936)
self.assertEqual(round(st.Liquido.h.kJkg, 2), 118.62)
self.assertEqual(round(st.Gas.h.kJkg, 2), 470.40)
self.assertEqual(round(st.Liquido.s.kJkgK, 4), 0.6723)
self.assertEqual(round(st.Gas.s.kJkgK, 4), 2.2487)
self.assertEqual(round(st.Liquido.cv.kJkgK, 3), 1.042)
self.assertEqual(round(st.Gas.cv.kJkgK, 3), 0.738)
self.assertEqual(round(st.Liquido.cp.kJkgK, 3), 1.567)
self.assertEqual(round(st.Gas.cp.kJkgK, 3), 0.877)
self.assertEqual(round(st.Liquido.w, 1), 1016.4)
self.assertEqual(round(st.Gas.w, 1), 179.5)
st = R152a(T=273.15, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 0.263992)
self.assertEqual(round(st.Liquido.rho, 1), 959.1)
self.assertEqual(round(st.Gas.rho, 4), 8.3589)
self.assertEqual(round(st.Liquido.h.kJkg, 2), 200.00)
self.assertEqual(round(st.Gas.h.kJkg, 2), 507.11)
self.assertEqual(round(st.Liquido.s.kJkgK, 4), 1.0000)
self.assertEqual(round(st.Gas.s.kJkgK, 4), 2.1243)
self.assertEqual(round(st.Liquido.cv.kJkgK, 3), 1.100)
self.assertEqual(round(st.Gas.cv.kJkgK, 3), 0.898)
self.assertEqual(round(st.Liquido.cp.kJkgK, 3), 1.697)
self.assertEqual(round(st.Gas.cp.kJkgK, 3), 1.094)
self.assertEqual(round(st.Liquido.w, 1), 772.0)
self.assertEqual(round(st.Gas.w, 1), 187.4)
st = R152a(T=50+273.15, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 1.177382)
self.assertEqual(round(st.Liquido.rho, 1), 830.8)
self.assertEqual(round(st.Gas.rho, 4), 37.0576)
self.assertEqual(round(st.Liquido.h.kJkg, 2), 290.50)
self.assertEqual(round(st.Gas.h.kJkg, 2), 535.93)
self.assertEqual(round(st.Liquido.s.kJkgK, 4), 1.3003)
self.assertEqual(round(st.Gas.s.kJkgK, 4), 2.0598)
self.assertEqual(round(st.Liquido.cv.kJkgK, 3), 1.182)
self.assertEqual(round(st.Gas.cv.kJkgK, 3), 1.092)
self.assertEqual(round(st.Liquido.cp.kJkgK, 3), 1.957)
self.assertEqual(round(st.Gas.cp.kJkgK, 3), 1.489)
self.assertEqual(round(st.Liquido.w, 1), 519.9)
self.assertEqual(round(st.Gas.w, 1), 178.9)
st = R152a(T=100+273.15, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 3.505025)
self.assertEqual(round(st.Liquido.rho, 1), 618.5)
self.assertEqual(round(st.Gas.rho, 4), 145.7543)
self.assertEqual(round(st.Liquido.h.kJkg, 2), 403.59)
self.assertEqual(round(st.Gas.h.kJkg, 2), 536.28)
self.assertEqual(round(st.Liquido.s.kJkgK, 4), 1.6151)
self.assertEqual(round(st.Gas.s.kJkgK, 4), 1.9707)
self.assertEqual(round(st.Liquido.cv.kJkgK, 3), 1.322)
self.assertEqual(round(st.Gas.cv.kJkgK, 3), 1.346)
self.assertEqual(round(st.Liquido.cp.kJkgK, 3), 3.495)
self.assertEqual(round(st.Gas.cp.kJkgK, 3), 3.776)
self.assertEqual(round(st.Liquido.w, 1), 233.5)
self.assertEqual(round(st.Gas.w, 1), 143.1)
st = R152a(T=112+273.15, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 4.408087)
self.assertEqual(round(st.Liquido.rho, 1), 473.0)
self.assertEqual(round(st.Gas.rho, 4), 263.7988)
self.assertEqual(round(st.Liquido.h.kJkg, 2), 451.59)
self.assertEqual(round(st.Gas.h.kJkg, 2), 506.12)
self.assertEqual(round(st.Liquido.s.kJkgK, 4), 1.7371)
self.assertEqual(round(st.Gas.s.kJkgK, 4), 1.8787)
self.assertEqual(round(st.Liquido.cv.kJkgK, 3), 1.422)
self.assertEqual(round(st.Gas.cv.kJkgK, 3), 1.458)
self.assertEqual(round(st.Liquido.cp.kJkgK, 3), 22.908)
self.assertEqual(round(st.Gas.cp.kJkgK, 3), 31.032)
self.assertEqual(round(st.Liquido.w, 1), 139.1)
self.assertEqual(round(st.Gas.w, 1), 125.5)
# Selected points from Table 7, pag 618, single phase region
st = R152a(T=-100+273.15, P=1e4)
self.assertEqual(round(st.rho, 1), 1158.7)
self.assertEqual(round(st.h.kJkg, 2), 41.76)
self.assertEqual(round(st.s.kJkgK, 4), 0.2827)
self.assertEqual(round(st.cv.kJkgK, 3), 1.030)
self.assertEqual(round(st.cp.kJkgK, 3), 1.518)
self.assertEqual(round(st.w, 1), 1274.9)
st = R152a(T=-50+273.15, P=2e4)
self.assertEqual(round(st.rho, 3), 0.721)
self.assertEqual(round(st.h.kJkg, 2), 470.93)
self.assertEqual(round(st.s.kJkgK, 4), 2.2903)
self.assertEqual(round(st.cv.kJkgK, 3), 0.734)
self.assertEqual(round(st.cp.kJkgK, 3), 0.869)
self.assertEqual(round(st.w, 1), 180.1)
st = R152a(T=240+273.15, P=4e4)
self.assertEqual(round(st.rho, 3), 0.620)
self.assertEqual(round(st.h.kJkg, 2), 813.67)
self.assertEqual(round(st.s.kJkgK, 4), 3.1512)
self.assertEqual(round(st.cv.kJkgK, 3), 1.362)
self.assertEqual(round(st.cp.kJkgK, 3), 1.488)
self.assertEqual(round(st.w, 1), 265.4)
st = R152a(T=-35+273.15, P=6e4)
self.assertEqual(round(st.rho, 3), 2.063)
self.assertEqual(round(st.h.kJkg, 2), 481.84)
self.assertEqual(round(st.s.kJkgK, 4), 2.2021)
self.assertEqual(round(st.cv.kJkgK, 3), 0.782)
self.assertEqual(round(st.cp.kJkgK, 3), 0.931)
self.assertEqual(round(st.w, 1), 183.2)
st = R152a(T=-25+273.15, P=1e5)
self.assertEqual(round(st.rho, 1), 1013.2)
self.assertEqual(round(st.h.kJkg, 2), 158.48)
self.assertEqual(round(st.s.kJkgK, 4), 0.8413)
self.assertEqual(round(st.cv.kJkgK, 3), 1.067)
self.assertEqual(round(st.cp.kJkgK, 3), 1.622)
self.assertEqual(round(st.w, 1), 894.3)
st = R152a(T=50+273.15, P=101325)
self.assertEqual(round(st.rho, 3), 2.531)
self.assertEqual(round(st.h.kJkg, 2), 566.38)
self.assertEqual(round(st.s.kJkgK, 4), 2.4397)
self.assertEqual(round(st.cv.kJkgK, 3), 0.963)
self.assertEqual(round(st.cp.kJkgK, 3), 1.100)
self.assertEqual(round(st.w, 1), 212.1)
st = R152a(T=-10+273.15, P=2e5)
self.assertEqual(round(st.rho, 1), 981.3)
self.assertEqual(round(st.h.kJkg, 2), 183.17)
self.assertEqual(round(st.s.kJkgK, 4), 0.9375)
self.assertEqual(round(st.cv.kJkgK, 3), 1.086)
self.assertEqual(round(st.cp.kJkgK, 3), 1.664)
self.assertEqual(round(st.w, 1), 821.2)
st = R152a(T=-100+273.15, P=4e5)
self.assertEqual(round(st.rho, 1), 1159.0)
self.assertEqual(round(st.h.kJkg, 2), 42.00)
self.assertEqual(round(st.s.kJkgK, 4), 0.2821)
self.assertEqual(round(st.cv.kJkgK, 3), 1.030)
self.assertEqual(round(st.cp.kJkgK, 3), 1.517)
self.assertEqual(round(st.w, 1), 1276.1)
st = R152a(T=25+273.15, P=6e5)
self.assertEqual(round(st.rho, 1), 899.5)
self.assertEqual(round(st.h.kJkg, 2), 243.73)
self.assertEqual(round(st.s.kJkgK, 4), 1.1519)
self.assertEqual(round(st.cv.kJkgK, 3), 1.138)
self.assertEqual(round(st.cp.kJkgK, 3), 1.800)
self.assertEqual(round(st.w, 1), 648.0)
st = R152a(T=240+273.15, P=1e6)
self.assertEqual(round(st.rho, 3), 15.938)
self.assertEqual(round(st.h.kJkg, 2), 806.30)
self.assertEqual(round(st.s.kJkgK, 4), 2.7350)
self.assertEqual(round(st.cv.kJkgK, 3), 1.369)
self.assertEqual(round(st.cp.kJkgK, 3), 1.519)
self.assertEqual(round(st.w, 1), 260.0)
st = R152a(T=70+273.15, P=2e6)
self.assertEqual(round(st.rho, 1), 765.8)
self.assertEqual(round(st.h.kJkg, 2), 331.04)
self.assertEqual(round(st.s.kJkgK, 4), 1.4188)
self.assertEqual(round(st.cv.kJkgK, 3), 1.225)
self.assertEqual(round(st.cp.kJkgK, 3), 2.171)
self.assertEqual(round(st.w, 1), 415.3)
st = R152a(T=100+273.15, P=4e6)
self.assertEqual(round(st.rho, 1), 638.3)
self.assertEqual(round(st.h.kJkg, 2), 400.18)
self.assertEqual(round(st.s.kJkgK, 4), 1.6038)
self.assertEqual(round(st.cv.kJkgK, 3), 1.309)
self.assertEqual(round(st.cp.kJkgK, 3), 3.008)
self.assertEqual(round(st.w, 1), 261.3)
st = R152a(T=273.15, P=6e6)
self.assertEqual(round(st.rho, 1), 973.0)
self.assertEqual(round(st.h.kJkg, 2), 202.26)
self.assertEqual(round(st.s.kJkgK, 4), 0.9865)
self.assertEqual(round(st.cv.kJkgK, 3), 1.102)
self.assertEqual(round(st.cp.kJkgK, 3), 1.667)
self.assertEqual(round(st.w, 1), 816.8)
st = R152a(T=-100+273.15, P=1e7)
self.assertEqual(round(st.rho, 1), 1167.5)
self.assertEqual(round(st.h.kJkg, 2), 48.03)
self.assertEqual(round(st.s.kJkgK, 4), 0.2693)
self.assertEqual(round(st.cv.kJkgK, 3), 1.040)
self.assertEqual(round(st.cp.kJkgK, 3), 1.509)
self.assertEqual(round(st.w, 1), 1306.2)
st = R152a(T=200+273.15, P=2e7)
self.assertEqual(round(st.rho, 1), 541.5)
self.assertEqual(round(st.h.kJkg, 2), 586.81)
self.assertEqual(round(st.s.kJkgK, 4), 1.9821)
self.assertEqual(round(st.cv.kJkgK, 3), 1.396)
self.assertEqual(round(st.cp.kJkgK, 3), 2.167)
self.assertEqual(round(st.w, 1), 327.7)
st = R152a(T=-50+273.15, P=4e7)
self.assertEqual(round(st.rho, 1), 1111.4)
self.assertEqual(round(st.h.kJkg, 2), 142.13)
self.assertEqual(round(st.s.kJkgK, 4), 0.6132)
self.assertEqual(round(st.cv.kJkgK, 3), 1.058)
self.assertEqual(round(st.cp.kJkgK, 3), 1.514)
self.assertEqual(round(st.w, 1), 1200.6)
st = R152a(T=240+273.15, P=6e7)
self.assertEqual(round(st.rho, 1), 730.8)
self.assertEqual(round(st.h.kJkg, 2), 645.96)
self.assertEqual(round(st.s.kJkgK, 4), 1.9773)
self.assertEqual(round(st.cv.kJkgK, 3), 1.470)
self.assertEqual(round(st.cp.kJkgK, 3), 1.879)
self.assertEqual(round(st.w, 1), 599.3)
def test_shortSpan(self):
# Table III, Pag 117
st = R152a(T=500, rho=500, eq="shortSpan")
self.assertEqual(round(st.cp0.kJkgK, 4), 1.4632)
self.assertEqual(round(st.P.MPa, 3), 21.594)
self.assertEqual(round(st.cp.kJkgK, 4), 2.1580)
st2 = R152a(T=600, rho=100, eq="shortSpan")
self.assertEqual(round(st2.h.kJkg-st.h.kJkg, 2), 270.60)
self.assertEqual(round(st2.s.kJkgK-st.s.kJkgK, 5), 0.60934)
def test_astina(self):
# Table III, Pag 1719
st = R152a(T=200, P=1e4, eq="astina")
self.assertEqual(round(st.rho, 2), 1108.30)
self.assertEqual(round(st.cv.kJkgK, 5), 1.02563)
self.assertEqual(round(st.cp.kJkgK, 5), 1.53226)
self.assertEqual(round(st.w, 2), 1144.46)
self.assertEqual(round(st.h.kJkg, 4), 83.0731)
self.assertEqual(round(st.s.kJkgK, 6), 0.504046)
st = R152a(T=250, P=1e5, eq="astina")
self.assertEqual(round(st.rho, 5), 3.32533)
self.assertEqual(round(st.cv.kJkgK, 5), 0.85146)
self.assertEqual(round(st.cp.kJkgK, 5), 1.02093)
self.assertEqual(round(st.w, 3), 185.395)
self.assertEqual(round(st.h.kJkg, 3), 490.101)
self.assertEqual(round(st.s.kJkgK, 5), 2.17402)
st = R152a(T=300, P=5e5, eq="astina")
self.assertEqual(round(st.rho, 4), 14.9178)
self.assertEqual(round(st.cv.kJkgK, 5), 1.01270)
self.assertEqual(round(st.cp.kJkgK, 5), 1.25049)
self.assertEqual(round(st.w, 3), 190.307)
self.assertEqual(round(st.h.kJkg, 3), 528.281)
self.assertEqual(round(st.s.kJkgK, 5), 2.12554)
st = R152a(T=250, P=1e6, eq="astina")
self.assertEqual(round(st.rho, 2), 1011.40)
self.assertEqual(round(st.cv.kJkgK, 5), 1.06828)
self.assertEqual(round(st.cp.kJkgK, 5), 1.61778)
self.assertEqual(round(st.w, 3), 887.901)
self.assertEqual(round(st.h.kJkg, 3), 162.066)
self.assertEqual(round(st.s.kJkgK, 6), 0.852079)
st = R152a(T=450, P=2e6, eq="astina")
self.assertEqual(round(st.rho, 4), 39.2134)
self.assertEqual(round(st.cv.kJkgK, 5), 1.27176)
self.assertEqual(round(st.cp.kJkgK, 5), 1.47906)
self.assertEqual(round(st.w, 3), 230.750)
self.assertEqual(round(st.h.kJkg, 3), 703.818)
self.assertEqual(round(st.s.kJkgK, 5), 2.44002)
st = R152a(T=450, P=3e6, eq="astina")
self.assertEqual(round(st.rho, 4), 62.4333)
self.assertEqual(round(st.cv.kJkgK, 5), 1.29309)
self.assertEqual(round(st.cp.kJkgK, 5), 1.56254)
self.assertEqual(round(st.w, 3), 221.167)
self.assertEqual(round(st.h.kJkg, 3), 692.526)
self.assertEqual(round(st.s.kJkgK, 5), 2.37021)
st = R152a(T=300, P=5e6, eq="astina")
self.assertEqual(round(st.rho, 3), 910.768)
self.assertEqual(round(st.cv.kJkgK, 5), 1.14104)
self.assertEqual(round(st.cp.kJkgK, 5), 1.75988)
self.assertEqual(round(st.w, 3), 681.439)
self.assertEqual(round(st.h.kJkg, 3), 247.753)
self.assertEqual(round(st.s.kJkgK, 5), 1.14904)
st = R152a(T=350, P=1.5e7, eq="astina")
self.assertEqual(round(st.rho, 3), 829.854)
self.assertEqual(round(st.cv.kJkgK, 5), 1.22465)
self.assertEqual(round(st.cp.kJkgK, 5), 1.86556)
self.assertEqual(round(st.w, 3), 577.309)
self.assertEqual(round(st.h.kJkg, 3), 339.639)
self.assertEqual(round(st.s.kJkgK, 5), 1.39677)
st = R152a(T=400, P=2.5e7, eq="astina")
self.assertEqual(round(st.rho, 3), 764.468)
self.assertEqual(round(st.cv.kJkgK, 5), 1.30865)
self.assertEqual(round(st.cp.kJkgK, 5), 1.92116)
self.assertEqual(round(st.w, 3), 526.848)
self.assertEqual(round(st.h.kJkg, 3), 433.427)
self.assertEqual(round(st.s.kJkgK, 5), 1.61365)
st = R152a(T=250, P=4e7, eq="astina")
self.assertEqual(round(st.rho, 2), 1069.55)
self.assertEqual(round(st.cv.kJkgK, 5), 1.10283)
self.assertEqual(round(st.cp.kJkgK, 5), 1.54070)
self.assertEqual(round(st.w, 2), 1076.73)
self.assertEqual(round(st.h.kJkg, 3), 183.511)
self.assertEqual(round(st.s.kJkgK, 5), 0.78821)
st = R152a(T=300, P=4.5e7, eq="astina")
self.assertEqual(round(st.rho, 3), 999.404)
self.assertEqual(round(st.cv.kJkgK, 5), 1.17500)
self.assertEqual(round(st.cp.kJkgK, 5), 1.61174)
self.assertEqual(round(st.w, 3), 920.812)
self.assertEqual(round(st.h.kJkg, 3), 265.090)
self.assertEqual(round(st.s.kJkgK, 5), 1.06788)
st = R152a(T=450, P=5e7, eq="astina")
self.assertEqual(round(st.rho, 3), 782.051)
self.assertEqual(round(st.cv.kJkgK, 5), 1.40653)
self.assertEqual(round(st.cp.kJkgK, 5), 1.86403)
self.assertEqual(round(st.w, 3), 612.067)
self.assertEqual(round(st.h.kJkg, 3), 528.381)
self.assertEqual(round(st.s.kJkgK, 5), 1.76108)
st = R152a(T=200, x=0.5, eq="astina")
self.assertEqual(round(st.P.MPa, 5), 0.00608)
self.assertEqual(round(st.Liquido.rho, 2), 1108.30)
self.assertEqual(round(st.Liquido.cv.kJkgK, 5), 1.02563)
self.assertEqual(round(st.Liquido.cp.kJkgK, 5), 1.53227)
self.assertEqual(round(st.Liquido.w, 2), 1144.44)
self.assertEqual(round(st.Liquido.h.kJkg, 4), 83.0708)
self.assertEqual(round(st.Liquido.s.kJkgK, 6), 0.504052)
self.assertEqual(round(st.Gas.rho, 6), 0.243503)
self.assertEqual(round(st.Gas.cv.kJkgK, 6), 0.688696)
self.assertEqual(round(st.Gas.cp.kJkgK, 6), 0.822899)
self.assertEqual(round(st.Gas.w, 3), 172.126)
self.assertEqual(round(st.Gas.h.kJkg, 3), 452.530)
self.assertEqual(round(st.Gas.s.kJkgK, 5), 2.35135)
st = R152a(T=300, x=0.5, eq="astina")
self.assertEqual(round(st.P.MPa, 5), 0.62958)
self.assertEqual(round(st.Liquido.rho, 3), 895.050)
self.assertEqual(round(st.Liquido.cv.kJkgK, 5), 1.14163)
self.assertEqual(round(st.Liquido.cp.kJkgK, 5), 1.80605)
self.assertEqual(round(st.Liquido.w, 3), 635.932)
self.assertEqual(round(st.Liquido.h.kJkg, 3), 246.936)
self.assertEqual(round(st.Liquido.s.kJkgK, 5), 1.16245)
self.assertEqual(round(st.Gas.rho, 4), 19.5363)
self.assertEqual(round(st.Gas.cv.kJkgK, 5), 1.05165)
self.assertEqual(round(st.Gas.cp.kJkgK, 5), 1.34292)
self.assertEqual(round(st.Gas.w, 3), 184.955)
self.assertEqual(round(st.Gas.h.kJkg, 3), 523.189)
self.assertEqual(round(st.Gas.s.kJkgK, 5), 2.08329)
st = R152a(P=1e4, x=0.5, eq="astina")
self.assertEqual(round(st.T, 3), 206.996)
self.assertEqual(round(st.Liquido.rho, 2), 1095.05)
self.assertEqual(round(st.Liquido.cv.kJkgK, 5), 1.02888)
self.assertEqual(round(st.Liquido.cp.kJkgK, 5), 1.54024)
self.assertEqual(round(st.Liquido.w, 2), 1106.00)
self.assertEqual(round(st.Liquido.h.kJkg, 4), 93.8198)
self.assertEqual(round(st.Liquido.s.kJkgK, 6), 0.556861)
self.assertEqual(round(st.Gas.rho, 6), 0.387819)
self.assertEqual(round(st.Gas.cv.kJkgK, 6), 0.709506)
self.assertEqual(round(st.Gas.cp.kJkgK, 6), 0.846608)
self.assertEqual(round(st.Gas.w, 3), 174.478)
self.assertEqual(round(st.Gas.h.kJkg, 3), 457.773)
self.assertEqual(round(st.Gas.s.kJkgK, 5), 2.31513)
st = R152a(P=2e6, x=0.5, eq="astina")
self.assertEqual(round(st.T, 3), 345.817)
self.assertEqual(round(st.Liquido.rho, 3), 755.354)
self.assertEqual(round(st.Liquido.cv.kJkgK, 5), 1.23824)
self.assertEqual(round(st.Liquido.cp.kJkgK, 5), 2.22690)
self.assertEqual(round(st.Liquido.w, 3), 400.169)
self.assertEqual(round(st.Liquido.h.kJkg, 3), 336.806)
self.assertEqual(round(st.Liquido.s.kJkgK, 5), 1.43550)
self.assertEqual(round(st.Gas.rho, 4), 67.2945)
self.assertEqual(round(st.Gas.cv.kJkgK, 5), 1.25774)
self.assertEqual(round(st.Gas.cp.kJkgK, 5), 1.99694)
self.assertEqual(round(st.Gas.w, 3), 166.520)
self.assertEqual(round(st.Gas.h.kJkg, 3), 542.188)
self.assertEqual(round(st.Gas.s.kJkgK, 5), 2.02940)
def test_kim(self):
# Table 6, pag 585, saturation states
# FIXME: The values fail in the fourth significant digit
pass
# st = R152a(T=-60+273.15, x=0.5, eq="kim")
# self.assertEqual(round(st.P.MPa, 5), 0.01500)
# self.assertEqual(round(st.Liquido.rho, 1), 1081.8)
# self.assertEqual(round(st.Gas.v, 5), 1.76626)
# self.assertEqual(round(st.Liquido.h.kJkg, 2), 103.24)
# self.assertEqual(round(st.Gas.h.kJkg, 2), 462.57)
# self.assertEqual(round(st.Liquido.s.kJkgK, 4), 0.6017)
# self.assertEqual(round(st.Gas.s.kJkgK, 4), 2.2875)
# self.assertEqual(round(st.Liquido.cp.kJkgK, 3), 1.565)
# self.assertEqual(round(st.Gas.cp.kJkgK, 3), 0.864)
# self.assertEqual(round(st.Liquido.w, 1), 1021.6)
# self.assertEqual(round(st.Gas.w, 1), 176.4)
def test_Krauss(self):
# Table VI, pag 750, saturation states
# The correlation uses the van Pelt-Sengers extension for the critical
# region from the tillner mEoS, so the returned values differ, especially
# the thermal conductivity values
# van Pelt, Sengers, J.V.
# Thermodynamic Properties of 1,1-Difluoroethane (R152a) in the
# Critical Region
# The Journal of Supercritical Fluids 8(1) (1995) 81-99
# doi: 10.1016/0896-8446(95)90021-7
# For testing it uses the outcalt mEoS for points near the critical point
st = R152a(T=240, x=0.5, eq="tillner")
self.assertEqual(round(st.P.MPa, 5), 0.06642)
self.assertEqual(round(st.Liquido.rho, 1), 1029.6)
self.assertEqual(round(st.Gas.rho, 4), 2.2736)
self.assertEqual(round(st.Liquido.mu.muPas, 1), 364.8)
self.assertEqual(round(st.Gas.mu.muPas, 2), 8.09)
self.assertEqual(round(st.Liquido.k.mWmK, 1), 128.8)
self.assertEqual(round(st.Gas.k.mWmK, 3), 8.483)
st = R152a(T=280, x=0.5, eq="tillner")
self.assertEqual(round(st.P.MPa, 5), 0.33558)
self.assertEqual(round(st.Liquido.rho, 2), 943.16)
self.assertEqual(round(st.Gas.rho, 3), 10.550)
self.assertEqual(round(st.Liquido.mu.muPas, 1), 198.1)
self.assertEqual(round(st.Gas.mu.muPas, 3), 9.606)
self.assertEqual(round(st.Liquido.k.mWmK, 1), 109.0)
self.assertEqual(round(st.Gas.k.mWmK, 2), 12.63)
st = R152a(T=320, x=0.5, eq="tillner")
self.assertEqual(round(st.P.MPa, 4), 1.0883)
self.assertEqual(round(st.Liquido.rho, 2), 839.97)
self.assertEqual(round(st.Gas.rho, 3), 34.202)
self.assertEqual(round(st.Liquido.mu.muPas, 1), 128.3)
self.assertEqual(round(st.Gas.mu.muPas, 2), 11.20)
self.assertEqual(round(st.Liquido.k.mWmK, 2), 90.71)
self.assertEqual(round(st.Gas.k.mWmK, 2), 17.31)
st = R152a(T=360, x=0.5, eq="tillner")
self.assertEqual(round(st.P.MPa, 4), 2.7024)
self.assertEqual(round(st.Liquido.rho, 2), 694.46)
self.assertEqual(round(st.Gas.rho, 3), 98.845)
self.assertEqual(round(st.Liquido.mu.muPas, 2), 76.74)
self.assertEqual(round(st.Gas.mu.muPas, 2), 13.87)
self.assertEqual(round(st.Liquido.k.mWmK, 2), 71.86)
self.assertEqual(round(st.Gas.k.mWmK, 2), 23.91)
# Table VII, Pag 753, Single phase point Viscosity
# Table VIII, Pag 754, Single phase point thermal conductivity
st = R152a(T=240, P=1e5, eq="tillner")
self.assertEqual(round(st.mu.muPas, 1), 365.1)
self.assertEqual(round(st.k.mWmK, 1), 128.8)
st = R152a(T=360, P=1e5, eq="tillner")
self.assertEqual(round(st.mu.muPas, 2), 12.52)
self.assertEqual(round(st.k.mWmK, 2), 20.16)
st = R152a(T=430, P=1e5, eq="tillner")
self.assertEqual(round(st.mu.muPas, 2), 14.91)
self.assertEqual(round(st.k.mWmK, 2), 26.96)
st = R152a(T=240, P=5e6, eq="tillner")
self.assertEqual(round(st.mu.muPas, 1), 411.3)
self.assertEqual(round(st.k.mWmK, 1), 131.4)
st = R152a(T=360, P=5e6, eq="tillner")
self.assertEqual(round(st.mu.muPas, 2), 86.54)
self.assertEqual(round(st.k.mWmK, 2), 76.58)
st = R152a(T=430, P=5e6, eq="tillner")
self.assertEqual(round(st.mu.muPas, 2), 17.53)
self.assertEqual(round(st.k.mWmK, 2), 32.54)
st = R152a(T=250, P=2e7, eq="tillner")
self.assertEqual(round(st.mu.muPas, 1), 445.9)
self.assertEqual(round(st.k.mWmK, 1), 133.7)
st = R152a(T=360, P=2e7, eq="tillner")
self.assertEqual(round(st.mu.muPas, 1), 123.9)
self.assertEqual(round(st.k.mWmK, 2), 92.37)
st = R152a(T=430, P=2e7, eq="tillner")
self.assertEqual(round(st.mu.muPas, 2), 70.29)
self.assertEqual(round(st.k.mWmK, 2), 73.98)
| jjgomera/pychemqt | lib/mEoS/R152a.py | Python | gpl-3.0 | 39,124 |
from enum import IntEnum
import logging
import os
from django.conf import settings as main_settings
from django.db import models
from django.db.models import Q
from django.utils import timezone as tz
from django.utils.translation import gettext_lazy as _
from taggit.managers import TaggableManager
from aircox import settings
from .program import Program
from .episode import Episode
logger = logging.getLogger('aircox')
__all__ = ['Sound', 'SoundQuerySet', 'Track']
class SoundQuerySet(models.QuerySet):
def station(self, station=None, id=None):
id = station.pk if id is None else id
return self.filter(program__station__id=id)
def episode(self, episode=None, id=None):
id = episode.pk if id is None else id
return self.filter(episode__id=id)
def diffusion(self, diffusion=None, id=None):
id = diffusion.pk if id is None else id
return self.filter(episode__diffusion__id=id)
def available(self):
return self.exclude(type=Sound.TYPE_REMOVED)
def public(self):
""" Return sounds available as podcasts """
return self.filter(is_public=True)
def archive(self):
""" Return sounds that are archives """
return self.filter(type=Sound.TYPE_ARCHIVE)
def paths(self, archive=True, order_by=True):
"""
Return paths as a flat list (excluding sounds without a path).
If `order_by` is True, order by path.
"""
if archive:
self = self.archive()
if order_by:
self = self.order_by('path')
return self.filter(path__isnull=False).values_list('path', flat=True)
def search(self, query):
return self.filter(
Q(name__icontains=query) | Q(path__icontains=query) |
Q(program__title__icontains=query) |
Q(episode__title__icontains=query)
)
class Sound(models.Model):
"""
A Sound is the representation of a sound file that can be either an excerpt
or a complete archive of the related diffusion.
"""
TYPE_OTHER = 0x00
TYPE_ARCHIVE = 0x01
TYPE_EXCERPT = 0x02
TYPE_REMOVED = 0x03
TYPE_CHOICES = (
(TYPE_OTHER, _('other')), (TYPE_ARCHIVE, _('archive')),
(TYPE_EXCERPT, _('excerpt')), (TYPE_REMOVED, _('removed'))
)
name = models.CharField(_('name'), max_length=64)
program = models.ForeignKey(
Program, models.CASCADE, blank=True, # NOT NULL
verbose_name=_('program'),
help_text=_('program related to it'),
)
episode = models.ForeignKey(
Episode, models.SET_NULL, blank=True, null=True,
verbose_name=_('episode'),
)
type = models.SmallIntegerField(_('type'), choices=TYPE_CHOICES)
position = models.PositiveSmallIntegerField(
_('order'), default=0, help_text=_('position in the playlist'),
)
# FIXME: url() does not use the same directory as here
# should we use FileField for more reliability?
path = models.FilePathField(
_('file'),
path=settings.AIRCOX_PROGRAMS_DIR,
match=r'(' + '|'.join(settings.AIRCOX_SOUND_FILE_EXT)
.replace('.', r'\.') + ')$',
recursive=True, max_length=255,
blank=True, null=True, unique=True,
)
#embed = models.TextField(
# _('embed'),
# blank=True, null=True,
# help_text=_('HTML code to embed a sound from an external plateform'),
#)
duration = models.TimeField(
_('duration'),
blank=True, null=True,
help_text=_('duration of the sound'),
)
mtime = models.DateTimeField(
_('modification time'),
blank=True, null=True,
help_text=_('last modification date and time'),
)
is_good_quality = models.BooleanField(
_('good quality'), help_text=_('sound meets quality requirements'),
blank=True, null=True
)
is_public = models.BooleanField(
_('public'), help_text=_('if it can be podcasted from the server'),
default=False,
)
objects = SoundQuerySet.as_manager()
class Meta:
verbose_name = _('Sound')
verbose_name_plural = _('Sounds')
def __str__(self):
return '/'.join(self.path.split('/')[-3:])
def save(self, check=True, *args, **kwargs):
if self.episode is not None and self.program is None:
self.program = self.episode.program
if check:
self.check_on_file()
self.__check_name()
super().save(*args, **kwargs)
def url(self):
""" Return an url to the file. """
path = self.path.replace(main_settings.MEDIA_ROOT, '', 1)
return (main_settings.MEDIA_URL + path).replace('//','/')
# TODO: rename get_file_mtime(self)
def get_mtime(self):
"""
Get the last modification date from the file
"""
mtime = os.stat(self.path).st_mtime
mtime = tz.datetime.fromtimestamp(mtime)
mtime = mtime.replace(microsecond=0)
return tz.make_aware(mtime, tz.get_current_timezone())
def file_exists(self):
""" Return true if the file still exists. """
return os.path.exists(self.path)
def check_on_file(self):
"""
Check sound file info against self, and update information if
needed (do not save). Return True if there were changes.
"""
if not self.file_exists():
if self.type == self.TYPE_REMOVED:
return
logger.info('sound %s: has been removed', self.path)
self.type = self.TYPE_REMOVED
return True
# no longer removed
changed = False
if self.type == self.TYPE_REMOVED and self.program:
changed = True
self.type = self.TYPE_ARCHIVE \
if self.path.startswith(self.program.archives_path) else \
self.TYPE_EXCERPT
# check mtime -> reset quality if changed (assume file changed)
mtime = self.get_mtime()
if self.mtime != mtime:
self.mtime = mtime
self.is_good_quality = None
logger.info('sound %s: m_time has changed. Reset quality info',
self.path)
return True
return changed
def __check_name(self):
if not self.name and self.path:
# FIXME: later, remove date?
self.name = os.path.basename(self.path)
self.name = os.path.splitext(self.name)[0]
self.name = self.name.replace('_', ' ')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__check_name()
class Track(models.Model):
"""
Track of an object's playlist. The position can be expressed either
as the position in the playlist or as the moment, in seconds, at which it started.
"""
episode = models.ForeignKey(
Episode, models.CASCADE, blank=True, null=True,
verbose_name=_('episode'),
)
sound = models.ForeignKey(
Sound, models.CASCADE, blank=True, null=True,
verbose_name=_('sound'),
)
position = models.PositiveSmallIntegerField(
_('order'), default=0, help_text=_('position in the playlist'),
)
timestamp = models.PositiveSmallIntegerField(
_('timestamp'),
blank=True, null=True,
help_text=_('position (in seconds)')
)
title = models.CharField(_('title'), max_length=128)
artist = models.CharField(_('artist'), max_length=128)
tags = TaggableManager(verbose_name=_('tags'), blank=True,)
info = models.CharField(
_('information'),
max_length=128,
blank=True, null=True,
help_text=_('additional information about this track, such as '
'the version, whether it is a remix, features, etc.'),
)
class Meta:
verbose_name = _('Track')
verbose_name_plural = _('Tracks')
ordering = ('position',)
def __str__(self):
return '{self.artist} -- {self.title} -- {self.position}'.format(
self=self)
def save(self, *args, **kwargs):
if (self.sound is None and self.episode is None) or \
(self.sound is not None and self.episode is not None):
raise ValueError('sound XOR episode is required')
super().save(*args, **kwargs)
| lordblackfox/aircox | aircox/models/sound.py | Python | gpl-3.0 | 8,350 |
from test import support
import enum
import locale
import platform
import sys
import sysconfig
import time
import unittest
try:
import threading
except ImportError:
threading = None
try:
import _testcapi
except ImportError:
_testcapi = None
# Max year is only limited by the size of C int.
SIZEOF_INT = sysconfig.get_config_var('SIZEOF_INT') or 4
TIME_MAXYEAR = (1 << 8 * SIZEOF_INT - 1) - 1
TIME_MINYEAR = -TIME_MAXYEAR - 1
US_TO_NS = 10 ** 3
MS_TO_NS = 10 ** 6
SEC_TO_NS = 10 ** 9
class _PyTime(enum.IntEnum):
# Round towards minus infinity (-inf)
ROUND_FLOOR = 0
# Round towards infinity (+inf)
ROUND_CEILING = 1
ALL_ROUNDING_METHODS = (_PyTime.ROUND_FLOOR, _PyTime.ROUND_CEILING)
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_time(self):
time.time()
info = time.get_clock_info('time')
self.assertFalse(info.monotonic)
self.assertTrue(info.adjustable)
def test_clock(self):
time.clock()
info = time.get_clock_info('clock')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_clock_realtime(self):
time.clock_gettime(time.CLOCK_REALTIME)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
@unittest.skipUnless(hasattr(time, 'CLOCK_MONOTONIC'),
'need time.CLOCK_MONOTONIC')
def test_clock_monotonic(self):
a = time.clock_gettime(time.CLOCK_MONOTONIC)
b = time.clock_gettime(time.CLOCK_MONOTONIC)
self.assertLessEqual(a, b)
@unittest.skipUnless(hasattr(time, 'clock_getres'),
'need time.clock_getres()')
def test_clock_getres(self):
res = time.clock_getres(time.CLOCK_REALTIME)
self.assertGreater(res, 0.0)
self.assertLessEqual(res, 1.0)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime()')
def test_clock_settime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
try:
time.clock_settime(time.CLOCK_REALTIME, t)
except PermissionError:
pass
if hasattr(time, 'CLOCK_MONOTONIC'):
self.assertRaises(OSError,
time.clock_settime, time.CLOCK_MONOTONIC, 0)
def test_conversions(self):
self.assertEqual(time.ctime(self.t),
time.asctime(time.localtime(self.t)))
self.assertEqual(int(time.mktime(time.localtime(self.t))),
int(self.t))
def test_sleep(self):
self.assertRaises(ValueError, time.sleep, -2)
self.assertRaises(ValueError, time.sleep, -1)
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
def _bounds_checking(self, func):
# Make sure that strftime() checks the bounds of the various parts
# of the time tuple (0 is valid for *all* values).
# The year field is tested by other test cases above
# Check month [1, 12] + zero support
func((1900, 0, 1, 0, 0, 0, 0, 1, -1))
func((1900, 12, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
func((1900, 1, 0, 0, 0, 0, 0, 1, -1))
func((1900, 1, 31, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
func((1900, 1, 1, 23, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
func((1900, 1, 1, 0, 59, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
func((1900, 1, 1, 0, 0, 60, 0, 1, -1))
func((1900, 1, 1, 0, 0, 61, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertEqual(func((1900, 1, 1, 0, 0, 0, -1, 1, -1)),
func((1900, 1, 1, 0, 0, 0, +6, 1, -1)))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
func((1900, 1, 1, 0, 0, 0, 0, 0, -1))
func((1900, 1, 1, 0, 0, 0, 0, 366, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_strftime_bounding_check(self):
self._bounds_checking(lambda tup: time.strftime('', tup))
def test_strftime_format_check(self):
# Test that strftime does not crash on invalid format strings
# that may trigger a buffer overread. When not triggered,
# strftime may succeed or raise ValueError depending on
# the platform.
for x in [ '', 'A', '%A', '%AA' ]:
for y in range(0x0, 0x10):
for z in [ '%', 'A%', 'AA%', '%A%', 'A%A%', '%#' ]:
try:
time.strftime(x * y + z)
except ValueError:
pass
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default
# values. No test for daylight savings since strftime() does
# not change output based on its value and no test for year
# because systems vary in their support for year 0.
expected = "2000 01 01 00 00 00 1 001"
with support.check_warnings():
result = time.strftime("%Y %m %d %H %M %S %w %j", (2000,)+(0,)*8)
self.assertEqual(expected, result)
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# raising an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_bytes(self):
# Make sure only strings are accepted as arguments to strptime.
self.assertRaises(TypeError, time.strptime, b'2009', "%Y")
self.assertRaises(TypeError, time.strptime, '2009', b'%Y')
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see #17572)
with self.assertRaises(ValueError) as e:
time.strptime('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
# additional check for IndexError branch (issue #19545)
with self.assertRaises(ValueError) as e:
time.strptime('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_asctime(self):
time.asctime(time.gmtime(self.t))
# Max year is only limited by the size of C int.
for bigyear in TIME_MAXYEAR, TIME_MINYEAR:
asc = time.asctime((bigyear, 6, 1) + (0,) * 6)
self.assertEqual(asc[-len(str(bigyear)):], str(bigyear))
self.assertRaises(OverflowError, time.asctime,
(TIME_MAXYEAR + 1,) + (0,) * 8)
self.assertRaises(OverflowError, time.asctime,
(TIME_MINYEAR - 1,) + (0,) * 8)
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
self.assertRaises(TypeError, time.asctime, (0,) * 10)
def test_asctime_bounding_check(self):
self._bounds_checking(time.asctime)
def test_ctime(self):
t = time.mktime((1973, 9, 16, 1, 3, 52, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sun Sep 16 01:03:52 1973')
t = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sat Jan 1 00:00:00 2000')
for year in [-100, 100, 1000, 2000, 2050, 10000]:
try:
testval = time.mktime((year, 1, 10) + (0,)*6)
except (ValueError, OverflowError):
# If mktime fails, ctime will fail too. This may happen
# on some platforms.
pass
else:
self.assertEqual(time.ctime(testval)[20:], str(year))
@unittest.skipUnless(hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
# Issue #11886: Australian Eastern Standard Time (UTC+10) is called
# "EST" (as Eastern Standard Time, UTC-5) instead of "AEST"
# (non-DST timezone), and "EDT" instead of "AEDT" (DST timezone),
# on some operating systems (e.g. FreeBSD), which is wrong. See for
# example this bug:
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=93810
            self.assertIn(time.tzname[0], ('AEST', 'EST'), time.tzname[0])
self.assertTrue(time.tzname[1] in ('AEDT', 'EDT'), str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
# Repair TZ environment variable in case any other tests
# rely on it.
if org_TZ is not None:
environ['TZ'] = org_TZ
elif 'TZ' in environ:
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(OverflowError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_mktime(self):
# Issue #1726687
for t in (-2, -1, 0, 1):
if sys.platform.startswith('aix') and t == -1:
# Issue #11188, #19748: mktime() returns -1 on error. On Linux,
                # the tm_wday field is used as a sentinel to detect if -1 is
# really an error or a valid timestamp. On AIX, tm_wday is
# unchanged even on success and so cannot be used as a
# sentinel.
continue
try:
tt = time.localtime(t)
except (OverflowError, OSError):
pass
else:
self.assertEqual(time.mktime(tt), t)
# Issue #13309: passing extreme values to mktime() or localtime()
# borks the glibc's internal timezone data.
@unittest.skipUnless(platform.libc_ver()[0] != 'glibc',
"disabled because of a bug in glibc. Issue #13309")
def test_mktime_error(self):
        # It may not be possible to reliably make mktime return an error
        # on all platforms. This makes sure that no exception other
        # than OverflowError is raised for an extreme value.
tt = time.gmtime(self.t)
tzname = time.strftime('%Z', tt)
self.assertNotEqual(tzname, 'LMT')
try:
time.mktime((-1, 1, 1, 0, 0, 0, -1, -1, -1))
except OverflowError:
pass
self.assertEqual(time.strftime('%Z', tt), tzname)
@unittest.skipUnless(hasattr(time, 'monotonic'),
'need time.monotonic')
def test_monotonic(self):
# monotonic() should not go backward
times = [time.monotonic() for n in range(100)]
t1 = times[0]
for t2 in times[1:]:
self.assertGreaterEqual(t2, t1, "times=%s" % times)
t1 = t2
# monotonic() includes time elapsed during a sleep
t1 = time.monotonic()
time.sleep(0.5)
t2 = time.monotonic()
dt = t2 - t1
self.assertGreater(t2, t1)
# Issue #20101: On some Windows machines, dt may be slightly low
self.assertTrue(0.45 <= dt <= 1.0, dt)
# monotonic() is a monotonic but non adjustable clock
info = time.get_clock_info('monotonic')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
def test_perf_counter(self):
time.perf_counter()
def test_process_time(self):
# process_time() should not include time spend during a sleep
start = time.process_time()
time.sleep(0.100)
stop = time.process_time()
        # use 20 ms because process_time() usually has a resolution of 15 ms
# on Windows
self.assertLess(stop - start, 0.020)
info = time.get_clock_info('process_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'monotonic'),
'need time.monotonic')
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime')
def test_monotonic_settime(self):
t1 = time.monotonic()
realtime = time.clock_gettime(time.CLOCK_REALTIME)
# jump backward with an offset of 1 hour
try:
time.clock_settime(time.CLOCK_REALTIME, realtime - 3600)
except PermissionError as err:
self.skipTest(err)
t2 = time.monotonic()
time.clock_settime(time.CLOCK_REALTIME, realtime)
# monotonic must not be affected by system clock updates
self.assertGreaterEqual(t2, t1)
def test_localtime_failure(self):
# Issue #13847: check for localtime() failure
invalid_time_t = None
for time_t in (-1, 2**30, 2**33, 2**60):
try:
time.localtime(time_t)
except OverflowError:
self.skipTest("need 64-bit time_t")
except OSError:
invalid_time_t = time_t
break
if invalid_time_t is None:
self.skipTest("unable to find an invalid time_t value")
self.assertRaises(OSError, time.localtime, invalid_time_t)
self.assertRaises(OSError, time.ctime, invalid_time_t)
def test_get_clock_info(self):
clocks = ['clock', 'perf_counter', 'process_time', 'time']
if hasattr(time, 'monotonic'):
clocks.append('monotonic')
for name in clocks:
info = time.get_clock_info(name)
#self.assertIsInstance(info, dict)
self.assertIsInstance(info.implementation, str)
self.assertNotEqual(info.implementation, '')
self.assertIsInstance(info.monotonic, bool)
self.assertIsInstance(info.resolution, float)
# 0.0 < resolution <= 1.0
self.assertGreater(info.resolution, 0.0)
self.assertLessEqual(info.resolution, 1.0)
self.assertIsInstance(info.adjustable, bool)
self.assertRaises(ValueError, time.get_clock_info, 'xxx')
class TestLocale(unittest.TestCase):
def setUp(self):
self.oldloc = locale.setlocale(locale.LC_ALL)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.oldloc)
def test_bug_3061(self):
try:
tmp = locale.setlocale(locale.LC_ALL, "fr_FR")
except locale.Error:
self.skipTest('could not set locale.LC_ALL to fr_FR')
# This should not cause an exception
time.strftime("%B", (2009,2,1,0,0,0,0,0,0))
class _TestAsctimeYear:
_format = '%d'
def yearstr(self, y):
return time.asctime((y,) + (0,) * 8).split()[-1]
def test_large_year(self):
# Check that it doesn't crash for year > 9999
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
class _TestStrftimeYear:
# Issue 13305: For years < 1000, the value is not always
# padded to 4 digits across platforms. The C standard
# assumes year >= 1900, so it does not specify the number
# of digits.
if time.strftime('%Y', (1,) + (0,) * 8) == '0001':
_format = '%04d'
else:
_format = '%d'
def yearstr(self, y):
return time.strftime('%Y', (y,) + (0,) * 8)
def test_4dyear(self):
# Check that we can return the zero padded value.
if self._format == '%04d':
self.test_year('%04d')
else:
def year4d(y):
return time.strftime('%4Y', (y,) + (0,) * 8)
self.test_year('%04d', func=year4d)
def skip_if_not_supported(y):
msg = "strftime() is limited to [1; 9999] with Visual Studio"
# Check that it doesn't crash for year > 9999
try:
time.strftime('%Y', (y,) + (0,) * 8)
except ValueError:
cond = False
else:
cond = True
return unittest.skipUnless(cond, msg)
@skip_if_not_supported(10000)
def test_large_year(self):
return super().test_large_year()
@skip_if_not_supported(0)
def test_negative(self):
return super().test_negative()
del skip_if_not_supported
class _Test4dYear:
_format = '%d'
def test_year(self, fmt=None, func=None):
fmt = fmt or self._format
func = func or self.yearstr
self.assertEqual(func(1), fmt % 1)
self.assertEqual(func(68), fmt % 68)
self.assertEqual(func(69), fmt % 69)
self.assertEqual(func(99), fmt % 99)
self.assertEqual(func(999), fmt % 999)
self.assertEqual(func(9999), fmt % 9999)
def test_large_year(self):
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
self.assertEqual(self.yearstr(TIME_MAXYEAR), str(TIME_MAXYEAR))
self.assertRaises(OverflowError, self.yearstr, TIME_MAXYEAR + 1)
def test_negative(self):
self.assertEqual(self.yearstr(-1), self._format % -1)
self.assertEqual(self.yearstr(-1234), '-1234')
self.assertEqual(self.yearstr(-123456), '-123456')
self.assertEqual(self.yearstr(-123456789), str(-123456789))
self.assertEqual(self.yearstr(-1234567890), str(-1234567890))
self.assertEqual(self.yearstr(TIME_MINYEAR + 1900), str(TIME_MINYEAR + 1900))
# Issue #13312: it may return wrong value for year < TIME_MINYEAR + 1900
# Skip the value test, but check that no error is raised
self.yearstr(TIME_MINYEAR)
# self.assertEqual(self.yearstr(TIME_MINYEAR), str(TIME_MINYEAR))
self.assertRaises(OverflowError, self.yearstr, TIME_MINYEAR - 1)
class TestAsctime4dyear(_TestAsctimeYear, _Test4dYear, unittest.TestCase):
pass
class TestStrftime4dyear(_TestStrftimeYear, _Test4dYear, unittest.TestCase):
pass
class TestPytime(unittest.TestCase):
def setUp(self):
self.invalid_values = (
-(2 ** 100), 2 ** 100,
-(2.0 ** 100.0), 2.0 ** 100.0,
)
@support.cpython_only
def test_time_t(self):
from _testcapi import pytime_object_to_time_t
for obj, time_t, rnd in (
# Round towards minus infinity (-inf)
(0, 0, _PyTime.ROUND_FLOOR),
(-1, -1, _PyTime.ROUND_FLOOR),
(-1.0, -1, _PyTime.ROUND_FLOOR),
(-1.9, -2, _PyTime.ROUND_FLOOR),
(1.0, 1, _PyTime.ROUND_FLOOR),
(1.9, 1, _PyTime.ROUND_FLOOR),
# Round towards infinity (+inf)
(0, 0, _PyTime.ROUND_CEILING),
(-1, -1, _PyTime.ROUND_CEILING),
(-1.0, -1, _PyTime.ROUND_CEILING),
(-1.9, -1, _PyTime.ROUND_CEILING),
(1.0, 1, _PyTime.ROUND_CEILING),
(1.9, 2, _PyTime.ROUND_CEILING),
):
self.assertEqual(pytime_object_to_time_t(obj, rnd), time_t)
rnd = _PyTime.ROUND_FLOOR
for invalid in self.invalid_values:
self.assertRaises(OverflowError,
pytime_object_to_time_t, invalid, rnd)
@support.cpython_only
def test_timespec(self):
from _testcapi import pytime_object_to_timespec
for obj, timespec, rnd in (
# Round towards minus infinity (-inf)
(0, (0, 0), _PyTime.ROUND_FLOOR),
(-1, (-1, 0), _PyTime.ROUND_FLOOR),
(-1.0, (-1, 0), _PyTime.ROUND_FLOOR),
(1e-9, (0, 1), _PyTime.ROUND_FLOOR),
(1e-10, (0, 0), _PyTime.ROUND_FLOOR),
(-1e-9, (-1, 999999999), _PyTime.ROUND_FLOOR),
(-1e-10, (-1, 999999999), _PyTime.ROUND_FLOOR),
(-1.2, (-2, 800000000), _PyTime.ROUND_FLOOR),
(0.9999999999, (0, 999999999), _PyTime.ROUND_FLOOR),
(1.1234567890, (1, 123456789), _PyTime.ROUND_FLOOR),
(1.1234567899, (1, 123456789), _PyTime.ROUND_FLOOR),
(-1.1234567890, (-2, 876543211), _PyTime.ROUND_FLOOR),
(-1.1234567891, (-2, 876543210), _PyTime.ROUND_FLOOR),
# Round towards infinity (+inf)
(0, (0, 0), _PyTime.ROUND_CEILING),
(-1, (-1, 0), _PyTime.ROUND_CEILING),
(-1.0, (-1, 0), _PyTime.ROUND_CEILING),
(1e-9, (0, 1), _PyTime.ROUND_CEILING),
(1e-10, (0, 1), _PyTime.ROUND_CEILING),
(-1e-9, (-1, 999999999), _PyTime.ROUND_CEILING),
(-1e-10, (0, 0), _PyTime.ROUND_CEILING),
(-1.2, (-2, 800000000), _PyTime.ROUND_CEILING),
(0.9999999999, (1, 0), _PyTime.ROUND_CEILING),
(1.1234567890, (1, 123456790), _PyTime.ROUND_CEILING),
(1.1234567899, (1, 123456790), _PyTime.ROUND_CEILING),
(-1.1234567890, (-2, 876543211), _PyTime.ROUND_CEILING),
(-1.1234567891, (-2, 876543211), _PyTime.ROUND_CEILING),
):
with self.subTest(obj=obj, round=rnd, timespec=timespec):
self.assertEqual(pytime_object_to_timespec(obj, rnd), timespec)
rnd = _PyTime.ROUND_FLOOR
for invalid in self.invalid_values:
self.assertRaises(OverflowError,
pytime_object_to_timespec, invalid, rnd)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_localtime_timezone(self):
# Get the localtime and examine it for the offset and zone.
lt = time.localtime()
self.assertTrue(hasattr(lt, "tm_gmtoff"))
self.assertTrue(hasattr(lt, "tm_zone"))
# See if the offset and zone are similar to the module
# attributes.
if lt.tm_gmtoff is None:
self.assertTrue(not hasattr(time, "timezone"))
else:
self.assertEqual(lt.tm_gmtoff, -[time.timezone, time.altzone][lt.tm_isdst])
if lt.tm_zone is None:
self.assertTrue(not hasattr(time, "tzname"))
else:
self.assertEqual(lt.tm_zone, time.tzname[lt.tm_isdst])
# Try and make UNIX times from the localtime and a 9-tuple
# created from the localtime. Test to see that the times are
# the same.
t = time.mktime(lt); t9 = time.mktime(lt[:9])
self.assertEqual(t, t9)
# Make localtimes from the UNIX times and compare them to
# the original localtime, thus making a round trip.
new_lt = time.localtime(t); new_lt9 = time.localtime(t9)
self.assertEqual(new_lt, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt.tm_zone, lt.tm_zone)
self.assertEqual(new_lt9, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt9.tm_zone, lt.tm_zone)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_strptime_timezone(self):
t = time.strptime("UTC", "%Z")
self.assertEqual(t.tm_zone, 'UTC')
t = time.strptime("+0500", "%z")
self.assertEqual(t.tm_gmtoff, 5 * 3600)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_short_times(self):
import pickle
# Load a short time structure using pickle.
st = b"ctime\nstruct_time\np0\n((I2007\nI8\nI11\nI1\nI24\nI49\nI5\nI223\nI1\ntp1\n(dp2\ntp3\nRp4\n."
lt = pickle.loads(st)
self.assertIs(lt.tm_gmtoff, None)
self.assertIs(lt.tm_zone, None)
@unittest.skipUnless(_testcapi is not None,
'need the _testcapi module')
class TestPyTime_t(unittest.TestCase):
def test_FromSeconds(self):
from _testcapi import PyTime_FromSeconds
for seconds in (0, 3, -456, _testcapi.INT_MAX, _testcapi.INT_MIN):
with self.subTest(seconds=seconds):
self.assertEqual(PyTime_FromSeconds(seconds),
seconds * SEC_TO_NS)
def test_FromSecondsObject(self):
from _testcapi import PyTime_FromSecondsObject
# Conversion giving the same result for all rounding methods
for rnd in ALL_ROUNDING_METHODS:
for obj, ts in (
# integers
(0, 0),
(1, SEC_TO_NS),
(-3, -3 * SEC_TO_NS),
# float: subseconds
(0.0, 0),
(1e-9, 1),
(1e-6, 10 ** 3),
(1e-3, 10 ** 6),
# float: seconds
(2.0, 2 * SEC_TO_NS),
(123.0, 123 * SEC_TO_NS),
(-7.0, -7 * SEC_TO_NS),
                # nanoseconds are kept for values <= 2^23 seconds
(2**22 - 1e-9, 4194303999999999),
(2**22, 4194304000000000),
(2**22 + 1e-9, 4194304000000001),
(2**23 - 1e-9, 8388607999999999),
(2**23, 8388608000000000),
# start losing precision for value > 2^23 seconds
(2**23 + 1e-9, 8388608000000002),
# nanoseconds are lost for value > 2^23 seconds
(2**24 - 1e-9, 16777215999999998),
(2**24, 16777216000000000),
(2**24 + 1e-9, 16777216000000000),
(2**25 - 1e-9, 33554432000000000),
(2**25 , 33554432000000000),
(2**25 + 1e-9, 33554432000000000),
# close to 2^63 nanoseconds (_PyTime_t limit)
(9223372036, 9223372036 * SEC_TO_NS),
(9223372036.0, 9223372036 * SEC_TO_NS),
(-9223372036, -9223372036 * SEC_TO_NS),
(-9223372036.0, -9223372036 * SEC_TO_NS),
):
with self.subTest(obj=obj, round=rnd, timestamp=ts):
self.assertEqual(PyTime_FromSecondsObject(obj, rnd), ts)
with self.subTest(round=rnd):
with self.assertRaises(OverflowError):
PyTime_FromSecondsObject(9223372037, rnd)
PyTime_FromSecondsObject(9223372037.0, rnd)
PyTime_FromSecondsObject(-9223372037, rnd)
PyTime_FromSecondsObject(-9223372037.0, rnd)
# Conversion giving different results depending on the rounding method
FLOOR = _PyTime.ROUND_FLOOR
CEILING = _PyTime.ROUND_CEILING
for obj, ts, rnd in (
# close to zero
( 1e-10, 0, FLOOR),
( 1e-10, 1, CEILING),
(-1e-10, -1, FLOOR),
(-1e-10, 0, CEILING),
# test rounding of the last nanosecond
( 1.1234567899, 1123456789, FLOOR),
( 1.1234567899, 1123456790, CEILING),
(-1.1234567899, -1123456790, FLOOR),
(-1.1234567899, -1123456789, CEILING),
# close to 1 second
( 0.9999999999, 999999999, FLOOR),
( 0.9999999999, 1000000000, CEILING),
(-0.9999999999, -1000000000, FLOOR),
(-0.9999999999, -999999999, CEILING),
):
with self.subTest(obj=obj, round=rnd, timestamp=ts):
self.assertEqual(PyTime_FromSecondsObject(obj, rnd), ts)
def test_AsSecondsDouble(self):
from _testcapi import PyTime_AsSecondsDouble
for nanoseconds, seconds in (
# near 1 nanosecond
( 0, 0.0),
( 1, 1e-9),
(-1, -1e-9),
# near 1 second
(SEC_TO_NS + 1, 1.0 + 1e-9),
(SEC_TO_NS, 1.0),
(SEC_TO_NS - 1, 1.0 - 1e-9),
# a few seconds
(123 * SEC_TO_NS, 123.0),
(-567 * SEC_TO_NS, -567.0),
            # nanoseconds are kept for values <= 2^23 seconds
(4194303999999999, 2**22 - 1e-9),
(4194304000000000, 2**22),
(4194304000000001, 2**22 + 1e-9),
# start losing precision for value > 2^23 seconds
(8388608000000002, 2**23 + 1e-9),
# nanoseconds are lost for value > 2^23 seconds
(16777215999999998, 2**24 - 1e-9),
(16777215999999999, 2**24 - 1e-9),
(16777216000000000, 2**24 ),
(16777216000000001, 2**24 ),
(16777216000000002, 2**24 + 2e-9),
(33554432000000000, 2**25 ),
(33554432000000002, 2**25 ),
(33554432000000004, 2**25 + 4e-9),
# close to 2^63 nanoseconds (_PyTime_t limit)
(9223372036 * SEC_TO_NS, 9223372036.0),
(-9223372036 * SEC_TO_NS, -9223372036.0),
):
with self.subTest(nanoseconds=nanoseconds, seconds=seconds):
self.assertEqual(PyTime_AsSecondsDouble(nanoseconds),
seconds)
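    # --- note added for clarity (not in the original test file) ---
    # The precision cliffs in the expected values above follow from IEEE-754
    # doubles carrying 53 significant bits: near 2**23 seconds one ulp is
    # 2**23 * 2**-52 s ~= 1.9 ns, so single nanoseconds can no longer be
    # represented exactly, and near 2**24 seconds (ulp ~= 3.7 ns) they are
    # dropped entirely, e.g. (2.0**24 + 1e-9) == 2.0**24 is True while
    # (2.0**22 + 1e-9) == 2.0**22 is False.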
def test_timeval(self):
from _testcapi import PyTime_AsTimeval
for rnd in ALL_ROUNDING_METHODS:
for ns, tv in (
# microseconds
(0, (0, 0)),
(1000, (0, 1)),
(-1000, (-1, 999999)),
# seconds
(2 * SEC_TO_NS, (2, 0)),
(-3 * SEC_TO_NS, (-3, 0)),
):
with self.subTest(nanoseconds=ns, timeval=tv, round=rnd):
self.assertEqual(PyTime_AsTimeval(ns, rnd), tv)
FLOOR = _PyTime.ROUND_FLOOR
CEILING = _PyTime.ROUND_CEILING
for ns, tv, rnd in (
# nanoseconds
(1, (0, 0), FLOOR),
(1, (0, 1), CEILING),
(-1, (-1, 999999), FLOOR),
(-1, (0, 0), CEILING),
# seconds + nanoseconds
(1234567001, (1, 234567), FLOOR),
(1234567001, (1, 234568), CEILING),
(-1234567001, (-2, 765432), FLOOR),
(-1234567001, (-2, 765433), CEILING),
):
with self.subTest(nanoseconds=ns, timeval=tv, round=rnd):
self.assertEqual(PyTime_AsTimeval(ns, rnd), tv)
@unittest.skipUnless(hasattr(_testcapi, 'PyTime_AsTimespec'),
'need _testcapi.PyTime_AsTimespec')
def test_timespec(self):
from _testcapi import PyTime_AsTimespec
for ns, ts in (
# nanoseconds
(0, (0, 0)),
(1, (0, 1)),
(-1, (-1, 999999999)),
# seconds
(2 * SEC_TO_NS, (2, 0)),
(-3 * SEC_TO_NS, (-3, 0)),
# seconds + nanoseconds
(1234567890, (1, 234567890)),
(-1234567890, (-2, 765432110)),
):
with self.subTest(nanoseconds=ns, timespec=ts):
self.assertEqual(PyTime_AsTimespec(ns), ts)
def test_milliseconds(self):
from _testcapi import PyTime_AsMilliseconds
for rnd in ALL_ROUNDING_METHODS:
for ns, tv in (
# milliseconds
(1 * MS_TO_NS, 1),
(-2 * MS_TO_NS, -2),
# seconds
(2 * SEC_TO_NS, 2000),
(-3 * SEC_TO_NS, -3000),
):
with self.subTest(nanoseconds=ns, timeval=tv, round=rnd):
self.assertEqual(PyTime_AsMilliseconds(ns, rnd), tv)
FLOOR = _PyTime.ROUND_FLOOR
CEILING = _PyTime.ROUND_CEILING
for ns, ms, rnd in (
# nanoseconds
(1, 0, FLOOR),
(1, 1, CEILING),
(-1, -1, FLOOR),
(-1, 0, CEILING),
# seconds + nanoseconds
(1234 * MS_TO_NS + 1, 1234, FLOOR),
(1234 * MS_TO_NS + 1, 1235, CEILING),
(-1234 * MS_TO_NS - 1, -1235, FLOOR),
(-1234 * MS_TO_NS - 1, -1234, CEILING),
):
with self.subTest(nanoseconds=ns, milliseconds=ms, round=rnd):
self.assertEqual(PyTime_AsMilliseconds(ns, rnd), ms)
def test_microseconds(self):
from _testcapi import PyTime_AsMicroseconds
for rnd in ALL_ROUNDING_METHODS:
for ns, tv in (
# microseconds
(1 * US_TO_NS, 1),
(-2 * US_TO_NS, -2),
# milliseconds
(1 * MS_TO_NS, 1000),
(-2 * MS_TO_NS, -2000),
# seconds
(2 * SEC_TO_NS, 2000000),
(-3 * SEC_TO_NS, -3000000),
):
with self.subTest(nanoseconds=ns, timeval=tv, round=rnd):
self.assertEqual(PyTime_AsMicroseconds(ns, rnd), tv)
FLOOR = _PyTime.ROUND_FLOOR
CEILING = _PyTime.ROUND_CEILING
for ns, ms, rnd in (
# nanoseconds
(1, 0, FLOOR),
(1, 1, CEILING),
(-1, -1, FLOOR),
(-1, 0, CEILING),
# seconds + nanoseconds
(1234 * US_TO_NS + 1, 1234, FLOOR),
(1234 * US_TO_NS + 1, 1235, CEILING),
(-1234 * US_TO_NS - 1, -1235, FLOOR),
(-1234 * US_TO_NS - 1, -1234, CEILING),
):
with self.subTest(nanoseconds=ns, milliseconds=ms, round=rnd):
self.assertEqual(PyTime_AsMicroseconds(ns, rnd), ms)
if __name__ == "__main__":
unittest.main()
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/test/test_time.py | Python | gpl-3.0 | 38,764 |
#!/usr/bin/env python
"""
Exports various methods used to initialize motion configuration. These methods
have been moved to this separate module to reduce issues when the motion API
changes. All changes should be in just this module.
"""
import logger
from mutex_parsers import *
import os
log = logger.Logger('kmotion', logger.DEBUG)
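# --- illustrative sketch (added; not part of the original kmotion module) ---
# InitMotion.create_mask() below expects the feed mask as fifteen
# '#'-separated hex words, each encoding one 15-bit row of a 15x15 grid,
# most significant bit first, with a set bit marking a masked (black) cell.
# A minimal decoder for a single row, mirroring the `tmp_dec & 16384` test
# used in create_mask():
def _example_decode_mask_row(hex_word):
    value = int(hex_word, 16)
    # bit 14 (0x4000) is the leftmost cell, bit 0 the rightmost
    return [(value >> bit) & 1 for bit in range(14, -1, -1)]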
class InitMotion:
def __init__(self, kmotion_dir):
self.kmotion_dir = kmotion_dir
self.kmotion_parser = mutex_kmotion_parser_rd(self.kmotion_dir)
self.www_parser = mutex_www_parser_rd(self.kmotion_dir)
self.images_dbase_dir = self.kmotion_parser.get('dirs', 'images_dbase_dir')
self.ramdisk_dir = self.kmotion_parser.get('dirs', 'ramdisk_dir')
self.feed_list = []
for section in self.www_parser.sections():
try:
if 'motion_feed' in section:
if self.www_parser.getboolean(section, 'feed_enabled'):
feed = int(section.replace('motion_feed', ''))
self.feed_list.append(feed)
except Exception:
log.exception('init error')
self.feed_list.sort()
def create_mask(self, feed):
"""
Create a motion PGM mask from 'mask_hex_string' for feed 'feed'. Save it
        as ../core/masks/mask??.pgm.
args : kmotion_dir ... the 'root' directory of kmotion
feed ... the feed number
mask_hex_str ... the encoded mask hex string
excepts :
return : none
"""
mask_hex_str = self.www_parser.get('motion_feed%02i' % feed, 'feed_mask')
log.debug('create_mask() - mask hex string: %s' % mask_hex_str)
image_width = self.www_parser.getint('motion_feed%02i' % feed, 'feed_width')
image_height = self.www_parser.getint('motion_feed%02i' % feed, 'feed_height')
log.debug('create_mask() - width: %i height: %i' % (image_width, image_height))
black_px = '\x00'
white_px = '\xFF'
mask = ''
mask_hex_split = mask_hex_str.split('#')
px_yptr = 0
for y in range(15):
tmp_dec = int(mask_hex_split[y], 16)
px_xptr = 0
image_line = ''
for x in range(15, 0, -1):
px_mult = (image_width - px_xptr) / x
px_xptr += px_mult
bin_ = tmp_dec & 16384
tmp_dec <<= 1
if bin_ == 16384:
image_line += black_px * px_mult
else:
image_line += white_px * px_mult
px_mult = (image_height - px_yptr) / (15 - y)
px_yptr += px_mult
mask += image_line * px_mult
masks_dir = os.path.join(self.kmotion_dir, 'core/masks')
if not os.path.isdir(masks_dir):
os.makedirs(masks_dir)
with open(os.path.join(masks_dir, 'mask%0.2i.pgm' % feed), 'wb') as f_obj:
f_obj.write('P5\n')
f_obj.write('%i %i\n' % (image_width, image_height))
f_obj.write('255\n')
f_obj.write(mask)
log.debug('create_mask() - mask written')
def init_motion_out(self):
"""
        Wipes the 'motion_out' file in preparation for new output.
args : kmotion_dir ... the 'root' directory of kmotion
excepts :
return : none
"""
motion_out = os.path.join(self.kmotion_dir, 'www/motion_out')
if os.path.isfile(motion_out):
os.remove(motion_out)
def gen_motion_configs(self):
"""
Generates the motion.conf and thread??.conf files from www_rc and virtual
motion conf files
args : kmotion_dir ... the 'root' directory of kmotion
excepts :
return : none
"""
motion_conf_dir = os.path.join(self.kmotion_dir, 'core/motion_conf')
if not os.path.isdir(motion_conf_dir):
os.makedirs(motion_conf_dir)
# delete all files in motion_conf
for del_file in [del_file for del_file in os.listdir(motion_conf_dir)
if os.path.isfile(os.path.join(motion_conf_dir, del_file))]:
os.remove(os.path.join(motion_conf_dir, del_file))
if len(self.feed_list) > 0: # only populate 'motion_conf' if valid feeds
self.gen_motion_conf()
self.gen_threads_conf()
def gen_motion_conf(self):
"""
Generates the motion.conf file from www_rc and the virtual motion conf files
args : kmotion_dir ... the 'root' directory of kmotion
feed_list ... a list of enabled feeds
ramdisk_dir ... the ramdisk directory
excepts :
return : none
"""
with open('%s/core/motion_conf/motion.conf' % self.kmotion_dir, 'w') as f_obj1:
print >> f_obj1, '''
# ------------------------------------------------------------------------------
# This config file has been automatically generated by kmotion
# Do __NOT__ modify it in any way.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# 'default' section
# ------------------------------------------------------------------------------
quiet on
# ------------------------------------------------------------------------------
# 'user' section from 'virtual_motion_conf/motion.conf'
# ------------------------------------------------------------------------------
'''
            # this is a user-changeable file so error trap
try:
with open('%s/virtual_motion_conf/motion.conf' % self.kmotion_dir, 'r') as f_obj2:
user_conf = f_obj2.read()
except IOError:
print >> f_obj1, '# virtual_motion_conf/motion.conf not readable - ignored'
log.exception('no motion.conf readable in virtual_motion_conf dir - none included in final motion.conf')
else:
print >> f_obj1, user_conf
print >> f_obj1, '''
# ------------------------------------------------------------------------------
# 'override' section
# ------------------------------------------------------------------------------
daemon off
control_port 8080
control_localhost on
'''
for feed in self.feed_list:
print >> f_obj1, 'thread %s/core/motion_conf/thread%02i.conf\n' % (self.kmotion_dir, feed)
def gen_threads_conf(self):
"""
Generates the thread??.conf files from www_rc and the virtual motion conf
files
args : kmotion_dir ... the 'root' directory of kmotion
feed_list ... a list of enabled feeds
ramdisk_dir ... the ram disk directory
images_dbase_dir ... the images dbase directory
excepts :
return : none
"""
for feed in self.feed_list:
with open('%s/core/motion_conf/thread%02i.conf' % (self.kmotion_dir, feed), 'w') as f_obj1:
print >> f_obj1, '''
# ------------------------------------------------------------------------------
# This config file has been automatically generated by kmotion
# Do __NOT__ modify it in any way.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# 'default' section
# ------------------------------------------------------------------------------
gap 2
pre_capture 1
post_capture 10
quality 85
webcam_localhost off
'''
# pal or ntsc,
print >> f_obj1, 'norm 1'
# feed mask,
if self.www_parser.get('motion_feed%02i' % feed, 'feed_mask') != '0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#':
self.create_mask(feed)
print >> f_obj1, 'mask_file %s/core/masks/mask%0.2i.pgm' % (self.kmotion_dir, feed)
# framerate,
print >> f_obj1, 'framerate 3' # default for feed updates
print >> f_obj1, '''
# ------------------------------------------------------------------------------
# 'user' section from 'virtual_motion_conf/thread%02i.conf'
# ------------------------------------------------------------------------------
''' % feed
                # this is a user-changeable file so error trap
try:
with open('%s/virtual_motion_conf/thread%02i.conf' % (self.kmotion_dir, feed)) as f_obj2:
user_conf = f_obj2.read()
except IOError:
print >> f_obj1, '# virtual_motion_conf/thread%02i.conf not readable - ignored' % feed
log.exception('no feed%02i.conf readable in virtual_motion_conf dir - none included in final motion.conf' % feed)
else:
print >> f_obj1, user_conf
print >> f_obj1, '''
# ------------------------------------------------------------------------------
# 'override' section
# ------------------------------------------------------------------------------
snapshot_interval 1
webcam_localhost off
'''
print >> f_obj1, 'target_dir %s' % self.ramdisk_dir
# device and input
feed_device = int(self.www_parser.get('motion_feed%02i' % feed, 'feed_device'))
if feed_device > -1: # /dev/video? device
print >> f_obj1, 'videodevice /dev/video%s' % feed_device
print >> f_obj1, 'input %s' % self.www_parser.get('motion_feed%02i' % feed, 'feed_input')
else: # netcam
print >> f_obj1, 'netcam_url %s' % self.www_parser.get('motion_feed%02i' % feed, 'feed_url')
print >> f_obj1, 'netcam_proxy %s' % self.www_parser.get('motion_feed%02i' % feed, 'feed_proxy')
print >> f_obj1, 'netcam_userpass %s:%s' % (
self.www_parser.get('motion_feed%02i' % feed, 'feed_lgn_name'),
self.www_parser.get('motion_feed%02i' % feed, 'feed_lgn_pw'))
print >> f_obj1, 'width %s' % self.www_parser.get('motion_feed%02i' % feed, 'feed_width')
print >> f_obj1, 'height %s' % self.www_parser.get('motion_feed%02i' % feed, 'feed_height')
print >> f_obj1, 'threshold %s' % self.www_parser.get('motion_feed%02i' % feed, 'feed_threshold')
print >> f_obj1, 'quality %s' % self.www_parser.get('motion_feed%02i' % feed, 'feed_quality')
# show motion box
if self.www_parser.getboolean('motion_feed%02i' % feed, 'feed_show_box'):
print >> f_obj1, 'locate on'
# always on for feed updates
print >> f_obj1, 'output_normal off'
print >> f_obj1, 'jpeg_filename %s/%%Y%%m%%d/%0.2i/snap/%%H%%M%%S' % (self.images_dbase_dir, feed)
print >> f_obj1, ''
print >> f_obj1, 'snapshot_filename %0.2i/%%Y%%m%%d%%H%%M%%S' % feed
print >> f_obj1, 'on_event_start %s/core/events.py %i start' % (self.kmotion_dir, feed)
print >> f_obj1, 'on_event_end %s/core/events.py %i end' % (self.kmotion_dir, feed)
print >> f_obj1, 'on_camera_lost %s/core/camera_lost.py %i' % (self.kmotion_dir, feed)
print >> f_obj1, 'on_picture_save %s/core/picture_save.py %%f' % (self.kmotion_dir)
# Module test code
if __name__ == '__main__':
print '\nModule self test ... generating motion.conf and threads\n'
kmotion_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
InitMotion(kmotion_dir).gen_motion_configs()
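# --- note (added; an assumption about how the generated files are consumed) ---
# The motion.conf and thread??.conf files written by gen_motion_configs() are
# normally handed to the motion daemon, e.g.
#     motion -c <kmotion_dir>/core/motion_conf/motion.conf
# with each 'thread' line pulling in one per-feed configuration file.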
| dvor85/kmotion | core/init_motion.py | Python | gpl-3.0 | 11,843 |
import pdb
import datetime
from .models import AnnounceType, Announce
from sched_core.sched_log import sched_log
from sched_core.const import DAY
from sched_core.config import TZ_LOCAL, local_time_str
from .config import AnnounceChannel, channel_name
from .config_func import func_announce
# When announcements are accepted (draft=True -> draft=False), process
# announcements. Meetup is currently the only channel requiring
# pre-processing when announcements are accepted.
def announce_gen(modeladmin, request, queryset):
# TODO: how to use modeladmin, request ??
# pdb.set_trace()
for event in queryset:
if event.draft or not event.planned:
# don't generate announcements for draft or unplanned events
print("announce_gen: skipped - draft or unplanned")
continue
event_type = event.event_type
date = event.date_time.astimezone(TZ_LOCAL).date()
announce_types = AnnounceType.objects.filter(event_type=event_type)
#
# get event event_type.group
group = event.group
# get coordinator of event_type.group
# owner = UserPermission.objects.filter(coordinator=ev.group)[0].user
for announce_type in announce_types:
a = Announce(event_type = event_type,
event = event,
announce_type = announce_type,
channel = announce_type.channel,
send = announce_type.send,
is_preface = announce_type.is_preface,
use_header = announce_type.use_header,
lead_title = announce_type.lead_title,
# publish_later = announce_type.publish_later,
# allow_change = announce_type.allow_later,
notes = announce_type.notes,
question = announce_type.question,
rsvp_limit = announce_type.rsvp_limit,
# group = announce_type.group,
date = date - DAY*announce_type.days_offset,
draft = True)
a.save()
def send_post(modeladmin, request, queryset):
# pdb.set_trace()
for channel, announces in classify_channels(queryset).items():
try:
#func = func_post[channel]
func = func_announce[channel]
        except KeyError:
sched_log.error('no announce post function for channel {}'.
format(channel))
continue
func.post(channel, announces)
def send_update(modeladmin, request, queryset):
for channel, announces in classify_channels(queryset).items():
try:
#func = func_update[channel]
func = func_announce[channel]
        except KeyError:
sched_log.error('no announce update function for channel {}'.
format(channel))
continue
func.update(channel, announces)
message = '''\
channel : {}
event : {}
date : {}
location: {}'''
def send_announce(modeladmin, request, queryset):
for channel, announces in classify_channels(queryset).items():
try:
func = func_announce[channel]
        except KeyError:
sched_log.error('no announce announce function for channel {}'.
format(channel))
continue
func.announce(channel, announces)
def send_cancel(modeladmin, request, queryset):
pdb.set_trace()
# TODO: need way to send separately to meetup and others
# meetup.cancel(queryset)
for channel, announces in classify_channels(queryset).items():
try:
func = func_announce[channel]
        except KeyError:
sched_log.error('no announce announce function for channel {}'.
format(channel))
continue
func.cancel(channel, announces)
def send_delete(modeladmin, request, queryset):
# TODO: need way to send separately to meetup and others
pass
# meetup.delete(queryset)
def classify_channels(queryset):
'''
    Construct a dictionary of announcements keyed by channel
'''
channel_dict = {}
for an in queryset:
channel = an.channel
if channel in channel_dict:
channel_dict[channel].append(an)
else:
channel_dict[channel] = [an]
return channel_dict
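# --- note (added sketch; not part of the original module) ---
# The grouping above could equally be written with collections.defaultdict:
#     channel_dict = defaultdict(list)
#     for an in queryset:
#         channel_dict[an.channel].append(an)
# which avoids the explicit membership test.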
from sched_ev.models import Event
GCAL_TEST = True
# TODO: caller needs to trap on no 'announce_type' found
def gen_cal(announce_type, start, end):
if announce_type.location:
events = Event.objects.filter(date_time__gte=start, date_time__lte=end,
location=announce_type.location, planned=True) \
.order_by('date_time')
elif announce_type.category:
events = Event.objects.filter(date_time__gte=start, date_time__lte=end,
category=announce_type.category, planned=True) \
.order_by('date_time')
else:
# TODO: display error message
events = None
# elif announce_type.category:
# events = Event.objects.filter(date_time__gte=start, date_time__lte=end, category=announce_type.category)
for event in events:
if GCAL_TEST:
print('{:30}: {:30} - {}'.format(channel_name[announce_type.channel],
event.name(), local_time_str(event.date_time)))
else:
gcal_insert(event, announce_type.channel)
| sjaa/scheduler | sched_announce/gen.py | Python | gpl-3.0 | 5,734 |
# tPythonStep.py: Test python DPPP class
# Copyright (C) 2015
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id: __init__.py 23074 2012-12-03 07:51:29Z diepen $
from lofar.pythondppp import DPStep
from lofar.parameterset import parameterset
class tPythonStep(DPStep):
def __init__(self, parsetDict):
# The constructor gets the subset of the NDPPP parset containing
# all keys-value pairs for this step.
# Note: the superclass constructor MUST be called.
DPStep.__init__(self, parsetDict)
parset = parameterset(parsetDict)
self.itsIncr = parset.getDouble('incr', 1)
def updateInfo(self, dpinfo):
# This function must be implemented.
self.itsInfo = dpinfo
# Make the arrays that will get the input buffer data from
# the getData, etc. calls in the process function.
self.itsData = self.makeArrayDataIn()
self.itsFlags = self.makeArrayFlagsIn()
self.itsWeights = self.makeArrayWeightsIn()
self.itsUVW = self.makeArrayUVWIn()
# Return the dict with info fields that change in this step.
return {};
def process(self, time, exposure):
# This function must be implemented.
# First get the data arrays needed by this step.
self.getData (self.itsData);
self.getFlags (self.itsFlags);
self.getWeights (self.itsWeights);
self.getUVW (self.itsUVW);
# Process the data.
print "process tPythonStep", time-4.47203e9, exposure, self.itsData.sum(), self.itsFlags.sum(), self.itsWeights.sum(), self.itsUVW.sum()
# Execute the next step in the DPPP pipeline. TIME,UVW are changed.
return self.processNext ({'TIME': time+self.itsIncr, 'UVW': self.itsUVW+self.itsIncr})
def finish(self):
# Finish the step as needed.
# This function does not need to be implemented.
# Note: finish of the next step is called by the C++ layer.
print "finish tPythonStep"
def showCounts(self):
# Show the counts of this test.
# This function does not need to be implemented.
return " **showcounttest**"
def addToMS(self, msname):
        # Add some info to the output MeasurementSet.
# This function does not need to be implemented.
print "addToMS tPythonStep", msname
| jjdmol/LOFAR | CEP/DP3/PythonDPPP/test/tPythonStep.py | Python | gpl-3.0 | 3,123 |
import csv
import sys
from phyltr.commands.base import PhyltrCommand
from phyltr.plumbing.sinks import NullSink
from phyltr.utils.misc import dicts_from_csv
class Annotate(PhyltrCommand):
"""Annotate a the trees in a tree stream with information from a file"""
__options__ = [
(
('-e', '--extract'),
dict(
default=False,
action="store_true",
help="Build a CSV file of information from a treestream, i.e. reverse the"
"standard behaviour")),
(
('-f', '--file'),
dict(dest="filename", help="File to read/write annotation data from/to.")),
(
('-k', '--key'),
dict(help="Fieldname which corresponds to tree taxon names, used to link lines of the "
"csv file to tree nodes.")),
(
('-m', '--multiple'),
dict(
default=False, action="store_true",
help="If set, when --extract is used, information from each tree in the treestream "
"will be added to the file, with a `tree_number` column used to disambiguate. "
"When not set, information is extracted only from the first tree in the "
"treestream.")),
]
def __init__(self, **kw):
PhyltrCommand.__init__(self, **kw)
self.annotations = {}
if self.opts.extract and (self.opts.filename == "-" or not self.opts.filename):
            # If we're writing an extracted CSV to stdout, we don't want to also
# serialise the trees, so plumb to null
self.sink = NullSink
if not self.opts.extract:
self.read_annotation_file()
def process_tree(self, t, n):
if self.opts.extract:
# Break out of consume if we've done one
if not self.opts.multiple and n > 1:
raise StopIteration
self.extract_annotations(t, n)
else:
self.annotate_tree(t)
return t
def read_annotation_file(self):
for row in dicts_from_csv(self.opts.filename):
this_key = row.pop(self.opts.key)
self.annotations[this_key] = row
def annotate_tree(self, t):
for node in t.traverse():
if node.name in self.annotations:
for key, value in self.annotations[node.name].items():
node.add_feature(key, value)
def extract_annotations(self, t, n):
if self.opts.filename == "-" or not self.opts.filename:
fp = sys.stdout # pragma: no cover
else:
fp = open(self.opts.filename, "a" if n > 1 else "w")
features = []
for node in t.traverse():
for f in node.features:
if f not in ["dist", "support", "name"] and f not in features:
features.append(f)
features.sort()
fieldnames = ["name"]
if self.opts.multiple:
fieldnames.append("tree_number")
fieldnames.extend(features)
writer = csv.DictWriter(fp, fieldnames=fieldnames)
if n == 1:
writer.writeheader()
for node in t.traverse():
# Only include the root node or nodes with names
if not node.name and node.up:
continue
if any([hasattr(node, f) for f in features]):
if not node.name:
# Temporarily give the node a name
node.name = "root"
fix_root_name = True
else:
fix_root_name = False
rowdict = {f: getattr(node, f, "?") for f in fieldnames}
if self.opts.multiple:
rowdict["tree_number"] = n
writer.writerow(rowdict)
if fix_root_name:
node.name = None
if self.opts.filename and self.opts.filename != "-":
fp.close()
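# --- usage sketch (added; assumes the usual phyltr command-line front end) ---
# Annotating trees from a CSV keyed on taxon names, and the reverse
# extraction, would typically look something like:
#     phyltr annotate --file data.csv --key taxon < trees.nwk
#     phyltr annotate --extract --file extracted.csv < trees.nwk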
| lmaurits/phyltr | src/phyltr/commands/annotate.py | Python | gpl-3.0 | 4,014 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Partial comparison of dicts/lists."""
import re
import pprint
import os.path
import contextlib
import pytest
from qutebrowser.utils import qtutils
qt58 = pytest.mark.skipif(
qtutils.version_check('5.9'), reason="Needs Qt 5.8 or earlier")
qt59 = pytest.mark.skipif(
not qtutils.version_check('5.9'), reason="Needs Qt 5.9 or newer")
qt510 = pytest.mark.skipif(
not qtutils.version_check('5.10'), reason="Needs Qt 5.10 or newer")
skip_qt511 = pytest.mark.skipif(
qtutils.version_check('5.11'), reason="Needs Qt 5.10 or earlier")
class PartialCompareOutcome:
"""Storage for a partial_compare error.
Evaluates to False if an error was found.
Attributes:
error: A string describing an error or None.
"""
def __init__(self, error=None):
self.error = error
def __bool__(self):
return self.error is None
def __repr__(self):
return 'PartialCompareOutcome(error={!r})'.format(self.error)
def __str__(self):
return 'true' if self.error is None else 'false'
def print_i(text, indent, error=False):
if error:
text = '| ****** {} ******'.format(text)
for line in text.splitlines():
print('| ' * indent + line)
def _partial_compare_dict(val1, val2, *, indent):
for key in val2:
if key not in val1:
outcome = PartialCompareOutcome(
"Key {!r} is in second dict but not in first!".format(key))
print_i(outcome.error, indent, error=True)
return outcome
outcome = partial_compare(val1[key], val2[key], indent=indent + 1)
if not outcome:
return outcome
return PartialCompareOutcome()
def _partial_compare_list(val1, val2, *, indent):
if len(val1) < len(val2):
outcome = PartialCompareOutcome(
"Second list is longer than first list")
print_i(outcome.error, indent, error=True)
return outcome
for item1, item2 in zip(val1, val2):
outcome = partial_compare(item1, item2, indent=indent + 1)
if not outcome:
return outcome
return PartialCompareOutcome()
def _partial_compare_float(val1, val2, *, indent):
if val1 == pytest.approx(val2):
return PartialCompareOutcome()
return PartialCompareOutcome("{!r} != {!r} (float comparison)".format(
val1, val2))
def _partial_compare_str(val1, val2, *, indent):
if pattern_match(pattern=val2, value=val1):
return PartialCompareOutcome()
return PartialCompareOutcome("{!r} != {!r} (pattern matching)".format(
val1, val2))
def _partial_compare_eq(val1, val2, *, indent):
if val1 == val2:
return PartialCompareOutcome()
return PartialCompareOutcome("{!r} != {!r}".format(val1, val2))
def partial_compare(val1, val2, *, indent=0):
"""Do a partial comparison between the given values.
For dicts, keys in val2 are checked, others are ignored.
For lists, entries at the positions in val2 are checked, others ignored.
For other values, == is used.
This happens recursively.
"""
print_i("Comparing", indent)
print_i(pprint.pformat(val1), indent + 1)
print_i("|---- to ----", indent)
print_i(pprint.pformat(val2), indent + 1)
if val2 is Ellipsis:
print_i("Ignoring ellipsis comparison", indent, error=True)
return PartialCompareOutcome()
elif type(val1) != type(val2): # pylint: disable=unidiomatic-typecheck
outcome = PartialCompareOutcome(
"Different types ({}, {}) -> False".format(type(val1).__name__,
type(val2).__name__))
print_i(outcome.error, indent, error=True)
return outcome
handlers = {
dict: _partial_compare_dict,
list: _partial_compare_list,
float: _partial_compare_float,
str: _partial_compare_str,
}
for typ, handler in handlers.items():
if isinstance(val2, typ):
print_i("|======= Comparing as {}".format(typ.__name__), indent)
outcome = handler(val1, val2, indent=indent)
break
else:
print_i("|======= Comparing via ==", indent)
outcome = _partial_compare_eq(val1, val2, indent=indent)
print_i("---> {}".format(outcome), indent)
return outcome
def pattern_match(*, pattern, value):
"""Do fnmatch.fnmatchcase like matching, but only with * active.
Return:
True on a match, False otherwise.
"""
re_pattern = '.*'.join(re.escape(part) for part in pattern.split('*'))
return re.fullmatch(re_pattern, value, flags=re.DOTALL) is not None
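# --- illustrative examples (added; not part of the original helpers) ---
# pattern_match() treats '*' as the only wildcard and escapes everything else,
# so pattern_match(pattern='Loading * took * seconds',
#                  value='Loading x took 3 seconds') is True, while other
# regex metacharacters in the pattern are matched literally.
# partial_compare() only checks the keys/positions present in the second
# value, e.g. partial_compare({'a': 1, 'b': 2}, {'a': 1}) evaluates truthy.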
def abs_datapath():
"""Get the absolute path to the end2end data directory."""
file_abs = os.path.abspath(os.path.dirname(__file__))
return os.path.join(file_abs, '..', 'end2end', 'data')
@contextlib.contextmanager
def nop_contextmanager():
yield
| V155/qutebrowser | tests/helpers/utils.py | Python | gpl-3.0 | 5,712 |
import os
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from MyConfig import MyConfig
from Utilities import debug_msg
class Initialization:
class __Initialization:
app = None
db = None
config = MyConfig()
def global_create_app(self, name, db_path):
basedir = os.path.abspath(os.path.dirname(__file__))
            if name is None:
debug_msg("Warning: no app name!")
name = '__main__'
app = Flask(name)
if db_path is None:
database_path = 'sqlite:///' + \
os.path.join(
basedir, self.config.getConfig('database_path'))
else:
database_path = 'sqlite:///' + \
os.path.join(
basedir, db_path)
debug_msg("----database_path---: " + database_path)
app.config['SQLALCHEMY_DATABASE_URI'] = database_path
debug_msg(app.config['SQLALCHEMY_DATABASE_URI'])
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
self.app = app
return app
def global_create_db(self, app):
db = SQLAlchemy(app)
self.db = db
return db
def __init__(self):
pass
def get_global_app(self):
return self.app
def get_global_db(self):
return self.db
# for singleton
instance = None
def __init__(self):
if not Initialization.instance:
Initialization.instance = Initialization.__Initialization()
def __getattr__(self, name):
return getattr(self.instance, name)
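# --- usage sketch (added; an assumption, not taken from the original project) ---
# Initialization() always hands back the same inner __Initialization instance,
# so, for example:
#     init = Initialization()
#     app = init.global_create_app('blockytime', None)   # hypothetical app name
#     db = init.global_create_db(app)
# and any later Initialization() call reaches the same app/db via __getattr__.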
| pipicold/BlockyTime | server_new/Initialization.py | Python | gpl-3.0 | 1,720 |
from driverpower import __version__
from setuptools import setup, find_packages
setup(
name='DriverPower',
version=__version__,
author='Shimin Shuai',
author_email='sshuai@oicr.on.ca',
url='https://github.com/smshuai/DriverPower',
license='GPL',
python_requires='>= 3.5.2',
packages=find_packages(),
description='Combined burden and functional test for coding and noncoding cancer drivers',
py_modules=['driverpower'],
include_package_data=True,
install_requires=[
'numpy >= 1.13.0',
'scipy >= 0.18.1',
'pandas >= 0.18.1',
'scikit-learn == 0.19.2',
'statsmodels >= 0.6.1',
'xgboost >= 0.6a',
'pybedtools >= 0.7.10',
'tables >= 3.4.4',
],
entry_points = {
'console_scripts': [
'driverpower=driverpower.interface:main',
],
},
)
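# --- usage note (added; standard setuptools behaviour, not from the package docs) ---
# The console_scripts entry point above means that installing the package
# (for example with `pip install .`) exposes a `driverpower` command that
# dispatches to driverpower.interface:main.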
| smshuai/DriverPower | setup.py | Python | gpl-3.0 | 880 |
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 4 Feb 2014
@author: George
'''
'''
inherits from MachineJobShop. The difference is that it reads the operator (manager) from the
Entity and checks whether that operator is available before it accepts the Entity
'''
from SimPy.Simulation import Process, Resource, activate, now
from OperatedPoolBroker import Broker
from OperatorPool import OperatorPool
from OperatorRouter import Router
from MachineJobShop import MachineJobShop
# ===========================================================================
# the MachineManagedJob object
# ===========================================================================
class MachineManagedJob(MachineJobShop):
# =======================================================================
# initialise the MachineManagedJob
# =======================================================================
def initialize(self):
MachineJobShop.initialize(self)
self.type="MachineManagedJob"
#create an empty Operator Pool. This will be updated by canAcceptAndIsRequested
id = self.id+'_OP'
name=self.objName+'_operatorPool'
self.operatorPool=OperatorPool(id, name, operatorsList=[])
self.operatorPool.initialize()
self.operatorPool.operators=[]
#create a Broker
self.broker = Broker(self)
activate(self.broker,self.broker.run())
#create a Router
from Globals import G
if len(G.RoutersList)==0:
self.router=Router()
activate(self.router,self.router.run())
G.RoutersList.append(self.router)
# otherwise set the already existing router as the machines Router
else:
self.router=G.RoutersList[0]
# holds the Entity that is to be obtained and will be updated by canAcceptAndIsRequested
self.entityToGet=None
# =======================================================================
# checks if the Queue can accept an entity
# it checks also the next station of the Entity
# and returns true only if the active object is the next station
# =======================================================================
def canAccept(self, callerObject=None):
activeObject=self.getActiveObject()
activeObjectQueue=activeObject.getActiveObjectQueue()
thecaller=callerObject
if (thecaller!=None):
#check it the caller object holds an Entity that requests for current object
if len(thecaller.getActiveObjectQueue())>0:
# TODO: make sure that the first entity of the callerObject is to be disposed
activeEntity=thecaller.getActiveObjectQueue()[0]
# if the machine's Id is in the list of the entity's next stations
if activeObject.id in activeEntity.remainingRoute[0].get('stationIdsList',[]):
#return according to the state of the Queue
return len(activeObject.getActiveObjectQueue())<activeObject.capacity\
and activeObject.Up
return False
# =======================================================================
# checks if the Machine can accept an entity and there is an entity in
# some possible giver waiting for it
# also updates the giver to the one that is to be taken
# =======================================================================
def canAcceptAndIsRequested(self):
# get active and giver objects
activeObject=self.getActiveObject()
activeObjectQueue=self.getActiveObjectQueue()
giverObject=self.getGiverObject()
# dummy variables that help prioritise the objects requesting to give objects to the Machine (activeObject)
isRequested=False # dummy variable checking whether the Machine is requested to accept an item
maxTimeWaiting=0 # dummy variable counting the time a predecessor is blocked
# loop through the possible givers to see which have to dispose and which is the one blocked for longer
for object in activeObject.previous:
if(object.haveToDispose(activeObject) and object.receiver==self):# and not object.exitIsAssigned()):
isRequested=True # if the possible giver has entities to dispose of
if(object.downTimeInTryingToReleaseCurrentEntity>0):# and the possible giver has been down while trying to give away the Entity
timeWaiting=now()-object.timeLastFailureEnded # the timeWaiting dummy variable counts the time since the end of the last failure of the giver object
else:
timeWaiting=now()-object.timeLastEntityEnded # in any other case, it holds the time since the end of the Entity processing
#if more than one possible giver has to dispose, take the part from the one that has been blocked longer
if(timeWaiting>=maxTimeWaiting):
activeObject.giver=object # set the giver
maxTimeWaiting=timeWaiting
if (activeObject.operatorPool!='None' and (any(type=='Load' for type in activeObject.multOperationTypeList)\
or any(type=='Setup' for type in activeObject.multOperationTypeList))):
if isRequested:
# TODO: check whether this entity is the one to be handed in
# to be used in operatorPreemptive
activeObject.requestingEntity=activeObject.giver.getActiveObjectQueue()[0]
# TODO: update the object requesting the operator
activeObject.operatorPool.requestingObject=activeObject.giver
# TODO: update the last object calling the operatorPool
activeObject.operatorPool.receivingObject=activeObject
if activeObject.Up and len(activeObjectQueue)<activeObject.capacity\
and self.checkOperator()\
and not activeObject.giver.exitIsAssigned():
activeObject.giver.assignExit()
# if the activeObject is not in manager's activeCallersList of the entityToGet
if self not in activeObject.giver.getActiveObjectQueue()[0].manager.activeCallersList:
# append it to the activeCallerList of the manager of the entity to be received
activeObject.giver.getActiveObjectQueue()[0].manager.activeCallersList.append(self)
# update entityToGet
self.entityToGet=activeObject.giver.getActiveObjectQueue()[0]
#make the operators List so that it holds only the manager of the current order
activeObject.operatorPool.operators=[activeObject.giver.getActiveObjectQueue()[0].manager]
# # set the variable operatorAssignedTo to activeObject, the operator is then blocked
# activeObject.operatorPool.operators[0].operatorAssignedTo=activeObject
# # TESTING
# print now(), activeObject.operatorPool.operators[0].objName, 'got assigned to', activeObject.id
# read the load time of the machine
self.readLoadTime()
return True
else:
return False
else:
# the operator doesn't have to be present for the loading of the machine as the load operation
# is not assigned to operators
if activeObject.Up and len(activeObjectQueue)<activeObject.capacity and isRequested:
# update entityToGet
self.entityToGet=self.giver.getActiveObjectQueue()[0]
return activeObject.Up and len(activeObjectQueue)<activeObject.capacity and isRequested
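# Added note (commentary, not in the original): when the machine's multOperationTypeList
# contains 'Load' or 'Setup', an operator (the manager of the entity to be received) must be
# available before the entity is accepted; otherwise the method only performs the plain
# capacity/Up/isRequested check of the else branch above.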
# =======================================================================
# called by canAcceptAndIsRequested; checks whether the entity's operator is available
# =======================================================================
def checkOperator(self):
if self.giver.getActiveObjectQueue()[0].manager:
manager=self.giver.getActiveObjectQueue()[0].manager
# print ''
# print 'Entity',self.giver.getActiveObjectQueue()[0].id
# print 'manager',manager.id
return manager.checkIfResourceIsAvailable()
else:
return True
# =======================================================================
# identifies the Entity to be obtained so that
# getEntity gives it to removeEntity as argument
# =======================================================================
def identifyEntityToGet(self):
# ToDecide
# maybe we should work this way in all CoreObjects???
return self.entityToGet
# =======================================================================
# checks if the object is ready to receive an Entity
# =======================================================================
def isReadyToGet(self):
# check if the entity that is about to be obtained has a manager (this should be true for this object)
if self.entityToGet.manager:
manager=self.entityToGet.manager
if len(manager.activeCallersList)>0:
manager.sortEntities() # sort the callers of the manager to be used for scheduling rules
# return true if the manager is available
return manager.checkIfResourceIsAvailable()
else:
return True
# =======================================================================
# release the operator assigned to this machine
# =======================================================================
def releaseOperator(self):
self.outputTrace(self.currentOperator.objName, "released from "+ self.objName)
# # TESTING
# print now(), self.id, 'will release operator', self.operatorPool.operators[0].objName
# set the flag operatorAssignedTo to None
self.operatorPool.operators[0].operatorAssignedTo=None
self.broker.invokeBroker()
self.toBeOperated = False
|
mmariani/dream
|
dream/simulation/MachineManagedJob.py
|
Python
|
gpl-3.0
| 11,355
|
'''Use the tap function to build data in a loop.
Example:
    f='/mnt/nfs/comeg2/0611/tap/01%01%01@01:01/1/c,rfDC'
    ch=channel.index(f, 'ext')
    t=tapbuild.get(f,f)
    t.avg(700, ch)
'''
# Copyright 2008 Dan Collins
#
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# And is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Build; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import time
from numpy import shape, average, array, append, any, zeros, arange
from pdf2py import tap, trigdet,contourbuild, powerspecbuild, pdf
from meg import offset
import pylab
class config:
def __init__(self, datapdf, datatemplate):
self.datapiece=tap.chunk(datapdf); print 'init datafile' #initialize datafile
self.datapiece.setparams(datatemplate, trig='TRIGGER') ; print 'init parameters'#initialize parameters
self.pdfobj=pdf.read(datatemplate)
class get(config):
def avg(self, epochwidth, channelinstance, contour=None, butterfly=None, csd=None, sp=None):
'''Average data; epochwidth is the number of samples per epoch.'''
def calldata(channelinstance):#, fidpos):
self.datapiece.getchunk(channelinstance)#, fidpos);
try:
self.datapiece.data_blockall
except AttributeError:
return False #no data opened
else: #opened data
print 'shape of incoming data', shape(self.datapiece.data_blockall)
print 'fid-tell',self.datapiece.fid.tell()
#print 'first trigger ',self.datapiece.trigind.trigstartind[0]
return True
def parsetrig():
try:
self.datapiece.trigind.trigstartind[0]
except IndexError:
return False #no data opened
except AttributeError:
return False
else:
return True
def checkbuffer():
try: self.bufferdata
except AttributeError:
print 'nothing buffered'
#self.bufferdata=self.datapiece.data_blockall
return False
else: #something buffered, prepend to incoming data
return True
def prepend():
print 'prepending. New shape should be ',shape(self.datapiece.data_blockall), '+', shape(self.bufferdata)
self.datapiece.data_blockall=append(self.bufferdata, self.datapiece.data_blockall, axis=0) #prepended data
try:
self.datapiece.trigind.trigstartind
except AttributeError:
pass
#self.bufferedtrig=self.bufferedtrig
else:
if parsetrig()==False: #no trig
print 'no trigger instance'
print self.bufferedtrig
self.bufferedtrig=self.bufferedtrig-self.bufferedtrig[0]
else:
#print 'trig instance',self.datapiece.trigind.trigstartind
#print 'buffered trigs',self.bufferedtrig
self.datapiece.trigind.trigstartind=self.datapiece.trigind.trigstartind+shape(self.bufferdata)[0] #change indexing to add space from buffer
self.bufferedtrig=array([self.bufferedtrig-self.bufferedtrig[0]])
self.datapiece.trigind.trigstartind=append(self.bufferedtrig, self.datapiece.trigind.trigstartind)
#print 'new indexing',self.datapiece.trigind.trigstartind
#clear buffer
#print 'actual shape ', shape(self.datapiece.data_blockall)
#print 'new trigs',self.datapiece.trigind.trigstartind
del self.bufferdata, self.bufferedtrig
def avgdata():
print self.datapiece.trigind.trigstartind
for trig in self.datapiece.trigind.trigstartind: #do something for each trigger found
if self.datapiece.chunk - trig >= epochwidth: #enough room for a single epoch
epochdata=(self.datapiece.data_blockall[trig:trig+epochwidth])
#print 'shape of avg data ',shape(epochdata)
try:
print 'checking for avg data'
self.data_avg
except AttributeError:
print 'starting first epoch'
self.data_avg = epochdata #create first epoch
#self.data_avg=data_avg
self.avgtimes=0
self.data3d=epochdata
self.data_avg=offset.correct((self.data_avg+epochdata))
#print 'shape of avg',self.data_avg
#print 'averaging', shape(self.data_avg)
self.avgtimes=self.avgtimes+1
#print 'avgtimes', self.avgtimes
self.lastepoch=offset.correct(epochdata)
#self.data3d=append(self.data3d,self.lastepoch)
print self.avgtimes,shape(epochdata)[0], shape(epochdata)[1]
#self.data3d=offset.correct(self.data3d.reshape(self.avgtimes+1, shape(epochdata)[0], shape(epochdata)[1]))
else: #not enough room for an epoch
print 'not enough room for an epoch'
try:
self.bufferedtrig
except AttributeError:
self.bufferedtrig=array([trig])
else:
self.bufferedtrig=append(self.bufferedtrig, trig)
trig2end=shape(self.datapiece.data_blockall)[0] - trig #get size from trigger index to end of the data slice
#print trig2end, 'buffered'
try:
self.bufferdata #look for buffer var
except AttributeError:
#bufferdata = array([1,shape(self.datapiece.data_blockall)[1]]) #create empty buffer
self.bufferdata=self.datapiece.data_blockall[trig:trig+trig2end,:] #create first instance of buffer
#print 'shape of first buffer data ',shape(self.bufferdata)
else:
#print shape(self.datapiece.data_blockall)
self.bufferdata=append(self.bufferdata, self.datapiece.data_blockall[trig:trig+trig2end,:], axis=0) #keep appending until enough room
#print 'shape of buffer data ',shape(self.bufferdata)
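# Added note (commentary, not in the original): epochs whose trigger falls too close to the end
# of the current chunk are copied into self.bufferdata (and their trigger indices into
# self.bufferedtrig); prepend() glues that buffer onto the front of the next chunk so the
# epoch can be completed once enough samples have arrived.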
def clean():
try:
self.datapiece.data_blockall
except AttributeError:
pass
else:
print 'deleting'
del self.datapiece.data_blockall
del self.datapiece.trigind.trigstartind
x=0;y=0
numofloops=200
#someval=200230
#inc=arange(numofloops*someval)
#fidpos=[2000000]
#fidpos=[140617600]
#fidpos=[1000230, 2000000, 4000000, 8000000, 10000000, 11000000, 20000000] #4 debugging
pylab.ion()
pylab.figure()
while x < numofloops: #loops
print x
y=y+1
if calldata(channelinstance):#, fidpos[0]*y)==True: #read chunk
print 'data read'
## print self.datapiece.trigind.trigstartind
## if shape(self.datapiece.trigind.trigstartind)[0]==0: #no trigger detected...
## print 'no new trig detected'
if parsetrig()==True: #trigs found
print 'trigs found'
else:
print 'no trigs found'
time.sleep(5)
if checkbuffer()==True:
print 'something buffered'
prepend(); print 'prepending data'
print 'averaging data'
avgdata()
#print 'exiting';return
print 'plotting'
pylab.subplot(331)
pylab.cla()
if contour != None:
contourbuild.display(self.data_avg[contour,:], channelinstance.chanlocs)
#p.draw()
#pylab.subplot(212)
#butterfly=None, csd=None, sp=None
if csd != None:
powerspecbuild.calc(self.data_avg, self.pdfobj)
#pylab.show()
## #buffer() #call buffer function to either prepend existing data to incoming, or create buffer out of new piece.
## shape(self.bufferdata); print 'shape of buffer'
else:
print 'no data read'
x=x+1
print x
#print 'fid-tell',self.datapiece.fid.tell()
clean(); print 'cleaning'
pylab.ioff()
pylab.show()
#dataall=
def cont(self):
pass
'''continuous build
datapiece.chunk();dataall=
from pdf2py import tap, channel, tapbuild
template='/opt/msw/data/sam_data0/00000/BuildAVG/03%24%09@15:39/1/e,rfDC'
acqfile='/opt/msw/data/sam_data0/00000/BuildAVG/03%24%09@15:39/2/e,rfDC'
tapped = tapbuild.get(acqfile,template)
epochwidth=20
ch=channel.index(template, 'meg')
tapped.avg(epochwidth, ch)
'''
|
badbytes/pymeg
|
pdf2py/tapbuild.py
|
Python
|
gpl-3.0
| 9,756
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-18 02:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('overseas', '0015_auto_20160918_0011'),
]
operations = [
migrations.AlterField(
model_name='cdn',
name='cdn_name',
field=models.CharField(max_length=100, unique=True),
),
migrations.AlterField(
model_name='tan14user',
name='login_email',
field=models.CharField(max_length=100, unique=True),
),
]
|
nevermoreluo/privateoverseas
|
overseas/migrations/0016_auto_20160918_1038.py
|
Python
|
gpl-3.0
| 638
|
get_ipython().magic("run 'symbols.py'")
df = nm('/home/naumov/w/ogs/build/bin/ogs')
count_equal(df)
cost_by_count_and_size(df)
libs = nm('/home/naumov/w/ogs/build/lib/libApplicationsLib.a')
lib_files=['/home/naumov/w/ogs/build/lib/libApplicationsLib.a', '/home/naumov/w/ogs/build/lib/libAssemblerLib.a', '/home/naumov/w/ogs/build/lib/libBaseLib.a', '/home/naumov/w/ogs/build/lib/libFileIO.a', '/home/naumov/w/ogs/build/lib/libGeoLib.a', '/home/naumov/w/ogs/build/lib/libInSituLib.a', '/home/naumov/w/ogs/build/lib/libMathLib.a', '/home/naumov/w/ogs/build/lib/libMeshGeoToolsLib.a', '/home/naumov/w/ogs/build/lib/libMeshLib.a', '/home/naumov/w/ogs/build/lib/libNumLib.a', '/home/naumov/w/ogs/build/lib/libProcessLib.a']
libs = pd.DataFrame()
for f in lib_files:
if libs.empty:
libs = nm(f)
else:
a = nm(f)
print(len(libs))
libs=libs.append(a)
print(len(libs))
print(len(a))
count_equal(libs)
cost_by_count_and_size(libs)
libs.sort('cost').tail(100)
|
endJunction/CppDevTools
|
object_analysis/tmp.py
|
Python
|
gpl-3.0
| 1,006
|
"""
# This is free software; you can redistribute it and/or modify it under the #
# terms of the GNU General Public License as published by the Free Software #
# Foundation; either version 3 of the License, or any later version. #
# #
# This software is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A #
# PARTICULAR PURPOSE. See the GNU General Public License for more details. #
# You should have received a copy of the GNU General Public licenses #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from pyvirtualdisplay import Display
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.common.exceptions import NoSuchElementException
from datetime import datetime
import unicodedata
import platform
import smtplib
import getpass
import os
import sys
import time
import random
def getUserInfo():
GATORLINK_ID = str(raw_input('GatorLink Username: '))
PASSWORD = str(getpass.getpass('Password:'))
TERM = str(raw_input('Term (spring, summer, fall): '))
COURSE = str(raw_input('Course (e.g. ENC1101): '))
print('\nIf you want to be notified via text message when a course opens up')
print('please input your phone number and carrier as shown below, including the comma')
print(' (e.g. 354-543-1234,att)\n')
print('The supported carriers are: att, tmobile, sprint, verizon, metropcs\n')
print(' Written as shown above.\n')
print('If left blank you will be sent an email to your uf-email account.\n')
CELL_INFO = str(raw_input('Cell phone number and carrier (cell-phone-number,carrier): '))
global CELL_PROVIDED
if not CELL_INFO:
CELL_PROVIDED = False
return (GATORLINK_ID, PASSWORD, TERM, COURSE)
else:
CELL_PROVIDED = True
CELL = CELL_INFO.split(',')[0]
if ' ' in CELL:
CELL = CELL.replace(' ','')
if('-' in CELL):
CELL = CELL.replace('-','')
if('(' in CELL):
CELL = CELL.replace('(', '')
if(')' in CELL):
CELL = CELL.replace(')', '')
CARRIER = CELL_INFO.split(',')[1]
return (GATORLINK_ID, PASSWORD, TERM, COURSE, (CELL,CARRIER))
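# A minimal sketch (added commentary, not part of the original script) of how the cell info
# collected above is normalized before sendText() maps it to a carrier's email-to-SMS gateway;
# the phone number below is made up.
def _example_normalize_cell(cell_info='354-543-1234,att'):
    cell, carrier = cell_info.split(',')
    for ch in (' ', '-', '(', ')'):
        cell = cell.replace(ch, '')
    return cell, carrier.strip().lower()  # -> ('3545431234', 'att')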
def sendText(user_info):
tmo_list = ('tmobile', 't-mobile')
metro_list = ('metropcs', 'metro-pcs')
att_list = ('att', 'at&t')
carrier = ((str(user_info[4][1])).lower()).strip()
if carrier in att_list:
to = str(user_info[4][0])+'@txt.att.net'
elif carrier in tmo_list:
to = str(user_info[4][0])+'@tmomail.net'
elif carrier == 'sprint':
to = str(user_info[4][0])+'@messaging.sprintpcs.com'
elif carrier == 'verizon':
to = str(user_info[4][0])+'@vtext.com'
elif carrier in metro_list:
to = str(user_info[4][0])+'@mymetropcs.com'
else:
print('Carrier not supported. An email will be sent to your uf-email account.')
return sendEmail(user_info)
print('A spot opened up for your class!')
print('Sending notification text message.')
office365_user = str(user_info[0])+'@ufl.edu'
office365_pwd = str(user_info[1])
#Set up server to send text message when course is found
smtpserver = smtplib.SMTP("smtp.office365.com",587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
smtpserver.login(office365_user,office365_pwd)
msg = """
\nA spot opened up for your course (%s) \nLogin to ISIS and register before someone else takes it!
""" %user_info[3]
smtpserver.sendmail(office365_user, to, msg)
smtpserver.close()
def sendEmail(user_info):
print('A spot opened up for your class!')
print('Sending notification email.')
to = str(user_info[0])+'@ufl.edu'
office365_user = str(user_info[0])+'@ufl.edu'
office365_pwd = str(user_info[1])
#Set up server to send text message when course is found
smtpserver = smtplib.SMTP("smtp.office365.com",587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
smtpserver.login(office365_user,office365_pwd)
msg = """
\nA spot opened up for your course (%s) \nLogin to ISIS and register before someone else takes it!
""" %user_info[3]
smtpserver.sendmail(office365_user, to, msg)
smtpserver.close()
def navigate(user_info):
#getting the current day
CURRENT_DAY = ((((str(datetime.now())).split('-'))[2]).split(' '))[0]
#if three days have passed and no course was found. Exit the program
if abs(int(CURRENT_DAY) - int(START_DAY)) >= 3:
print('3 days have passed. Course was not found.')
print('Program will exit now. Goodbye')
quit()
#Creating webdriver object to run on Firefox browser binaries.
if platform.system() == 'Darwin':
path_to_firefox = os.path.join(os.path.expanduser('~'),'Applications','Firefox.app','Contents','MacOS','firefox')
import pyvirtualdisplay
with pyvirtualdisplay.Display(visible=False):
binary = FirefoxBinary(path_to_firefox)
driver = webdriver.Firefox(firefox_binary=binary)
else:
driver = webdriver.Firefox()
#Different links to register for spring, summer or fall terms
if user_info[2] == 'spring':
driver.get('https://www.isis.ufl.edu/cgi-bin/nirvana?MDASTRAN=RSS-RGCHK2')
elif user_info[2] == 'summer':
driver.get('https://www.isis.ufl.edu/cgi-bin/nirvana?MDASTRAN=RSU-RGCHK2')
elif user_info[2] == 'fall':
driver.get('https://www.isis.ufl.edu/cgi-bin/nirvana?MDASTRAN=RSF-RGCHK2')
else:
print('\nThere\'s an error with the information you entered.')
print('Please re-enter your GatorLink username, password and the term you want to register for.\n')
del driver
#if we get here, just close firefox.
#DOWNSIDE: It will close all firefox instances running on the OS.
os.system('killall firefox')
if 'raspberrypi' in os.uname():
os.system('killall iceweasel')
if platform.system() == 'Darwin':
os.system('pkill firefox')
#Go back to asking for user info
return navigate(getUserInfo())
time.sleep(1)
try:
assert 'Web Login Service - University of Florida' in driver.title, 'ERROR: Failed to load registration login site'
except AssertionError:
print('We apologize, but an error occurred while loading the site.')
print('Please input your information again.')
del driver
return navigate(getUserInfo())
print('\nAuthenticating username and password...')
#As soon as the ISIS login website loads, find the input tag whose name is 'j_username'
username = driver.find_element_by_name('j_username')
#Pass the username to the username field
username.send_keys(str(user_info[0]))
#As soon as the ISIS login website loads, find the input tag whose name is 'j_password'
password = driver.find_element_by_name('j_password')
#Pass the password to the password field
password.send_keys(str(user_info[1]))
#Press enter (return key)
password.send_keys(Keys.RETURN)
time.sleep(1)
try:
try:
#if the username or password are incorrect, an error will occur
#check if error was generated
driver.find_element_by_xpath("//div[contains(@class, 'error')]")
print('\nYour username or password is incorrect. Please try again.\n')
os.system('killall firefox')
if 'raspberrypi' in os.uname():
os.system('killall iceweasel')
return navigate(getUserInfo())
except NoSuchElementException:
pass
except NameError:
pass
print('Login successful.')
#Find the 'Search All Courses' label and click on it
time.sleep(1)
driver.find_element_by_link_text('Search All Courses').click()
time.sleep(1)
#Find the 'Course Number' label and click on it
driver.find_element_by_xpath('//input[@value=\'C\']').click()
#Find the field to input the course number
course = driver.find_element_by_xpath('//input[@name=\'REGCSE\']')
#Click on the field to input the course number
course.click()
print('Finding course.')
#Input course number
course.send_keys(user_info[3])
#Press enter
course.send_keys(Keys.RETURN)
#Find classes in list
class_list = (driver.find_element_by_id('wellbody').text)
time.sleep(1)
class_list = unicodedata.normalize('NFKD', class_list).encode('ascii','ignore')
POSSIBLE_COURSE = False
try:
class_index = class_list.index(str(user_info[3].upper()))
class_neighborhood = class_list[class_index:(class_index+100)]
POSSIBLE_COURSE = True
except ValueError:
pass
if(user_info[3].upper() in class_list):
if POSSIBLE_COURSE:
if not 'NO SECTIONS AVAILABLE' in class_neighborhood:
if CELL_PROVIDED:
sendText(user_info)
os.system('killall firefox')
if 'raspberrypi' in os.uname():
os.system('killall iceweasel')
quit()
else:
sendEmail(user_info)
os.system('killall firefox')
if 'raspberrypi' in os.uname():
os.system('killall iceweasel')
quit()
os.system('killall firefox')
if 'raspberrypi' in os.uname():
os.system('killall iceweasel')
print('Course not found :(\nWill continue running until course is found.')
print('Maximum running time will be 3 days.')
#wait a random interval between 3 and 7 minutes so requests are not cyclical at
#exactly every three minutes
wait_time = random.randint(3,7)*60
time.sleep(wait_time)
del driver
navigate(user_info)
def main(args):
os.system("clear")
global START_DAY
#Getting the day that the program started. Will be 3, if the program was started on 11/3
START_DAY = ((((str(datetime.now())).split('-'))[2]).split(' '))[0]
global CURRENT_DAY
display = Display(visible=0, size=(800, 600))
display.start()
navigate(getUserInfo())
if __name__ == '__main__':
main(sys.argv)
|
gaboth22/find_my_course
|
find_course.py
|
Python
|
gpl-3.0
| 10,562
|
'''
adm.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
from . import CURRENT_FS_REV, REV_VER_MAP
from .backends.common import BetterBackend, get_backend, DanglingStorageURLError
from .common import (QuietError, setup_logging, get_backend_cachedir, get_seq_no,
stream_write_bz2, stream_read_bz2, CTRL_INODE)
from .database import Connection
from .metadata import restore_metadata, cycle_metadata, dump_metadata
from .parse_args import ArgumentParser
from datetime import datetime as Datetime
from getpass import getpass
import cPickle as pickle
import logging
import os
import shutil
import stat
import sys
import tempfile
import textwrap
import time
log = logging.getLogger("adm")
def parse_args(args):
'''Parse command line'''
parser = ArgumentParser(
description="Manage S3QL File Systems.",
epilog=textwrap.dedent('''\
Hint: run `%(prog)s <action> --help` to get help on the additional
arguments that the different actions take.'''))
pparser = ArgumentParser(add_help=False, epilog=textwrap.dedent('''\
Hint: run `%(prog)s --help` to get help on other available actions and
optional arguments that can be used with all actions.'''))
pparser.add_storage_url()
subparsers = parser.add_subparsers(metavar='<action>', dest='action',
help='may be either of')
subparsers.add_parser("passphrase", help="change file system passphrase",
parents=[pparser])
subparsers.add_parser("upgrade", help="upgrade file system to newest revision",
parents=[pparser])
subparsers.add_parser("clear", help="delete file system and all data",
parents=[pparser])
subparsers.add_parser("download-metadata",
help="Interactively download metadata backups. "
"Use only if you know what you are doing.",
parents=[pparser])
parser.add_debug_modules()
parser.add_quiet()
parser.add_log()
parser.add_authfile()
parser.add_ssl()
parser.add_cachedir()
parser.add_version()
options = parser.parse_args(args)
return options
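# Illustrative use of parse_args() (added commentary, not part of the original module);
# the storage URL is a made-up example:
#
#     options = parse_args(['passphrase', 's3://my-bucket/prefix'])
#     options.action       # -> 'passphrase'
#     options.storage_url  # -> 's3://my-bucket/prefix'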
def main(args=None):
'''Change or show S3QL file system parameters'''
if args is None:
args = sys.argv[1:]
options = parse_args(args)
setup_logging(options)
# Check if fs is mounted on this computer
# This is not foolproof but should prevent common mistakes
match = options.storage_url + ' /'
with open('/proc/mounts', 'r') as fh:
for line in fh:
if line.startswith(match):
raise QuietError('Can not work on mounted file system.')
if options.action == 'clear':
try:
backend = get_backend(options, plain=True)
except DanglingStorageURLError as exc:
raise QuietError(str(exc))
return clear(backend,
get_backend_cachedir(options.storage_url, options.cachedir))
try:
backend = get_backend(options)
except DanglingStorageURLError as exc:
raise QuietError(str(exc))
if options.action == 'upgrade':
return upgrade(backend, get_backend_cachedir(options.storage_url,
options.cachedir))
if options.action == 'passphrase':
return change_passphrase(backend)
if options.action == 'download-metadata':
return download_metadata(backend, options.storage_url)
def download_metadata(backend, storage_url):
'''Download old metadata backups'''
backups = sorted(backend.list('s3ql_metadata_bak_'))
if not backups:
raise QuietError('No metadata backups found.')
log.info('The following backups are available:')
log.info('%3s %-23s %-15s', 'No', 'Name', 'Date')
for (i, name) in enumerate(backups):
try:
params = backend.lookup(name)
except:
log.error('Error retrieving information about %s, skipping', name)
continue
if 'last-modified' in params:
date = Datetime.fromtimestamp(params['last-modified']).strftime('%Y-%m-%d %H:%M:%S')
else:
# (metadata might be from an older fs revision)
date = '(unknown)'
log.info('%3d %-23s %-15s', i, name, date)
name = None
while name is None:
buf = raw_input('Enter no to download: ')
try:
name = backups[int(buf.strip())]
except:
log.warn('Invalid input')
cachepath = get_backend_cachedir(storage_url, '.')
for i in ('.db', '.params'):
if os.path.exists(cachepath + i):
raise QuietError('%s already exists, aborting.' % cachepath + i)
param = backend.lookup(name)
log.info('Downloading and decompressing %s...', name)
def do_read(fh):
tmpfh = tempfile.TemporaryFile()
stream_read_bz2(fh, tmpfh)
return tmpfh
tmpfh = backend.perform_read(do_read, name)
log.info("Reading metadata...")
tmpfh.seek(0)
restore_metadata(tmpfh, cachepath + '.db')
# Raise sequence number so that fsck.s3ql actually uses the
# downloaded backup
seq_nos = [ int(x[len('s3ql_seq_no_'):]) for x in backend.list('s3ql_seq_no_') ]
param['seq_no'] = max(seq_nos) + 1
pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
def change_passphrase(backend):
'''Change file system passphrase'''
if not (isinstance(backend, BetterBackend) and backend.passphrase):
raise QuietError('File system is not encrypted.')
data_pw = backend.passphrase
if sys.stdin.isatty():
wrap_pw = getpass("Enter new encryption password: ")
if not wrap_pw == getpass("Confirm new encryption password: "):
raise QuietError("Passwords don't match")
else:
wrap_pw = sys.stdin.readline().rstrip()
backend.passphrase = wrap_pw
backend['s3ql_passphrase'] = data_pw
backend.passphrase = data_pw
def clear(backend, cachepath):
print('I am about to delete the S3QL file system in %s.' % backend,
'Please enter "yes" to continue.', '> ', sep='\n', end='')
if sys.stdin.readline().strip().lower() != 'yes':
raise QuietError()
log.info('Deleting...')
for suffix in ('.db', '.params'):
name = cachepath + suffix
if os.path.exists(name):
os.unlink(name)
name = cachepath + '-cache'
if os.path.exists(name):
shutil.rmtree(name)
backend.clear()
log.info('File system deleted.')
log.info('Note: it may take a while for the removals to propagate through the backend.')
def get_old_rev_msg(rev, prog):
return textwrap.dedent('''\
The last S3QL version that supported this file system revision
was %(version)s. You can run this version's %(prog)s by executing:
$ wget http://s3ql.googlecode.com/files/s3ql-%(version)s.tar.bz2
$ tar xjf s3ql-%(version)s.tar.bz2
$ (cd s3ql-%(version)s; ./setup.py build_ext)
$ s3ql-%(version)s/bin/%(prog)s <options>
''' % { 'version': REV_VER_MAP[rev],
'prog': prog })
def upgrade(backend, cachepath):
'''Upgrade file system to newest revision'''
log.info('Getting file system parameters..')
seq_nos = list(backend.list('s3ql_seq_no_'))
if (seq_nos[0].endswith('.meta')
or seq_nos[0].endswith('.dat')):
print(textwrap.dedent('''
File system revision too old to upgrade!
You need to use an older S3QL version to upgrade to a more recent
revision before you can use this version to upgrade to the newest
revision.
'''))
print(get_old_rev_msg(11 + 1, 's3qladm'))
raise QuietError()
seq_no = get_seq_no(backend)
# Check for cached metadata
db = None
if os.path.exists(cachepath + '.params'):
param = pickle.load(open(cachepath + '.params', 'rb'))
if param['seq_no'] < seq_no:
log.info('Ignoring locally cached metadata (outdated).')
param = backend.lookup('s3ql_metadata')
elif param['seq_no'] > seq_no:
print('File system not unmounted cleanly, need to run fsck before upgrade.')
print(get_old_rev_msg(param['revision'], 'fsck.s3ql'))
raise QuietError()
else:
log.info('Using cached metadata.')
db = Connection(cachepath + '.db')
else:
param = backend.lookup('s3ql_metadata')
# Check for unclean shutdown
if param['seq_no'] < seq_no:
print(textwrap.fill(textwrap.dedent('''\
Backend reports that fs is still mounted. If this is not the case, the file system may
have not been unmounted cleanly or the data from the most-recent mount may have not yet
propagated through the backend. In the latter case, waiting for a while should fix the
problem, in the former case you should try to run fsck on the computer where the file
system has been mounted most recently.
''')))
print(get_old_rev_msg(param['revision'], 'fsck.s3ql'))
raise QuietError()
# Check that the fs itself is clean
if param['needs_fsck']:
print('File system is damaged, need to run fsck before upgrade.')
print(get_old_rev_msg(param['revision'], 'fsck.s3ql'))
raise QuietError()
# Check revision
if param['revision'] < CURRENT_FS_REV - 1:
print(textwrap.dedent('''
File system revision too old to upgrade!
You need to use an older S3QL version to upgrade to a more recent
revision before you can use this version to upgrade to the newest
revision.
'''))
print(get_old_rev_msg(param['revision'] + 1, 's3qladm'))
raise QuietError()
elif param['revision'] >= CURRENT_FS_REV:
print('File system already at most-recent revision')
return
print(textwrap.dedent('''
I am about to update the file system to the newest revision.
You will not be able to access the file system with any older version
of S3QL after this operation.
You should make very sure that this command is not interrupted and
that no one else tries to mount, fsck or upgrade the file system at
the same time.
'''))
print('Please enter "yes" to continue.', '> ', sep='\n', end='')
if sys.stdin.readline().strip().lower() != 'yes':
raise QuietError()
# Download metadata
if not db:
log.info("Downloading & uncompressing metadata...")
def do_read(fh):
tmpfh = tempfile.TemporaryFile()
stream_read_bz2(fh, tmpfh)
return tmpfh
tmpfh = backend.perform_read(do_read, "s3ql_metadata")
log.info("Reading metadata...")
tmpfh.seek(0)
db = restore_metadata(tmpfh, cachepath + '.db')
log.info('Upgrading from revision %d to %d...', param['revision'], CURRENT_FS_REV)
db.execute('UPDATE inodes SET mode=? WHERE id=?',
(stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, CTRL_INODE))
param['revision'] = CURRENT_FS_REV
param['last-modified'] = time.time()
log.info('Dumping metadata...')
fh = tempfile.TemporaryFile()
dump_metadata(db, fh)
def do_write(obj_fh):
fh.seek(0)
stream_write_bz2(fh, obj_fh)
return obj_fh
log.info("Compressing and uploading metadata...")
backend.store('s3ql_seq_no_%d' % param['seq_no'], 'Empty')
obj_fh = backend.perform_write(do_write, "s3ql_metadata_new", metadata=param,
is_compressed=True)
log.info('Wrote %.2f MiB of compressed metadata.', obj_fh.get_obj_size() / 1024 ** 2)
log.info('Cycling metadata backups...')
cycle_metadata(backend)
with open(cachepath + '.params', 'wb') as fh:
pickle.dump(param, fh, 2)
log.info('Cleaning up local metadata...')
db.execute('ANALYZE')
db.execute('VACUUM')
if __name__ == '__main__':
main(sys.argv[1:])
|
thefirstwind/s3qloss
|
src/s3ql/adm.py
|
Python
|
gpl-3.0
| 12,482
|
import numpy as np
import cv2
import time
windows = [(64, 128), (128, 128), (128, 64)]
def pyramid(image, scale=1.5, minSize=(20, 20)):
# yield the original image
# yield image
# keep looping over the pyramid
while True:
# compute the new dimensions of the image and resize it
w = int(image.shape[1] / scale)
h = int(image.shape[0] / scale)
image = cv2.resize(image, (w, h)) # imutils.resize(image, width=w)
# if the resized image does not meet the supplied minimum
# size, then stop constructing the pyramid
if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
break
# yield the next image in the pyramid
yield image
def sliding_window(image, stepSize, windowSize):
# slide a window across the image
for y in xrange(0, image.shape[0], stepSize):
for x in xrange(0, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
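# A minimal sketch (added for illustration, not part of the original file) showing how
# pyramid() and sliding_window() are meant to be combined; the image path is hypothetical.
def _example_scan(image_path='example.jpg', winW=64, winH=64):
    image = cv2.imread(image_path)
    # walk every 32-pixel-stepped window on every pyramid level
    for resized in pyramid(image, scale=1.5, minSize=(winW, winH)):
        for (x, y, window) in sliding_window(resized, stepSize=32, windowSize=(winW, winH)):
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            # a classifier would score `window` here; we just report its position
            print(x, y, window.shape)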
def capture():
(winW, winH) = (256, 256)
cap = cv2.VideoCapture(0)
while(1):
ret, frame = cap.read()
# loop over the image pyramid
for resized in pyramid(frame):
# loop over the sliding window for each layer of the pyramid
for (x, y, window) in sliding_window(resized, stepSize=32, windowSize=(winW, winH)):
# if the window does not meet our desired window size, ignore
# it
if window.shape[0] != winH or window.shape[1] != winW:
continue
# THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW, SUCH AS APPLYING A
# MACHINE LEARNING CLASSIFIER TO CLASSIFY THE CONTENTS OF THE
# WINDOW
# since we do not have a classifier, we'll just draw the window
clone = resized.copy()
cv2.rectangle(clone, (x, y), (x + winW,
y + winH), (0, 255, 0), 2)
cv2.imshow("Window", clone)
cv2.waitKey(1)
# time.sleep(0.025)
cv2.imshow('SLIDING WINDOW', frame)
if cv2.waitKey(30) & 0xff == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
capture()
|
evamy/learning_to_see
|
VA4/utils.py
|
Python
|
gpl-3.0
| 2,358
|
#!/usr/bin/env python3
## TODO(alexr): We need to get these run through the whole preprocessing
## pipeline so we can evaluate the semeval test set.
## We need to be able to call freeling programmatically.
## And probably also cdec's tokenizer.
## XXX: magic string pointing into my files on my one particular computer.
import fileinput
import functools
from subprocess import Popen, PIPE, STDOUT
import os
from annotated_corpus import Token
home = os.path.expanduser("~")
FREELINGCONFIGDIR = home + "/terere/bibletools/freeling-config"
def freeling_output_to_sentence(freeling_output):
"""Return a list of tokens. We should only be given a single sentence at
once."""
sentence = []
lines = freeling_output.split("\n")
lemmas = []
lineno = 0
for i, line in enumerate(lines):
lineno += 1
line = line.strip()
if sentence and not line:
assert ("" == line.strip() for line in lines[i:])
return sentence
# sirvan servir VMSP3P0 0.885892
try:
## There can actually be more than the first four fields.
## But we just take the first four.
surface, lemma, tag, confidence = line.split()[:4]
token = Token(lemma, surface)
token.annotations.add("tag=" + tag)
sentence.append(token)
except:
print("surprising line:", line, lineno)
break
return sentence
## XXX: this assumes that Freeling is installed on the system and that we have a
## path to a directory of config files.
def run_freeling(sentence, sl, tokenize):
assert isinstance(sentence, str)
command = ["analyze", "-f", FREELINGCONFIGDIR + "/" + sl + ".cfg"]
if not tokenize:
command.extend(["--inplv", "splitted", "--input", "freeling"])
tokens = sentence.split(' ')
sentence = '\n'.join(tokens)
with Popen(command, stdout=PIPE, stdin=PIPE, stderr=STDOUT) as p:
stdout_b = p.communicate(input=sentence.encode("utf-8"))
stdout = stdout_b[0].decode("utf-8")
return freeling_output_to_sentence(stdout)
@functools.lru_cache(maxsize=100000)
def preprocess(sentence, sl, tokenize=True):
"""Run the preprocessing pipeline on the sentence, which should be a
string."""
assert isinstance(sentence, str)
return run_freeling(sentence, sl, tokenize=tokenize)
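# A hedged usage sketch (added commentary, not part of the original file): preprocess()
# pipes the sentence through Freeling's `analyze` binary and returns the Token list built
# by freeling_output_to_sentence(); the example sentence is made up.
#
#     tokens = preprocess("las casas verdes", "es")
#     print(tokens)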
def main():
for line in fileinput.input():
line = line.strip()
preprocessed = preprocess(line, "es")
print(preprocessed)
if __name__ == "__main__": main()
|
alexrudnick/chipa
|
src/preprocessing.py
|
Python
|
gpl-3.0
| 2,570
|
# -*- coding: UTF-8 -*-
#author: James Zo
#email: james_email@sina.cn
import MySQLdb
from py.db import *
class Mysql(DB):
def __init__(self, host, user, passwd, port=3306):
self._conn = MySQLdb.connect(
host=host,
port = port,
user=user,
passwd=passwd,
charset="utf8"
)
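# A minimal usage sketch (added commentary, not part of the original file); host and
# credentials below are placeholders:
#
#     db = Mysql(host='127.0.0.1', user='root', passwd='secret', port=3306)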
|
jameszo/kit
|
py/mysql.py
|
Python
|
gpl-3.0
| 384
|
# ! /usr/bin/env python
# _*_ coding:utf-8 _*_
"""
@author = lucas.wang
@create_time = 2018-03-15
"""
# import webdriver from selenium
from bs4 import BeautifulSoup
from selenium import webdriver
import time
from multiprocessing.dummy import Pool, Lock, freeze_support
import os
import sys
import json
import requests
# `headers` is used by the download code below but was not defined in this script;
# a minimal User-Agent header is assumed here so the requests call can run.
headers = {'User-Agent': 'Mozilla/5.0'}
def input_page_url_with_change_dir():
'''
Change to the storage folder and get the album page URL
'''
print('Please enter the storage folder (press Enter to confirm):')
while True:
dir_ = input()
if os.path.exists(dir_):
os.chdir(dir_)
break
else:
try:
os.mkdir(dir_)
os.chdir(dir_)
break
except Exception as e:
print('Please enter a valid folder path:')
print('Please enter the URL of the FM page you want to download (press Enter to confirm) -\n'
'e.g. http://www.ximalaya.com/20251158/album/2758791:')
page_url = input()
return page_url
page_url = input_page_url_with_change_dir()
def get_json_urls_from_page_url(page_url):
'''
Get the json links of all audio tracks on this album page
'''
# webdriver's PhantomJS method opens the headless browser that was downloaded with this script.
# executable_path points to phantomjs.exe in the current folder to start the browser
driver = webdriver.PhantomJS(executable_path="phantomjs.exe")
# request the page with the browser
driver.get(page_url)
# wait until all the page data has finished loading
# time.sleep(7)
driver.implicitly_wait(30)
# locate the element by id,
# .text gets the element's text data
# soup = driver.find_element_by_class_name('personal_body')
mp3_ids = driver.find_element_by_class_name('personal_body').get_attribute('sound_ids')
# soup = BeautifulSoup(driver.page_source, "lxml")
#
# mp3_ids = soup.select_one('.personal_body').attrs['sound_ids']
# print(mp3_ids)
# close the browser
driver.close()
json_url = 'http://www.ximalaya.com/tracks/{id}.json'
urls = [json_url.format(id=i) for i in mp3_ids.split(',')]
return urls
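# Added note (commentary, not in the original): the sound_ids attribute scraped above yields
# one id per track; each id is mapped to a metadata URL of the form
# http://www.ximalaya.com/tracks/<id>.json, which get_mp3_from_json_url() later reads to
# obtain the track title and play_path.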
mp3_json_urls = get_json_urls_from_page_url(page_url)
n_tasks = len(mp3_json_urls)
lock = Lock()
shared_dict = {}
def get_mp3_from_json_url(json_url):
'''
Visit the json link to get the audio title and download URL, then start downloading
'''
driver = webdriver.PhantomJS(executable_path="phantomjs.exe")
# 使用浏览器请求页面
driver.get(json_url)
# 等待所有数据加载完毕
driver.implicitly_wait(30)
# locate the element by id,
# .text gets the element's text data
mp3_info = json.loads(driver.find_element_by_tag_name("pre").text)
# print(mp3_ids)
# close the browser
driver.close()
title = mp3_info['album_title'] + '+ ' + mp3_info['title'] + '.m4a'
path = mp3_info['play_path']
title = title.replace('|', '-') # avoid filename errors caused by special characters
if os.path.exists(title):
return 'Already exists!'
# http://stackoverflow.com/questions/13137817/how-to-download-image-using-requests
while True:
try:
with open(title, 'wb') as f:
response = requests.get(path, headers=headers, stream=True)
if not response.ok:
# shared_dict.pop(title)
print('response error with', title)
continue
total_length = int(response.headers.get('content-length'))
chunk_size = 4096
dl = 0
shared_dict[title] = 0
for block in response.iter_content(chunk_size):
dl += len(block)
f.write(block)
done = int(50 * dl / total_length)
shared_dict[title] = done
global n_tasks
with lock:
n_tasks -= 1
shared_dict.pop(title)
return 'Done!'
except Exception as e:
print('other error with', title)
os.remove(title)
# http://stackoverflow.com/questions/28057445/python-threading-multiline-progress-report
# http://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads
def report_status():
'''
Report download progress based on the shared dict
'''
import time
n = len(mp3_json_urls)
print(u'Preparing to download...')
while len(shared_dict) == 0:
time.sleep(0.2)
while len(shared_dict) != 0:
line = '' # "\r"
for title, done in shared_dict.items():
line += "%s\n - [%s%s]\n" % (
title, '=' * done, ' ' * (50 - done)
)
line += '\n**** remaining/total tasks = %s/%s ****' % (n_tasks, n)
os.system('cls')
sys.stdout.write(line)
sys.stdout.flush()
time.sleep(1)
# if __name__ == '__main__':
# download in multiple threads and report progress
freeze_support()
with Pool(6) as pool:
# http://stackoverflow.com/questions/35908987/python-multiprocessing-map-vs-map-async
r = pool.map_async(get_mp3_from_json_url, mp3_json_urls)
report_status()
r.wait()
os.system('cls')
print('Download complete!')
# if __name__ == '__main__':
# pass
|
Lucas-Wong/ToolsProject
|
Crawler/xmly-2.py
|
Python
|
gpl-3.0
| 5,214
|
from duckomatic.utils.resource import Resource
class Gps(Resource):
def __init__(self, *vargs, **kwargs):
super(Gps, self).__init__(*vargs, **kwargs)
self.count = 0
self.latitude = 49.289324
self.longitude = -123.129219
def get_message_to_publish(self):
self.count += 1
self.latitude += 0.000001
self.longitude += 0.000001
return ('feed', {
'latitude': self.latitude,
'longitude': self.longitude,
'count': self.count
})
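# Added note (commentary, not in the original): each call above nudges the coordinates by
# 1e-6 degrees, so the first published message is roughly
# ('feed', {'latitude': 49.289325, 'longitude': -123.129218, 'count': 1}).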
def start(self):
self.start_polling_for_messages_to_publish(0.01)
|
morgangalpin/duckomatic
|
duckomatic/platform/resources/gps.py
|
Python
|
gpl-3.0
| 619
|
import pilas
import json
from pilas.escena import Base
class Individual(Base):
def iniciar(self):
archivo = open("data/archivos/usuarios.json","r")
self.datos_usuarios = json.load(archivo)
archivo.close()
archivo1 = open("data/archivos/estadisticas.json","r")
self.datos_estadisticas = json.load(archivo1)
archivo1.close()
self.sonido_boton = pilas.sonidos.cargar("data/audio/boton.ogg")
self.fondo()
self.calcular()
self.enunciado1 = pilas.actores.Texto(" ")
def fondo(self):
pilas.fondos.Fondo("data/img/fondos/trabalenguas.jpg")
opc = [("Volver",self.volver)]
self.menu = pilas.actores.Menu(opc ,y=-350 , fuente="data/fonts/American Captain.ttf")
self.menu.escala = 1.4
def volver(self):
self.sonido_boton.reproducir()
pilas.recuperar_escena()
def transformar(self,time):
minutes=(int(time / 60))
seconds=time-(minutes * 60)
secondsStr=str(seconds)
minutesStr=str(minutes)
if (len(str(seconds)) < 2):
secondsStr = '0'+secondsStr
if (len(str(minutes)) < 2):
minutesStr = '0'+minutesStr
texto = str(minutesStr) + ":" + str(secondsStr)
return texto
def calcular(self):
lista_coordenadas = [250,230,210,190,170,150,130,110,90,70,50,30,10,-10,-30,-50,-70,-90,-110,-130,-150,-170,-190,-210,-230,-250,-270,-290,-310,-330]
self.usuarios = self.datos_estadisticas.keys()
self.lista = []
for x in range(0,len(self.usuarios)):
if(x < 31):
yy= lista_coordenadas[x]
self.enunciado1 = pilas.actores.Texto(str(self.usuarios[x]) ,x = -400 ,y = yy)
self.enunciado1.escala = 0.5
if(self.datos_estadisticas[self.usuarios[x]]['tiempo_ahorcado'][2]>0):
self.enunciado2 = pilas.actores.Texto("Fallo "+str(self.datos_estadisticas[self.usuarios[x]]['tiempo_ahorcado'][2])+" veces", x = -250 , y = yy)
elif(self.datos_estadisticas[self.usuarios[x]]['tiempo_ahorcado'][0] != 0):
self.enunciado2 = pilas.actores.Texto("Acerto "+str(self.datos_estadisticas[self.usuarios[x]]['tiempo_ahorcado'][1])+" veces" , x = -250 , y = yy)
else:
self.enunciado2 = pilas.actores.Texto("No Jugo", x = -250 , y = yy)
self.enunciado2.escala = 0.5
if(self.datos_estadisticas[self.usuarios[x]]['tiempo_adivinanza'][2]>0):
self.enunciado3 = pilas.actores.Texto("Fallo "+str(self.datos_estadisticas[self.usuarios[x]]['tiempo_adivinanza'][2])+" veces", x = -150 , y = yy)
elif(self.datos_estadisticas[self.usuarios[x]]['tiempo_adivinanza'][0] != 0):
self.enunciado3 = pilas.actores.Texto("Acerto "+str(self.datos_estadisticas[self.usuarios[x]]['tiempo_adivinanza'][1])+" veces", x = -150 , y = yy)
else:
self.enunciado3 = pilas.actores.Texto("No Jugo", x = -150 , y = yy)
self.enunciado3.escala = 0.5
if(self.datos_estadisticas[self.usuarios[x]]['tiempo_trabalenguas'][2]>0):
self.enunciado4 = pilas.actores.Texto("Fallo "+str(self.datos_estadisticas[self.usuarios[x]]['tiempo_trabalenguas'][2])+" veces", x = -50 , y = yy)
elif(self.datos_estadisticas[self.usuarios[x]]['tiempo_trabalenguas'][0] != 0):
self.enunciado4 = pilas.actores.Texto("Acerto "+str(self.datos_estadisticas[self.usuarios[x]]['tiempo_trabalenguas'][1])+" veces", x = -50 , y = yy)
else:
self.enunciado4 = pilas.actores.Texto("No Jugo", x = -50 , y = yy)
self.enunciado4.escala = 0.5
try:
t_ahorcado = int(self.datos_estadisticas[self.usuarios[x]]['tiempo_ahorcado'][0]) / int(self.datos_estadisticas[self.usuarios[x]]['tiempo_ahorcado'][1])
except(ZeroDivisionError):
t_ahorcado = 0
try:
t_adivinanza = int(self.datos_estadisticas[self.usuarios[x]]['tiempo_adivinanza'][0]) / int(self.datos_estadisticas[self.usuarios[x]]['tiempo_adivinanza'][1])
except(ZeroDivisionError):
t_adivinanza = 0
try:
t_trabalenguas = int(self.datos_estadisticas[self.usuarios[x]]['tiempo_trabalenguas'][0]) / int(self.datos_estadisticas[self.usuarios[x]]['tiempo_trabalenguas'][1])
except(ZeroDivisionError):
t_trabalenguas = 0
tiempo = int(self.datos_usuarios[self.usuarios[x]]['tiempo']) - int(self.datos_estadisticas[self.usuarios[x]]['tiempo'])
promedio_adivinanzas = self.transformar(t_adivinanza)
promedio_trabalenguas = self.transformar(t_trabalenguas)
promedio_ahorcado = self.transformar(t_ahorcado)
tiempo = self.transformar(tiempo)
if(tiempo == "20:00"):
tiempo = "00:00"
rayos = self.datos_estadisticas[self.usuarios[x]]['rayos']
self.enunciado5 = pilas.actores.Texto(promedio_ahorcado,x = 50, y = yy)
self.enunciado5.escala = 0.5
self.enunciado6 = pilas.actores.Texto(promedio_adivinanzas,x= 150 , y = yy)
self.enunciado6.escala = 0.5
self.enunciado7 = pilas.actores.Texto(promedio_trabalenguas,x = 250 , y= yy)
self.enunciado7.escala = 0.5
self.enunciado8 = pilas.actores.Texto(tiempo, x = 400 , y= yy)
self.enunciado8.escala = 0.5
self.enunciado9 = pilas.actores.Texto(str(rayos) , x = 500 , y = yy)
self.enunciado9.escala = 0.5
self.titulo1 = pilas.actores.Texto("Nombres",x = -400 , y = 300)
self.titulo2 = pilas.actores.Texto("Intentos",x = -150 , y = 320)
self.titulo6 = pilas.actores.Texto("Tiempos",x = 150 , y = 320)
self.titulo10 = pilas.actores.Texto("Ultimo Tiempo",x=400,y=300)
self.titulo10.escala = 0.7
self.titulo3 = pilas.actores.Texto("Ahorcado",x = -250 , y = 290)
self.titulo3.escala = 0.5
self.titulo4 = pilas.actores.Texto("Adivinanza",x = -150 , y = 290)
self.titulo4.escala = 0.5
self.titulo5 = pilas.actores.Texto("Trabalenguas",x = -50 , y = 290)
self.titulo5.escala = 0.5
self.titulo7 = pilas.actores.Texto("Ahorcado",x = 50 , y = 290)
self.titulo7.escala = 0.5
self.titulo8 = pilas.actores.Texto("Adivinanza",x = 150 , y = 290)
self.titulo8.escala = 0.5
self.titulo9 = pilas.actores.Texto("Trabalenguas",x = 250 , y = 290)
self.titulo9.escala = 0.5
self.titulo11 = pilas.actores.Texto("Rayos",x= 500 , y=300)
self.titulo11.escala = 0.7
|
gercordero/va_de_vuelta
|
src/individual.py
|
Python
|
gpl-3.0
| 6,051
|
from __future__ import print_function
import httplib2
import os
import re
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from DateTime import DateTime
import base64
import email
import csv
import pandas as pd
global mail_SNIPPET
global mail_MAILID
global mail_SUBJECT
global mail_SPAM
global mail_dateTime
global emailClassification
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = '/Users/XXXXXX/Documents/Google Gmail API /client_secret.json'
APPLICATION_NAME = 'Email_Classifier'
mail_SPAM = []
mail_SNIPPET = []
mail_MAILID = []
mail_SUBJECT = []
mail_dateTime = []
class GeneratingDataSet():
def createCSV(self):
path = '/Users/XXXXXXXX/Documents/Google Gmail API /'
fileName = 'EmailClass.csv'
with open(path+fileName,'w') as f:
w = csv.writer(f)
list1 = ["Date"] + ["SPAM"] + ["SNIPPET"] + ["MAIL_ID"] +["SUBJECT"]
w.writerow(list1)
f.close()
def DateTimeFormat(self,time):
dayhours = ['0','1','2','3','4','5','6','7','8','9']
months = ['1','2','3','4','5','6','7','8','9']
days = ['1','2','3','4','5','6','7','8','9']
minus = '-'
addition = '+'
global x
if minus in time:
splittz = time.split(minus)
y = DateTime(splittz[0])
x = [y.parts()]
month = str(x[0][1])
day = str(x[0][2])
hours = str(x[0][3])
if month in months:
month = '0'+month
#return month
if day in days:
day = '0'+day
#return day
if hours in dayhours:
hours = '0'+hours
#return hours
return int(str(x[0][0])+month+day+hours)
elif addition in time:
splitTZ = time.split('+')
y = DateTime(splitTZ[0])
x = [y.parts()]
month = str(x[0][1])
day = str(x[0][2])
hours = str(x[0][3])
if str(x[0][1]) in months:
month = '0'+month
#return month
if str(x[0][2]) in days:
day = '0'+day
#return day
if str(x[0][3]) in dayhours:
hours = '0'+hours
#return hours
return int(str(x[0][0])+month+day+hours)
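# Worked example (added commentary, not in the original): a Date header such as
# 'Tue, 15 Mar 2016 09:05:00 +0530' is split on '+', parsed with DateTime, and zero-padded
# into the integer 2016031509 (YYYYMMDDHH), which is the value written to the Date column
# of EmailClass.csv.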
def nextPage(self,results,service):
if 'nextPageToken' in results:
messages = []
page_token = results['nextPageToken']
response = service.users().messages().list(userId='me',pageToken=page_token,includeSpamTrash=True).execute()
return response
def getDetails(self,results,service):
emailClassifier = pd.read_csv('/Users/XXXXXXXXX/Documents/Google Gmail API /EmailClass.csv',header=None,encoding='ISO-8859-1')
for i in range(len(results['messages'])):
#for i in range(2):
message = service.users().messages().get(userId='me', id=results['messages'][i]['id'],format='full').execute()
#print(message)
try:
if "SPAM" in message['labelIds']:
mail_SPAM.append(1)
else:
mail_SPAM.append(0)
if message['snippet']:
mail_SNIPPET.append(message['snippet'].lower())
else:
mail_SNIPPET.append("")
for j in range(len(message['payload']['headers'])):
if message['payload']['headers'][j]['name'] == 'From':
m = re.search('<(.+?)>', message['payload']['headers'][j]['value'])
if m:
found = m.group(1)
mail_MAILID.append(found.lower())
else:
mail_MAILID.append("")
elif message['payload']['headers'][j]['name'] == "Subject":
if message['payload']['headers'][j]['value']:
mail_SUBJECT.append(message['payload']['headers'][j]['value'].lower())
else:
mail_SUBJECT.append("")
elif message['payload']['headers'][j]['name'] == 'Date':
if message['payload']['headers'][j]['value']:
mail_dateTime.append(self.DateTimeFormat(message['payload']['headers'][j]['value']))
#print(mail_dateTime)
else:
mail_dateTime.append("")
except KeyError:
print(message)
flags = None
def get_credentials(self):
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def fetchEmails():
GenDataSet = GeneratingDataSet()
credentials = GenDataSet.get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
results = service.users().messages().list(userId='me',includeSpamTrash=True).execute()
count = 0
GenDataSet.getDetails(results,service)
for x in range(1):
newResult = GenDataSet.nextPage(results,service)
print("Next Page Result ")
results = newResult
GenDataSet.getDetails(newResult,service)
return [mail_dateTime,mail_SPAM,mail_SNIPPET,mail_MAILID,mail_SUBJECT]
def main():
GenDataSet = GeneratingDataSet()
GenDataSet.createCSV()
fetchEmails()
with open('/Users/XXXXXXXXX/Documents/Google Gmail API /EmailClass.csv','a') as f:
w = csv.writer(f)
w.writerows(zip(mail_dateTime,mail_SPAM,mail_SNIPPET,mail_MAILID,mail_SUBJECT))
f.close()
if __name__ == '__main__':
main()
|
Mayurji/Machine-Learning
|
Email Classification Using ML/Email_Classifier.py
|
Python
|
gpl-3.0
| 6,955
|
import logging
indexerLogFile = 'indexer.log'
indexerlogFormat = '%(asctime)s - %(levelname)s - %(message)s'
indexerLogLevel = logging.WARNING
indexWorkerNumber = 4
indexFolders = ['D:\\Repository\\R_2_10_0\\doc\\examples\\service\\MS_LTE']
solrURL = 'http://135.251.142.115:8983/solr/myworkspace'
|
zhongzhu/searchmyworkspace
|
src/myconfig.py
|
Python
|
gpl-3.0
| 300
|
#!/usr/bin/env python3
import numpy as np
def diamond(n):
temp = np.eye(n, dtype=int)
flipped = np.fliplr(temp)
# top half of the diamond: the mirrored identity next to the identity,
# dropping the duplicated middle column
concat = np.concatenate((flipped, temp), axis=1)
concat = np.delete(concat, n, 1)
# mirror the top half downwards, skipping the shared middle row, to close the diamond
return np.concatenate((concat, np.flip(concat)[1:, ]))
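# Worked example (added commentary): diamond(3) produces the 5x5 array
#   [[0 0 1 0 0]
#    [0 1 0 1 0]
#    [1 0 0 0 1]
#    [0 1 0 1 0]
#    [0 0 1 0 0]]
# i.e. the mirrored identity halves stacked into a diamond of ones.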
def main():
pass
if __name__ == "__main__":
main()
|
mohanprasath/Course-Work
|
data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part02-e13_diamond/src/diamond.py
|
Python
|
gpl-3.0
| 379
|
"""
Description: A module that represents "spots" on the skewer.
Author: Sean Strout
Author: James Heliotis
Author: Jon O'Brien
"""
import food
class NoneNode():
"""A class to represent no node"""
__slots__ = ()
"""A global constant for no node"""
NONE_NODE = NoneNode()
class KebabSpot(object):
"""
This class is used to represent an individual
spot on the shish kebab skewer. Each spot contains
1. item - a food item.
2. succ - a reference to the next KebabSpot (or NoneNode).
In computer science terms, a KebabSpot acts like a node in a stack.
"""
__slots__ = (
'item', # The food item (of type Food)
'succ' # The next food item (of type Food, or None)
)
def mkKebabSpot( item, succ ):
"""
Construct a KebabSpot instance.
Arguments:
item - the item (type Food) to store at this spot
succ - the next KebabSpot on the skewer
Returns: New KebabSpot
"""
kebabSpot = KebabSpot()
kebabSpot.item = item
kebabSpot.succ = succ
return kebabSpot
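# A hedged usage sketch (added commentary, not part of the original module): spots chain
# like a linked stack, with the last spot pointing at NONE_NODE, e.g.
#
#     skewer = mkKebabSpot(beef, mkKebabSpot(pepper, NONE_NODE))
#     size(skewer)      # -> 2
#     stringEm(skewer)  # -> 'beef, pepper'
#
# where `beef` and `pepper` stand for Food objects (with .name 'beef'/'pepper')
# created via the food module.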
def calories(kebabSpot):
"""
Count the total number of calories on the skewer with a while loop,
starting at the given kebabSpot and following the succ references
to the end of the skewer.
"""
calories = 0
currSpot = kebabSpot
while currSpot != NONE_NODE:
calories += int(food.CALORIES[currSpot.item.name])
currSpot = currSpot.succ
return calories
def size(kebabSpot):
"""
Count the number of KebabSpots on the skewer starting
at this one and going to the end.
Idea: If one asks the first KebabSpot on the skewer
for a size, the total number of KebabSpot's (and therefore
food items) is computed.
Arguments:
kebabSpot - the KebabSpot instance
Returns: The number of KebabSpots starting at this one.
"""
#need to go through all the spots to continue through this stuff.
if kebabSpot==NONE_NODE:
return 0
else:
return 1 + size(kebabSpot.succ)
def isVeggie(kebabSpot):
"""
Return whether there are only vegetable foods from this
KebabSpot to the end of the skewer.
Idea: If one asks the first KebabSpot on the skewer
isVeggie, one can find out if the entire shish kebab
is vegetarian.
Arguments:
kebabSpot - the KebabSpot instance
Returns: True if there are no vegetables starting at
this KebabSpot, False otherwise.
"""
if kebabSpot==NONE_NODE:
return True
elif kebabSpot.item.veggie is True:
return isVeggie(kebabSpot.succ)
else:
return False
def has(kebabSpot, name):
"""
Return whether there are any foods of the given kind
from this KebabSpot to the end of the skewer.
Arguments:
kebabSpot - the KebabSpot instance
name - the name (string) being searched for.
Returns: True if any of the spots hold a food
item that equals the name, False otherwise.
"""
        if kebabSpot == NONE_NODE:
            return False
        if str(kebabSpot.item.name) == str(name):
            return True
        return has(kebabSpot.succ, name)
def stringEm(kebabSpot):
"""
        Return a string containing the names of the items on the
        skewer from this spot down, separated by commas.
Arguments:
kebabSpot - the KebabSpot instance
Returns: A string containing the names of each
of the food items from this spot down.
"""
        if isinstance(kebabSpot, NoneNode):
            return ''
elif isinstance(kebabSpot.succ,NoneNode):
return str(kebabSpot.item.name)
else:
return str(kebabSpot.item.name) + ', ' + stringEm(kebabSpot.succ)
def emptyKebab(kebabSpot):
"""Returns whether the kebabSpot is empty or not"""
if kebabSpot==NONE_NODE:
return True
else:
return False
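# A minimal usage sketch (not part of the original file). The food constructor
# is hypothetical; the code above only requires that items expose `.name` and
# `.veggie` and that `food.CALORIES` maps names to calorie counts. The calls
# below assume the spot functions resolve at module level, which is what their
# own recursive calls (e.g. `size(kebabSpot.succ)`) expect.
# beef = food.mkFood('beef')      # hypothetical constructor
# pepper = food.mkFood('pepper')
# skewer = mkKebabSpot(beef, mkKebabSpot(pepper, NONE_NODE))
# size(skewer)          # -> 2
# has(skewer, 'beef')   # -> True
# stringEm(skewer)      # -> 'beef, pepper'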
|
jonobrien/School_Backups
|
cs1-python/Labs/week 9/kebab_spot.py
|
Python
|
gpl-3.0
| 3,998
|
import requests
user_auth = ('natas30','wie9iexae0Daihohv8vuu3cei9wahf0e')
target = 'http://natas30.natas.labs.overthewire.org/index.pl'
payload = 'username=natas31&password=2&password=1\' OR 1=1&password=2'
payload2 = 'username=natas31&password=1'
payload3 = "username='natas31' OR '1'='1'&username=3&password='1'"
header = {'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.post(target, auth = user_auth, headers = header, data = payload3.replace(" ", "%20"))
print(response.text)
|
lcy2/otwPython
|
natas30/natas30.py
|
Python
|
gpl-3.0
| 516
|
# URL
# https://www.hackerrank.com/challenges/sock-merchant
import unittest
def main():
data = readFromStdin()
print(sock_pairs(data))
def readFromStdin():
_ = int(input().strip())
data = [int(c_temp) for c_temp in input().strip().split(' ')]
return data
def sock_pairs(data):
sdic = {}
pair = 0
for sock in data:
if sock in sdic:
if sdic[sock] == 1:
sdic[sock] = 0
                pair += 1
else:
sdic[sock] = 1
else:
sdic[sock] = 1
return pair
if __name__ == "__main__":
# unittest.main()
main()
class Test(unittest.TestCase):
def testABC(self):
self.assertEqual(3, sock_pairs([10,20, 20, 10, 10, 30, 50, 10, 20]))
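# An equivalent formulation using collections.Counter (a sketch, not part of
# the original solution): every colour contributes count // 2 pairs.
# from collections import Counter
# def sock_pairs_counter(data):
#     return sum(count // 2 for count in Counter(data).values())
# sock_pairs_counter([10, 20, 20, 10, 10, 30, 50, 10, 20])  # -> 3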
|
tuergeist/HackerRank
|
challenges/sock_merchant.py
|
Python
|
gpl-3.0
| 769
|
"""
Performance test created using multi-mechanize to analyze time
for query processing with MySQL.
"""
import random
import time
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
class Transaction(object):
def __init__(self):
self.JobDB = JobDB()
self.custom_timers = {}
def run(self):
start_time = time.time()
for i in range(0, random.randint(1000, 3000)):
jobID = random.randint(1, 1000)
self.JobDB.getJobParameters(jobID)
end_time = time.time()
self.custom_timers["Execution_Time"] = end_time - start_time
if __name__ == "__main__":
trans = Transaction()
trans.run()
print(trans.custom_timers)
|
DIRACGrid/DIRAC
|
tests/Performance/MySQLJobMonitoring/test_scripts/query.py
|
Python
|
gpl-3.0
| 711
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import zipfile
from flask import url_for, render_template
from mini_fiction.utils import misc
class BaseDownloadFormat(object):
extension = None
name = None
content_type = 'application/octet-stream'
debug_content_type = 'text/plain; charset=utf-8'
chapter_template = None
chapter_extension = None
def __init__(self, slugify_filenames=False):
assert self.extension is not None
assert self.name is not None
self.slugify_filenames = bool(slugify_filenames)
def url(self, story):
return url_for(
'story.download',
story_id=story.id,
filename=self.filename(story),
)
def filename(self, story):
title = (story.title or str(story.id)).strip().replace('.', '')
if self.slugify_filenames:
title = slugify(title)
else:
title = misc.sanitize_filename(title, strip=True)
return '{}.{}'.format(title, self.extension)
def render(self, **kw):
raise NotImplementedError
@property
def slug(self):
return slugify(str(self.name.lower()))
class ZipFileDownloadFormat(BaseDownloadFormat):
content_type = 'application/zip'
chapter_encoding = 'utf-8'
def render(self, **kw):
from io import BytesIO
buf = BytesIO()
zipobj = zipfile.ZipFile(buf, mode='w', compression=zipfile.ZIP_DEFLATED)
try:
self.render_zip_contents(zipobj, **kw)
finally:
zipobj.close()
return buf.getvalue()
def render_zip_contents(self, zipobj, story, **kw):
from mini_fiction.models import Chapter
dirname = slugify(story.title or str(story.id))
ext = self.chapter_extension
chapters = list(story.chapters.select(lambda x: not x.draft).order_by(Chapter.order, Chapter.id))
num_width = len(str(max(x.order for x in chapters))) if chapters else 1
for chapter in chapters:
data = render_template(
self.chapter_template,
chapter=chapter,
story=story,
).encode(self.chapter_encoding)
name = slugify(chapter.autotitle)
num = str(chapter.order).rjust(num_width, '0')
arcname = str('%s/%s_%s.%s' % (dirname, num, name, ext))
zipdate = chapter.updated
if chapter.first_published_at and chapter.first_published_at > zipdate:
zipdate = chapter.first_published_at
zipinfo = zipfile.ZipInfo(
arcname,
date_time=zipdate.timetuple()[:6],
)
zipinfo.compress_type = zipfile.ZIP_DEFLATED
            zipinfo.external_attr = 0o644 << 16  # Python 3.4 gives archived files 000 permissions; fix that
zipobj.writestr(zipinfo, data)
def slugify(s):
from mini_fiction.utils.unidecode import unidecode
return re.subn(r'\W+', '_', unidecode(s))[0]
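# A minimal sketch (not part of the original module) of how a concrete format
# is presumably wired up: subclass ZipFileDownloadFormat and fill in the class
# attributes that BaseDownloadFormat's __init__ asserts on. The class name and
# template path below are hypothetical.
# class TxtZipDownload(ZipFileDownloadFormat):
#     extension = 'zip'
#     name = 'TXT (zip)'
#     chapter_extension = 'txt'
#     chapter_template = 'downloads/chapter.txt'  # hypothetical template
# fmt = TxtZipDownload()
# archive_bytes = fmt.render(story=story)  # zip bytes, one file per non-draft chapter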
|
andreymal/mini_fiction
|
mini_fiction/downloads/base.py
|
Python
|
gpl-3.0
| 3,025
|
# Custom markers for robottelo tests
def pytest_configure(config):
"""Register custom markers to avoid warnings."""
markers = [
"deselect(reason=None): Mark test to be removed from collection.",
"skip_if_open(issue): Skip test based on issue status.",
"tier1: Tier 1 tests", # CRUD tests
"tier2: Tier 2 tests", # Association tests
"tier3: Tier 3 tests", # Systems integration tests
"tier4: Tier 4 tests", # Long running tests
"destructive: Destructive tests",
"upgrade: Upgrade tests",
"pit_server: PIT server scenario tests",
"pit_client: PIT client scenario tests",
"run_in_one_thread: Sequential tests",
"build_sanity: Fast, basic tests that confirm build is ready for full test suite",
]
markers.extend(module_markers())
for marker in markers:
config.addinivalue_line("markers", marker)
def module_markers():
"""Register custom markers for each module"""
return [
"host_create: Marks host creation CLI tests",
"host_update: Marks host update CLI tests",
"host_parameter: Marks host parameter CLI tests",
"katello_host_tools: Marks host CLI tests where katello host tools is installed on client",
"host_subscription: Marks host subscription CLI tests",
]
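# A brief usage sketch (not part of this plugin): once the markers above are
# registered, tests can opt into them without pytest emitting
# PytestUnknownMarkWarning. The test name below is made up.
# import pytest
# @pytest.mark.tier1
# @pytest.mark.host_create
# def test_positive_create_host_minimal():
#     ...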
|
rplevka/robottelo
|
pytest_plugins/markers.py
|
Python
|
gpl-3.0
| 1,342
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import random
x = random.randint(1,10)
y = random.choice(["jablka", "hrušky", "třešně", "švestky", "angrešt"])  # apples, pears, cherries, plums, gooseberries
print("Náhodné číslo: ", x)    # "Random number: "
print("Náhodná položka: ", y)  # "Random item: "
|
rludva/katara
|
python/kihon/fceRandom.py
|
Python
|
gpl-3.0
| 226
|
import numpy as np
import scipy.special as sps
import matplotlib.pyplot as plt
from pylab import rcParams
def make_beta(zer, gam):
    # Cauchy (Lorentz) pdf with location `zer` and scale `gam`:
    # f(x) = 1 / (pi * gam * (1 + ((x - zer) / gam)**2))
    return lambda x: 1.0 / (np.pi * gam * (1 + ((x - zer) / gam) ** 2))
def main():
rcParams['figure.figsize'] = 8, 6
# figure(num=None, facecolor='w', edgecolor='k')
ax = plt.figure().add_subplot(1,1,1)
x = np.arange(-5, 5, 0.01)
mus = [0.0, 0.0, 0.0, -2.0]
bet = [0.5, 2.0, 1.0, 1.0]
c = ['b','r','y','g']
for para, parb, color in zip(mus, bet, c):
beta = make_beta(para, parb)(x)
ax.plot(x, beta, color, linewidth=3)
ax.grid(True)
plt.xlim(-5, 5)
plt.ylim(0, 0.7)
# plt.legend(['0.2', '1.0', '5.0', '0.5'], loc='best',prop={'size':20})
plt.savefig('foo.pdf', bbox_inches='tight')
if __name__ == '__main__':
main()
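# A quick sanity-check sketch (not part of the original script): the density
# returned by make_beta should integrate to roughly 1 over a wide interval.
# xs = np.linspace(-500, 500, 200001)
# print(np.trapz(make_beta(0.0, 2.0)(xs), xs))  # ~0.997 (Cauchy tails are heavy)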
|
octoalmawaukh/math-waffle
|
work in progress/## 60 list of probability distributions/imgsrc/pdf_cauchy.py
|
Python
|
gpl-3.0
| 825
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# title : rescale.py
# description : Normalize data into a defined range/type
# copyright : (c) 2017 I3MTO laboratory. All Rights Reserved
# author(s) : Thomas Janvier
# creation : 01 December 2017
# modification : 17 December 2017
#
# TODO:
# - based on numpy min and max, might speed it up...
import numpy as np
from numba import vectorize
__all__ = ['rescale']
def rescale(data, clip=(0.0, 1.0), cast='float64'):
"""Rescale numerical `data` inside the `clip` range and cast as `type`.
Parameters
----------
data : array_like
Input data.
    clip : (scalar, scalar), int or float, optional
        [lower, upper] desired boundaries
    cast : str or numpy dtype, optional
        dtype of the returned array (default 'float64'); a falsy value keeps
        the input dtype
Returns
-------
data : array_like
rescaled (casted) data
Notes
-----
If boundaries 'lower' > 'upper' then the signal is inverted.
Examples
--------
Rescale [0, 1] random floats to [0, 255] integers:
>>> import numpy
>>> x = numpy.random.rand(9)
>>> rescale(x, (0, 255), 'uint8')
"""
# lazy numpy.ndarray cast
data = np.asarray(data)
# if no conversion asked, store the original type
if not cast:
cast = data.dtype
    # work in float to avoid precision-dependent truncation
    data = data.astype('float64')
# parse the desired boundaries as float
lb = float(clip[0])
ub = float(clip[1])
# extract the array min/max values
lv = np.min(data)
uv = np.max(data)
    # rescale, then lazily cast to the requested dtype
    return ((data - lv) * (ub - lb) / (uv - lv) + lb).astype(cast)
@vectorize(cache=True, nopython=True)
def _rescale(data, lv, uv, lb, ub):
"""Vectorized rescale function.
Parameters
----------
data : array_like
Input data.
lv : int or float
lower value in data
uv : int or float
upper value in data
lb : int or float
lower boundary to fit
ub : int or float
upper boundary to fit
Returns
-------
data : array_like
rescaled (casted) data
"""
return (data - lv) * (ub - lb) / (uv - lv) + lb
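# A quick numeric sketch (not part of the original module), assuming the dtype
# cast shown in the docstring example is applied to the result:
# rescale([0, 1, 2], (0, 255), 'uint8')  # -> array([  0, 127, 255], dtype=uint8)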
|
Bone-Imaging-ToolKit/BItk
|
bitk/core/rescale.py
|
Python
|
gpl-3.0
| 2,205
|
__author__ = 'Sergey Tomin'
import sys
import matplotlib
from ocelot.rad import *
from ocelot.gui import *
from ocelot import *
from ocelot.rad.radiation_py import *
from ocelot.rad.undulator_params import *
import copy
sigma_tau = 100e-6/2.36
tau_p_cor = 0.013/2.36
tau = np.array([-1, 0, 1])*sigma_tau
phi = tau/1.45859E-04*360
font = {'size' : 14}
matplotlib.rc('font', **font)
p_array_init = ParticleArray(n=3)
p_array_init.tau()[:] = np.array([-1, 0, 1])*sigma_tau
p_array_init.p()[:] = tau_p_cor*tau/sigma_tau
p_array_init.E = 0.6
p_array_init.q_array[:] = 1e-10
p_array = copy.deepcopy(p_array_init)
screen = Screen()
screen.z = 1000.0
screen.size_x = 15
screen.size_y = 15
screen.nx = 2000
screen.ny = 1
screen.start_energy = 0.00850
screen.end_energy = 15e-3
screen.num_energy = 1
screen.update()
und = Undulator(lperiod=0.4, nperiods=9, Kx=44.821)
lat = MagneticLattice((und,))
screen = coherent_radiation(lat, screen, p_array, accuracy=2, end_poles=False)
show_flux(screen)
plt.show()
|
ocelot-collab/ocelot
|
demos/sr/coherent_rad.py
|
Python
|
gpl-3.0
| 1,014
|
"""Tests for the Docker feature.
:Requirement: Docker
:CaseAutomation: Automated
:CaseLevel: Component
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from random import choice
from random import randint
import pytest
from fauxfactory import gen_string
from fauxfactory import gen_url
from wait_for import wait_for
from robottelo import ssh
from robottelo.cli.activationkey import ActivationKey
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.contentview import ContentView
from robottelo.cli.docker import Docker
from robottelo.cli.factory import make_activation_key
from robottelo.cli.factory import make_content_view
from robottelo.cli.factory import make_lifecycle_environment
from robottelo.cli.factory import make_product_wait
from robottelo.cli.factory import make_repository
from robottelo.cli.lifecycleenvironment import LifecycleEnvironment
from robottelo.cli.product import Product
from robottelo.cli.repository import Repository
from robottelo.config import settings
from robottelo.constants import CONTAINER_REGISTRY_HUB
from robottelo.constants import CONTAINER_RH_REGISTRY_UPSTREAM_NAME
from robottelo.constants import CONTAINER_UPSTREAM_NAME
from robottelo.constants import REPO_TYPE
from robottelo.datafactory import invalid_docker_upstream_names
from robottelo.datafactory import parametrized
from robottelo.datafactory import valid_docker_repository_names
from robottelo.datafactory import valid_docker_upstream_names
from robottelo.logging import logger
def _repo(product_id, name=None, upstream_name=None, url=None):
"""Creates a Docker-based repository.
:param product_id: ID of the ``Product``.
:param str name: Name for the repository. If ``None`` then a random
value will be generated.
:param str upstream_name: A valid name of an existing upstream repository.
If ``None`` then defaults to CONTAINER_UPSTREAM_NAME constant.
:param str url: URL of repository. If ``None`` then defaults to
CONTAINER_REGISTRY_HUB constant.
:return: A ``Repository`` object.
"""
return make_repository(
{
'content-type': REPO_TYPE['docker'],
'docker-upstream-name': upstream_name or CONTAINER_UPSTREAM_NAME,
'name': name or gen_string('alpha', 5),
'product-id': product_id,
'url': url or CONTAINER_REGISTRY_HUB,
}
)
def _content_view(repo_id, org_id):
"""Create a content view and link it to the given repository."""
content_view = make_content_view({'composite': False, 'organization-id': org_id})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo_id})
return ContentView.info({'id': content_view['id']})
@pytest.fixture
def docker_host(rhel7_contenthost):
"""Instantiate and set up a docker host VM. Destroy VM when done."""
logger.info('Installing katello-ca on the external docker host')
rhel7_contenthost.install_katello_ca()
repos = {
'server': settings.repos.rhel7_os,
'optional': settings.repos.rhel7_optional,
'extras': settings.repos.rhel7_extras,
}
rhel7_contenthost.create_custom_repos(**repos)
rhel7_contenthost.execute('yum -y install docker')
rhel7_contenthost.execute('systemctl start docker')
return rhel7_contenthost
@pytest.fixture
def repo(module_product):
return _repo(module_product.id)
@pytest.fixture
def content_view(module_org, repo):
return _content_view(repo['id'], module_org.id)
@pytest.fixture
def content_view_publish(content_view):
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
return ContentView.version_info({'id': content_view['versions'][0]['id']})
@pytest.fixture
def content_view_promote(content_view_publish, module_lce):
ContentView.version_promote(
{
'id': content_view_publish['id'],
'to-lifecycle-environment-id': module_lce.id,
}
)
return ContentView.version_info({'id': content_view_publish['id']})
class TestDockerManifest:
"""Tests related to docker manifest command
:CaseComponent: Repositories
:Assignee: chiggins
"""
@pytest.mark.tier2
def test_positive_read_docker_tags(self, repo):
"""docker manifest displays tags information for a docker manifest
:id: 59b605b5-ac2d-46e3-a85e-a259e78a07a8
:expectedresults: docker manifest displays tags info for a docker
manifest
:CaseImportance: Medium
:BZ: 1658274
"""
Repository.synchronize({'id': repo['id']})
# Grab all available manifests related to repository
manifests_list = Docker.manifest.list({'repository-id': repo['id']})
# Some manifests do not have tags associated with it, ignore those
# because we want to check the tag information
        manifests = [m_iter for m_iter in manifests_list if m_iter['tags'] != '']
assert manifests
tags_list = Docker.tag.list({'repository-id': repo['id']})
# Extract tag names for the repository out of docker tag list
repo_tag_names = [tag['tag'] for tag in tags_list]
for manifest in manifests:
manifest_info = Docker.manifest.info({'id': manifest['id']})
# Check that manifest's tag is listed in tags for the repository
for t_iter in manifest_info['tags']:
assert t_iter['name'] in repo_tag_names
class TestDockerRepository:
"""Tests specific to performing CRUD methods against ``Docker`` repositories.
:CaseComponent: Repositories
:Assignee: chiggins
"""
@pytest.mark.tier1
@pytest.mark.parametrize('name', **parametrized(valid_docker_repository_names()))
def test_positive_create_with_name(self, module_org, module_product, name):
"""Create one Docker-type repository
:id: e82a36c8-3265-4c10-bafe-c7e07db3be78
:parametrized: yes
:expectedresults: A repository is created with a Docker upstream
repository.
:CaseImportance: Critical
"""
repo = _repo(module_product.id, name)
assert repo['name'] == name
assert repo['upstream-repository-name'] == CONTAINER_UPSTREAM_NAME
assert repo['content-type'] == REPO_TYPE['docker']
@pytest.mark.tier2
def test_positive_create_repos_using_same_product(self, module_org, module_product):
"""Create multiple Docker-type repositories
:id: 6dd25cf4-f8b6-4958-976a-c116daf27b44
:expectedresults: Multiple docker repositories are created with a
Docker upstream repository and they all belong to the same product.
:CaseLevel: Integration
"""
repo_names = set()
for _ in range(randint(2, 5)):
repo = _repo(module_product.id)
repo_names.add(repo['name'])
product = Product.info({'id': module_product.id, 'organization-id': module_org.id})
assert repo_names.issubset({repo_['repo-name'] for repo_ in product['content']})
@pytest.mark.tier2
def test_positive_create_repos_using_multiple_products(self, module_org):
"""Create multiple Docker-type repositories on multiple
products.
:id: 43f4ab0d-731e-444e-9014-d663ff945f36
:expectedresults: Multiple docker repositories are created with a
Docker upstream repository and they all belong to their respective
products.
:CaseLevel: Integration
"""
for _ in range(randint(2, 5)):
product = make_product_wait({'organization-id': module_org.id})
repo_names = set()
for _ in range(randint(2, 3)):
repo = _repo(product['id'])
repo_names.add(repo['name'])
product = Product.info({'id': product['id'], 'organization-id': module_org.id})
assert repo_names == {repo_['repo-name'] for repo_ in product['content']}
@pytest.mark.tier1
def test_positive_sync(self, repo):
"""Create and sync a Docker-type repository
:id: bff1d40e-181b-48b2-8141-8c86e0db62a2
:expectedresults: A repository is created with a Docker repository and
it is synchronized.
:CaseImportance: Critical
"""
assert int(repo['content-counts']['container-image-manifests']) == 0
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
assert int(repo['content-counts']['container-image-manifests']) > 0
@pytest.mark.tier1
@pytest.mark.parametrize('new_name', **parametrized(valid_docker_repository_names()))
def test_positive_update_name(self, repo, new_name):
"""Create a Docker-type repository and update its name.
:id: 8b3a8496-e9bd-44f1-916f-6763a76b9b1b
:parametrized: yes
:expectedresults: A repository is created with a Docker upstream
repository and that its name can be updated.
:CaseImportance: Critical
"""
Repository.update({'id': repo['id'], 'new-name': new_name, 'url': repo['url']})
repo = Repository.info({'id': repo['id']})
assert repo['name'] == new_name
@pytest.mark.tier1
@pytest.mark.parametrize('new_upstream_name', **parametrized(valid_docker_upstream_names()))
def test_positive_update_upstream_name(self, repo, new_upstream_name):
"""Create a Docker-type repository and update its upstream name.
:id: 1a6985ed-43ec-4ea6-ba27-e3870457ac56
:parametrized: yes
:expectedresults: A repository is created with a Docker upstream
repository and that its upstream name can be updated.
:CaseImportance: Critical
"""
Repository.update(
{
'docker-upstream-name': new_upstream_name,
'id': repo['id'],
'url': repo['url'],
}
)
repo = Repository.info({'id': repo['id']})
assert repo['upstream-repository-name'] == new_upstream_name
@pytest.mark.tier1
@pytest.mark.parametrize('new_upstream_name', **parametrized(invalid_docker_upstream_names()))
def test_negative_update_upstream_name(self, repo, new_upstream_name):
"""Attempt to update upstream name for a Docker-type repository.
:id: 798651af-28b2-4907-b3a7-7c560bf66c7c
:parametrized: yes
:expectedresults: A repository is created with a Docker upstream
repository and that its upstream name can not be updated with
invalid values.
:CaseImportance: Critical
"""
with pytest.raises(CLIReturnCodeError, match='Validation failed: Docker upstream name'):
Repository.update(
{
'docker-upstream-name': new_upstream_name,
'id': repo['id'],
'url': repo['url'],
}
)
@pytest.mark.skip_if_not_set('docker')
@pytest.mark.tier1
def test_positive_create_with_long_upstream_name(self, module_product):
"""Create a docker repository with upstream name longer than 30
characters
:id: 4fe47c02-a8bd-4630-9102-189a9d268b83
:customerscenario: true
:BZ: 1424689
:expectedresults: docker repository is successfully created
:CaseImportance: Critical
"""
repo = _repo(
module_product.id,
upstream_name=CONTAINER_RH_REGISTRY_UPSTREAM_NAME,
url=settings.docker.external_registry_1,
)
assert repo['upstream-repository-name'] == CONTAINER_RH_REGISTRY_UPSTREAM_NAME
@pytest.mark.skip_if_not_set('docker')
@pytest.mark.tier1
def test_positive_update_with_long_upstream_name(self, repo):
"""Create a docker repository and update its upstream name with longer
than 30 characters value
:id: 97260cce-9677-4a3e-942b-e95e2714500a
:BZ: 1424689
:expectedresults: docker repository is successfully updated
:CaseImportance: Critical
"""
Repository.update(
{
'docker-upstream-name': CONTAINER_RH_REGISTRY_UPSTREAM_NAME,
'id': repo['id'],
'url': settings.docker.external_registry_1,
}
)
repo = Repository.info({'id': repo['id']})
assert repo['upstream-repository-name'] == CONTAINER_RH_REGISTRY_UPSTREAM_NAME
@pytest.mark.tier2
def test_positive_update_url(self, repo):
"""Create a Docker-type repository and update its URL.
:id: 73caacd4-7f17-42a7-8d93-3dee8b9341fa
:expectedresults: A repository is created with a Docker upstream
repository and that its URL can be updated.
"""
new_url = gen_url()
Repository.update({'id': repo['id'], 'url': new_url})
repo = Repository.info({'id': repo['id']})
assert repo['url'] == new_url
@pytest.mark.tier1
def test_positive_delete_by_id(self, repo):
"""Create and delete a Docker-type repository
:id: ab1e8228-92a8-45dc-a863-7181711f2745
        :expectedresults: A repository with an upstream repository is created
and then deleted.
:CaseImportance: Critical
"""
Repository.delete({'id': repo['id']})
with pytest.raises(CLIReturnCodeError):
Repository.info({'id': repo['id']})
@pytest.mark.tier2
def test_positive_delete_random_repo_by_id(self, module_org):
"""Create Docker-type repositories on multiple products and
delete a random repository from a random product.
:id: d4db5eaa-7379-4788-9b72-76f2589d8f20
:expectedresults: Random repository can be deleted from random product
without altering the other products.
"""
products = [
make_product_wait({'organization-id': module_org.id}) for _ in range(randint(2, 5))
]
repos = []
for product in products:
for _ in range(randint(2, 3)):
repos.append(_repo(product['id']))
# Select random repository and delete it
repo = choice(repos)
repos.remove(repo)
Repository.delete({'id': repo['id']})
with pytest.raises(CLIReturnCodeError):
Repository.info({'id': repo['id']})
# Verify other repositories were not touched
product_ids = [product['id'] for product in products]
for repo in repos:
result = Repository.info({'id': repo['id']})
assert result['product']['id'] in product_ids
class TestDockerContentView:
"""Tests specific to using ``Docker`` repositories with Content Views.
:CaseComponent: ContentViews
:Assignee: ltran
:CaseLevel: Integration
"""
@pytest.mark.tier2
def test_positive_add_docker_repo_by_id(self, module_org, repo):
"""Add one Docker-type repository to a non-composite content view
:id: 87d6c7bb-92f8-4a32-8ad2-2a1af896500b
:expectedresults: A repository is created with a Docker repository and
the product is added to a non-composite content view
"""
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
content_view = ContentView.info({'id': content_view['id']})
assert repo['id'] in [repo_['id'] for repo_ in content_view['container-image-repositories']]
@pytest.mark.tier2
def test_positive_add_docker_repos_by_id(self, module_org, module_product):
"""Add multiple Docker-type repositories to a non-composite CV.
:id: 2eb19e28-a633-4c21-9469-75a686c83b34
:expectedresults: Repositories are created with Docker upstream
repositories and the product is added to a non-composite content
view.
"""
repos = [_repo(module_product.id) for _ in range(randint(2, 5))]
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
for repo in repos:
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
content_view = ContentView.info({'id': content_view['id']})
assert {repo['id'] for repo in repos} == {
repo['id'] for repo in content_view['container-image-repositories']
}
@pytest.mark.tier2
def test_positive_add_synced_docker_repo_by_id(self, module_org, repo):
"""Create and sync a Docker-type repository
:id: 6f51d268-ed23-48ab-9dea-cd3571daa647
:expectedresults: A repository is created with a Docker repository and
it is synchronized.
"""
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
assert int(repo['content-counts']['container-image-manifests']) > 0
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
content_view = ContentView.info({'id': content_view['id']})
assert repo['id'] in [repo_['id'] for repo_ in content_view['container-image-repositories']]
@pytest.mark.tier2
def test_positive_add_docker_repo_by_id_to_ccv(self, module_org, content_view):
"""Add one Docker-type repository to a composite content view
:id: 8e2ef5ba-3cdf-4ef9-a22a-f1701e20a5d5
:expectedresults: A repository is created with a Docker repository and
the product is added to a content view which is then added to a
composite content view.
:BZ: 1359665
"""
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
ContentView.update(
{
'id': comp_content_view['id'],
'component-ids': content_view['versions'][0]['id'],
}
)
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert content_view['versions'][0]['id'] in [
component['id'] for component in comp_content_view['components']
]
@pytest.mark.tier2
def test_positive_add_docker_repos_by_id_to_ccv(self, module_org, module_product):
"""Add multiple Docker-type repositories to a composite content view.
:id: b79cbc97-3dba-4059-907d-19316684d569
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a random number of content
views which are then added to a composite content view.
:BZ: 1359665
"""
cv_versions = []
for _ in range(randint(2, 5)):
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
repo = _repo(module_product.id)
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
cv_versions.append(content_view['versions'][0])
comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
ContentView.update(
{
'component-ids': [cv_version['id'] for cv_version in cv_versions],
'id': comp_content_view['id'],
}
)
comp_content_view = ContentView.info({'id': comp_content_view['id']})
comp_ids = [component['id'] for component in comp_content_view['components']]
for cv_version in cv_versions:
assert cv_version['id'] in comp_ids
@pytest.mark.tier2
def test_positive_publish_with_docker_repo(self, content_view):
"""Add Docker-type repository to content view and publish it once.
:id: 28480de3-ffb5-4b8e-8174-fffffeef6af4
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a content view which is then
published only once.
"""
assert len(content_view['versions']) == 0
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
@pytest.mark.tier2
def test_positive_publish_with_docker_repo_composite(self, content_view, module_org):
"""Add Docker-type repository to composite CV and publish it once.
:id: 2d75419b-73ed-4f29-ae0d-9af8d9624c87
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a content view which is then
published once and added to a composite content view which is also
published once.
:BZ: 1359665
"""
assert len(content_view['versions']) == 0
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
ContentView.update(
{
'component-ids': content_view['versions'][0]['id'],
'id': comp_content_view['id'],
}
)
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert content_view['versions'][0]['id'] in [
component['id'] for component in comp_content_view['components']
]
ContentView.publish({'id': comp_content_view['id']})
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert len(comp_content_view['versions']) == 1
@pytest.mark.tier2
def test_positive_publish_multiple_with_docker_repo(self, content_view):
"""Add Docker-type repository to content view and publish it multiple
times.
:id: 33c1b2ee-ae8a-4a7e-8254-123d97aaaa58
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a content view which is then
published multiple times.
"""
assert len(content_view['versions']) == 0
publish_amount = randint(2, 5)
for _ in range(publish_amount):
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == publish_amount
@pytest.mark.tier2
def test_positive_publish_multiple_with_docker_repo_composite(self, module_org, content_view):
"""Add Docker-type repository to content view and publish it multiple
times.
:id: 014adf90-d399-4a99-badb-76ee03a2c350
:expectedresults: One repository is created with a Docker upstream
repository and the product is added to a content view which is then
added to a composite content view which is then published multiple
times.
:BZ: 1359665
"""
assert len(content_view['versions']) == 0
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
ContentView.update(
{
'component-ids': content_view['versions'][0]['id'],
'id': comp_content_view['id'],
}
)
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert content_view['versions'][0]['id'] in [
component['id'] for component in comp_content_view['components']
]
publish_amount = randint(2, 5)
for _ in range(publish_amount):
ContentView.publish({'id': comp_content_view['id']})
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert len(comp_content_view['versions']) == publish_amount
@pytest.mark.tier2
def test_positive_promote_with_docker_repo(self, module_org, module_lce, content_view):
"""Add Docker-type repository to content view and publish it.
Then promote it to the next available lifecycle-environment.
:id: a7df98f4-0ec0-40f6-8941-3dbb776d47b9
:expectedresults: Docker-type repository is promoted to content view
found in the specific lifecycle-environment.
"""
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
cvv = ContentView.version_info({'id': content_view['versions'][0]['id']})
assert len(cvv['lifecycle-environments']) == 1
ContentView.version_promote({'id': cvv['id'], 'to-lifecycle-environment-id': module_lce.id})
cvv = ContentView.version_info({'id': content_view['versions'][0]['id']})
assert len(cvv['lifecycle-environments']) == 2
@pytest.mark.tier2
@pytest.mark.upgrade
def test_positive_promote_multiple_with_docker_repo(self, module_org, content_view):
"""Add Docker-type repository to content view and publish it.
Then promote it to multiple available lifecycle-environments.
:id: e9432bc4-a709-44d7-8e1d-00ca466aa32d
:expectedresults: Docker-type repository is promoted to content view
found in the specific lifecycle-environments.
"""
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
cvv = ContentView.version_info({'id': content_view['versions'][0]['id']})
assert len(cvv['lifecycle-environments']) == 1
lces = [
make_lifecycle_environment({'organization-id': module_org.id})
for _ in range(1, randint(3, 6))
]
for expected_lces, lce in enumerate(lces, start=2):
ContentView.version_promote({'id': cvv['id'], 'to-lifecycle-environment-id': lce['id']})
cvv = ContentView.version_info({'id': cvv['id']})
assert len(cvv['lifecycle-environments']) == expected_lces
@pytest.mark.tier2
def test_positive_promote_with_docker_repo_composite(
self, module_org, module_lce, content_view
):
"""Add Docker-type repository to composite content view and publish it.
Then promote it to the next available lifecycle-environment.
:id: fb7d132e-d7fa-4890-a0ec-746dd093513e
:expectedresults: Docker-type repository is promoted to content view
found in the specific lifecycle-environment.
:BZ: 1359665
"""
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
ContentView.update(
{
'component-ids': content_view['versions'][0]['id'],
'id': comp_content_view['id'],
}
)
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert content_view['versions'][0]['id'] in [
component['id'] for component in comp_content_view['components']
]
ContentView.publish({'id': comp_content_view['id']})
comp_content_view = ContentView.info({'id': comp_content_view['id']})
cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
assert len(cvv['lifecycle-environments']) == 1
ContentView.version_promote(
{
'id': comp_content_view['versions'][0]['id'],
'to-lifecycle-environment-id': module_lce.id,
}
)
cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
assert len(cvv['lifecycle-environments']) == 2
@pytest.mark.tier2
@pytest.mark.upgrade
def test_positive_promote_multiple_with_docker_repo_composite(self, content_view, module_org):
"""Add Docker-type repository to composite content view and publish it.
Then promote it to the multiple available lifecycle-environments.
:id: 345288d6-581b-4c07-8062-e58cb6343f1b
:expectedresults: Docker-type repository is promoted to content view
found in the specific lifecycle-environments.
:BZ: 1359665
"""
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
assert len(content_view['versions']) == 1
comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
ContentView.update(
{
'component-ids': content_view['versions'][0]['id'],
'id': comp_content_view['id'],
}
)
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert content_view['versions'][0]['id'] in [
component['id'] for component in comp_content_view['components']
]
ContentView.publish({'id': comp_content_view['id']})
comp_content_view = ContentView.info({'id': comp_content_view['id']})
cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
assert len(cvv['lifecycle-environments']) == 1
lces = [
make_lifecycle_environment({'organization-id': module_org.id})
for _ in range(1, randint(3, 6))
]
for expected_lces, lce in enumerate(lces, start=2):
ContentView.version_promote(
{
'id': cvv['id'],
'to-lifecycle-environment-id': lce['id'],
}
)
cvv = ContentView.version_info({'id': cvv['id']})
assert len(cvv['lifecycle-environments']) == expected_lces
@pytest.mark.tier2
@pytest.mark.upgrade
def test_positive_name_pattern_change(self, module_org):
"""Promote content view with Docker repository to lifecycle environment.
Change registry name pattern for that environment. Verify that repository
name on product changed according to new pattern.
:id: 63c99ae7-238b-40ed-8cc1-d847eb4e6d65
:expectedresults: Container repository name is changed
according to new pattern.
"""
lce = make_lifecycle_environment({'organization-id': module_org.id})
pattern_prefix = gen_string('alpha', 5)
docker_upstream_name = 'hello-world'
new_pattern = (
f'{pattern_prefix}-<%= content_view.label %>/<%= repository.docker_upstream_name %>'
)
repo = _repo(
make_product_wait({'organization-id': module_org.id})['id'],
name=gen_string('alpha', 5),
upstream_name=docker_upstream_name,
)
Repository.synchronize({'id': repo['id']})
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
ContentView.version_promote(
{'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
)
LifecycleEnvironment.update(
{
'registry-name-pattern': new_pattern,
'id': lce['id'],
'organization-id': module_org.id,
}
)
lce = LifecycleEnvironment.info({'id': lce['id'], 'organization-id': module_org.id})
assert lce['registry-name-pattern'] == new_pattern
repo = Repository.list(
{'name': repo['name'], 'environment-id': lce['id'], 'organization-id': module_org.id}
)[0]
expected_name = f'{pattern_prefix}-{content_view["label"]}/{docker_upstream_name}'.lower()
assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
@pytest.mark.tier2
def test_positive_product_name_change_after_promotion(self, module_org):
"""Promote content view with Docker repository to lifecycle environment.
Change product name. Verify that repository name on product changed
according to new pattern.
:id: 92279755-717c-415c-88b6-4cc1202072e2
:expectedresults: Container repository name is changed
according to new pattern.
"""
old_prod_name = gen_string('alpha', 5)
new_prod_name = gen_string('alpha', 5)
docker_upstream_name = 'hello-world'
new_pattern = '<%= content_view.label %>/<%= product.name %>'
lce = make_lifecycle_environment({'organization-id': module_org.id})
prod = make_product_wait({'organization-id': module_org.id, 'name': old_prod_name})
repo = _repo(prod['id'], name=gen_string('alpha', 5), upstream_name=docker_upstream_name)
Repository.synchronize({'id': repo['id']})
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
LifecycleEnvironment.update(
{
'registry-name-pattern': new_pattern,
'id': lce['id'],
'organization-id': module_org.id,
}
)
lce = LifecycleEnvironment.info({'id': lce['id'], 'organization-id': module_org.id})
assert lce['registry-name-pattern'] == new_pattern
ContentView.version_promote(
{'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
)
Product.update({'name': new_prod_name, 'id': prod['id']})
repo = Repository.list(
{'name': repo['name'], 'environment-id': lce['id'], 'organization-id': module_org.id}
)[0]
expected_name = f'{content_view["label"]}/{old_prod_name}'.lower()
assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
ContentView.version_promote(
{
'id': content_view['versions'][-1]['id'],
'to-lifecycle-environment-id': lce['id'],
}
)
repo = Repository.list(
{
'name': repo['name'],
'environment-id': lce['id'],
'organization-id': module_org.id,
}
)[0]
expected_name = f'{content_view["label"]}/{new_prod_name}'.lower()
assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
@pytest.mark.tier2
def test_positive_repo_name_change_after_promotion(self, module_org):
"""Promote content view with Docker repository to lifecycle environment.
Change repository name. Verify that Docker repository name on product
changed according to new pattern.
:id: f094baab-e823-47e0-939d-bd0d88eb1538
:expectedresults: Container repository name is changed
according to new pattern.
"""
old_repo_name = gen_string('alpha', 5)
new_repo_name = gen_string('alpha', 5)
docker_upstream_name = 'hello-world'
new_pattern = '<%= content_view.label %>/<%= repository.name %>'
lce = make_lifecycle_environment({'organization-id': module_org.id})
prod = make_product_wait({'organization-id': module_org.id})
repo = _repo(prod['id'], name=old_repo_name, upstream_name=docker_upstream_name)
Repository.synchronize({'id': repo['id']})
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
LifecycleEnvironment.update(
{
'registry-name-pattern': new_pattern,
'id': lce['id'],
'organization-id': module_org.id,
}
)
ContentView.version_promote(
{'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
)
Repository.update({'name': new_repo_name, 'id': repo['id'], 'product-id': prod['id']})
repo = Repository.list(
{
'name': new_repo_name,
'environment-id': lce['id'],
'organization-id': module_org.id,
}
)[0]
expected_name = f'{content_view["label"]}/{old_repo_name}'.lower()
assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
ContentView.version_promote(
{
'id': content_view['versions'][-1]['id'],
'to-lifecycle-environment-id': lce['id'],
}
)
repo = Repository.list(
{
'name': new_repo_name,
'environment-id': lce['id'],
'organization-id': module_org.id,
}
)[0]
expected_name = f'{content_view["label"]}/{new_repo_name}'.lower()
assert Repository.info({'id': repo['id']})['container-repository-name'] == expected_name
@pytest.mark.tier2
def test_negative_set_non_unique_name_pattern_and_promote(self, module_org):
"""Set registry name pattern to one that does not guarantee uniqueness.
Try to promote content view with multiple Docker repositories to
lifecycle environment. Verify that content has not been promoted.
:id: eaf5e7ac-93c9-46c6-b538-4d6bd73ab9fc
:expectedresults: Content view is not promoted
"""
docker_upstream_names = ['hello-world', 'alpine']
new_pattern = '<%= organization.label %>'
lce = make_lifecycle_environment(
{'organization-id': module_org.id, 'registry-name-pattern': new_pattern}
)
prod = make_product_wait({'organization-id': module_org.id})
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
for docker_name in docker_upstream_names:
repo = _repo(prod['id'], upstream_name=docker_name)
Repository.synchronize({'id': repo['id']})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
with pytest.raises(CLIReturnCodeError):
ContentView.version_promote(
{'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
)
@pytest.mark.tier2
def test_negative_promote_and_set_non_unique_name_pattern(self, module_org, module_product):
"""Promote content view with multiple Docker repositories to
lifecycle environment. Set registry name pattern to one that
does not guarantee uniqueness. Verify that pattern has not been
changed.
:id: 9f952224-084f-48d1-b2ea-85f3621becea
:expectedresults: Registry name pattern is not changed
"""
docker_upstream_names = ['hello-world', 'alpine']
new_pattern = '<%= organization.label %>'
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
for docker_name in docker_upstream_names:
repo = _repo(module_product.id, upstream_name=docker_name)
Repository.synchronize({'id': repo['id']})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
lce = make_lifecycle_environment({'organization-id': module_org.id})
ContentView.version_promote(
{'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
)
with pytest.raises(CLIReturnCodeError):
LifecycleEnvironment.update(
{
'registry-name-pattern': new_pattern,
'id': lce['id'],
'organization-id': module_org.id,
}
)
class TestDockerActivationKey:
"""Tests specific to adding ``Docker`` repositories to Activation Keys.
:CaseComponent: ActivationKeys
:Assignee: chiggins
:CaseLevel: Integration
"""
@pytest.mark.tier2
def test_positive_add_docker_repo_cv(self, module_org, module_lce, content_view_promote):
"""Add Docker-type repository to a non-composite content view
and publish it. Then create an activation key and associate it with the
Docker content view.
:id: bb128642-d39f-45c2-aa69-a4776ea536a2
:expectedresults: Docker-based content view can be added to activation
key
"""
activation_key = make_activation_key(
{
'content-view-id': content_view_promote['content-view-id'],
'lifecycle-environment-id': module_lce.id,
'organization-id': module_org.id,
}
)
assert activation_key['content-view'] == content_view_promote['content-view-name']
@pytest.mark.tier2
def test_positive_remove_docker_repo_cv(self, module_org, module_lce, content_view_promote):
"""Add Docker-type repository to a non-composite content view
and publish it. Create an activation key and associate it with the
Docker content view. Then remove this content view from the activation
key.
:id: d696e5fe-1818-46ce-9499-924c96e1ef88
:expectedresults: Docker-based content view can be added and then
removed from the activation key.
"""
activation_key = make_activation_key(
{
'content-view-id': content_view_promote['content-view-id'],
'lifecycle-environment-id': module_lce.id,
'organization-id': module_org.id,
}
)
assert activation_key['content-view'] == content_view_promote['content-view-name']
        # Create another content view to replace the current one with
another_cv = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.publish({'id': another_cv['id']})
another_cv = ContentView.info({'id': another_cv['id']})
ContentView.version_promote(
{'id': another_cv['versions'][0]['id'], 'to-lifecycle-environment-id': module_lce.id}
)
ActivationKey.update(
{
'id': activation_key['id'],
'organization-id': module_org.id,
'content-view-id': another_cv['id'],
'lifecycle-environment-id': module_lce.id,
}
)
activation_key = ActivationKey.info({'id': activation_key['id']})
assert activation_key['content-view'] != content_view_promote['content-view-name']
@pytest.mark.tier2
def test_positive_add_docker_repo_ccv(self, module_org, module_lce, content_view_publish):
"""Add Docker-type repository to a non-composite content view
and publish it. Then add this content view to a composite content view
and publish it. Create an activation key and associate it with the
composite Docker content view.
:id: 1d9b82fd-8dab-4fd9-ad35-656d712d56a2
:expectedresults: Docker-based content view can be added to activation
key
:BZ: 1359665
"""
comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
ContentView.update(
{
'component-ids': content_view_publish['id'],
'id': comp_content_view['id'],
}
)
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert content_view_publish['id'] in [
component['id'] for component in comp_content_view['components']
]
ContentView.publish({'id': comp_content_view['id']})
comp_content_view = ContentView.info({'id': comp_content_view['id']})
comp_cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
ContentView.version_promote(
{'id': comp_cvv['id'], 'to-lifecycle-environment-id': module_lce.id}
)
activation_key = make_activation_key(
{
'content-view-id': comp_content_view['id'],
'lifecycle-environment-id': module_lce.id,
'organization-id': module_org.id,
}
)
assert activation_key['content-view'] == comp_content_view['name']
@pytest.mark.tier2
def test_positive_remove_docker_repo_ccv(self, module_org, module_lce, content_view_publish):
"""Add Docker-type repository to a non-composite content view
and publish it. Then add this content view to a composite content view
and publish it. Create an activation key and associate it with the
composite Docker content view. Then, remove the composite content view
from the activation key.
:id: b4e63537-d3a8-4afa-8e18-57052b93fb4c
:expectedresults: Docker-based composite content view can be added and
then removed from the activation key.
:BZ: 1359665
"""
comp_content_view = make_content_view({'composite': True, 'organization-id': module_org.id})
ContentView.update(
{
'component-ids': content_view_publish['id'],
'id': comp_content_view['id'],
}
)
comp_content_view = ContentView.info({'id': comp_content_view['id']})
assert content_view_publish['id'] in [
component['id'] for component in comp_content_view['components']
]
ContentView.publish({'id': comp_content_view['id']})
comp_content_view = ContentView.info({'id': comp_content_view['id']})
comp_cvv = ContentView.version_info({'id': comp_content_view['versions'][0]['id']})
ContentView.version_promote(
{'id': comp_cvv['id'], 'to-lifecycle-environment-id': module_lce.id}
)
activation_key = make_activation_key(
{
'content-view-id': comp_content_view['id'],
'lifecycle-environment-id': module_lce.id,
'organization-id': module_org.id,
}
)
assert activation_key['content-view'] == comp_content_view['name']
        # Create another content view to replace the current one with
another_cv = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.publish({'id': another_cv['id']})
another_cv = ContentView.info({'id': another_cv['id']})
ContentView.version_promote(
{'id': another_cv['versions'][0]['id'], 'to-lifecycle-environment-id': module_lce.id}
)
ActivationKey.update(
{
'id': activation_key['id'],
'organization-id': module_org.id,
'content-view-id': another_cv['id'],
'lifecycle-environment-id': module_lce.id,
}
)
activation_key = ActivationKey.info({'id': activation_key['id']})
assert activation_key['content-view'] != comp_content_view['name']
class TestDockerClient:
"""Tests specific to using ``Docker`` as a client to pull Docker images
from a Satellite 6 instance.
:CaseComponent: ContentManagement
:Assignee: ltran
:CaseLevel: System
:CaseImportance: Medium
"""
@pytest.mark.tier3
def test_positive_pull_image(self, module_org, docker_host):
"""A Docker-enabled client can use ``docker pull`` to pull a
Docker image off a Satellite 6 instance.
:id: 023f0538-2aad-4f87-b8a8-6ccced648366
:Steps:
1. Publish and promote content view with Docker content
2. Register Docker-enabled client against Satellite 6.
:expectedresults: Client can pull Docker images from server and run it.
"""
product = make_product_wait({'organization-id': module_org.id})
repo = _repo(product['id'])
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
try:
result = docker_host.execute(
f'docker login -u {settings.server.admin_username}'
f' -p {settings.server.admin_password} {settings.server.hostname}'
)
assert result.status == 0
            # publishing sometimes takes a few seconds
result, _ = wait_for(
lambda: docker_host.execute(f'docker pull {repo["published-at"]}'),
num_sec=60,
delay=2,
fail_condition=lambda out: out.status != 0,
logger=logger,
)
assert result.status == 0
try:
result = docker_host.execute(f'docker run {repo["published-at"]}')
assert result.status == 0
finally:
# Stop and remove the container
result = docker_host.execute(f'docker ps -a | grep {repo["published-at"]}')
container_id = result.stdout[0].split()[0]
docker_host.execute(f'docker stop {container_id}')
docker_host.execute(f'docker rm {container_id}')
finally:
# Remove docker image
docker_host.execute(f'docker rmi {repo["published-at"]}')
@pytest.mark.skip_if_not_set('docker')
@pytest.mark.tier3
def test_positive_container_admin_end_to_end_search(self, module_org, docker_host):
"""Verify that docker command line can be used against
Satellite server to search for container images stored
on Satellite instance.
:id: cefa74e1-e40d-4f47-853b-1268643cea2f
:steps:
1. Publish and promote content view with Docker content
2. Set 'Unauthenticated Pull' option to false
3. Try to search for docker images on Satellite
4. Use Docker client to login to Satellite docker hub
5. Search for docker images
6. Use Docker client to log out of Satellite docker hub
7. Try to search for docker images (ensure last search result
is caused by change of Satellite option and not login/logout)
8. Set 'Unauthenticated Pull' option to true
9. Search for docker images
:expectedresults: Client can search for docker images stored
on Satellite instance
"""
pattern_prefix = gen_string('alpha', 5)
registry_name_pattern = (
f'{pattern_prefix}-<%= content_view.label %>/<%= repository.docker_upstream_name %>'
)
# Satellite setup: create product and add Docker repository;
# create content view and add Docker repository;
# create lifecycle environment and promote content view to it
lce = make_lifecycle_environment({'organization-id': module_org.id})
product = make_product_wait({'organization-id': module_org.id})
repo = _repo(product['id'], upstream_name=CONTAINER_UPSTREAM_NAME)
Repository.synchronize({'id': repo['id']})
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
ContentView.version_promote(
{'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
)
LifecycleEnvironment.update(
{
'registry-name-pattern': registry_name_pattern,
'registry-unauthenticated-pull': 'false',
'id': lce['id'],
'organization-id': module_org.id,
}
)
docker_repo_uri = (
f' {settings.server.hostname}/{pattern_prefix}-{content_view["label"]}/'
f'{CONTAINER_UPSTREAM_NAME} '
).lower()
# 3. Try to search for docker images on Satellite
remote_search_command = (
f'docker search {settings.server.hostname}/{CONTAINER_UPSTREAM_NAME}'
)
result = docker_host.execute(remote_search_command)
assert result.status == 0
assert docker_repo_uri not in result.stdout
# 4. Use Docker client to login to Satellite docker hub
result = docker_host.execute(
f'docker login -u {settings.server.admin_username}'
f' -p {settings.server.admin_password} {settings.server.hostname}'
)
assert result.status == 0
# 5. Search for docker images
result = docker_host.execute(remote_search_command)
assert result.status == 0
assert docker_repo_uri in result.stdout
# 6. Use Docker client to log out of Satellite docker hub
result = docker_host.execute(f'docker logout {settings.server.hostname}')
assert result.status == 0
# 7. Try to search for docker images
result = docker_host.execute(remote_search_command)
assert result.status == 0
assert docker_repo_uri not in result.stdout
# 8. Set 'Unauthenticated Pull' option to true
LifecycleEnvironment.update(
{
'registry-unauthenticated-pull': 'true',
'id': lce['id'],
'organization-id': module_org.id,
}
)
# 9. Search for docker images
result = docker_host.execute(remote_search_command)
assert result.status == 0
assert docker_repo_uri in result.stdout
@pytest.mark.skip_if_not_set('docker')
@pytest.mark.tier3
def test_positive_container_admin_end_to_end_pull(self, module_org, docker_host):
"""Verify that docker command line can be used against
Satellite server to pull in container images stored
on Satellite instance.
:id: 2a331f88-406b-4a5c-ae70-302a9994077f
:steps:
1. Publish and promote content view with Docker content
2. Set 'Unauthenticated Pull' option to false
3. Try to pull in docker image from Satellite
4. Use Docker client to login to Satellite container registry
5. Pull in docker image
6. Use Docker client to log out of Satellite container registry
7. Try to pull in docker image (ensure next pull result
is caused by change of Satellite option and not login/logout)
8. Set 'Unauthenticated Pull' option to true
9. Pull in docker image
:expectedresults: Client can pull in docker images stored
on Satellite instance
"""
pattern_prefix = gen_string('alpha', 5)
docker_upstream_name = CONTAINER_UPSTREAM_NAME
registry_name_pattern = (
f'{pattern_prefix}-<%= content_view.label %>/<%= repository.docker_upstream_name %>'
)
# Satellite setup: create product and add Docker repository;
# create content view and add Docker repository;
# create lifecycle environment and promote content view to it
lce = make_lifecycle_environment({'organization-id': module_org.id})
product = make_product_wait({'organization-id': module_org.id})
repo = _repo(product['id'], upstream_name=docker_upstream_name)
Repository.synchronize({'id': repo['id']})
content_view = make_content_view({'composite': False, 'organization-id': module_org.id})
ContentView.add_repository({'id': content_view['id'], 'repository-id': repo['id']})
ContentView.publish({'id': content_view['id']})
content_view = ContentView.info({'id': content_view['id']})
ContentView.version_promote(
{'id': content_view['versions'][0]['id'], 'to-lifecycle-environment-id': lce['id']}
)
LifecycleEnvironment.update(
{
'registry-name-pattern': registry_name_pattern,
'registry-unauthenticated-pull': 'false',
'id': lce['id'],
'organization-id': module_org.id,
}
)
docker_repo_uri = (
f'{settings.server.hostname}/{pattern_prefix}-{content_view["label"]}/'
f'{docker_upstream_name}'
).lower()
# 3. Try to pull in docker image from Satellite
docker_pull_command = f'docker pull {docker_repo_uri}'
result = docker_host.execute(docker_pull_command)
assert result.status == 1
# 4. Use Docker client to login to Satellite docker hub
result = docker_host.execute(
f'docker login -u {settings.server.admin_username}'
f' -p {settings.server.admin_password} {settings.server.hostname}'
)
assert result.status == 0
# 5. Pull in docker image
        # publishing sometimes takes a few seconds
result, _ = wait_for(
lambda: docker_host.execute(docker_pull_command),
num_sec=60,
delay=2,
fail_condition=lambda out: out.status != 0,
logger=logger,
)
assert result.status == 0
# 6. Use Docker client to log out of Satellite docker hub
result = docker_host.execute(f'docker logout {settings.server.hostname}')
assert result.status == 0
# 7. Try to pull in docker image
result = docker_host.execute(docker_pull_command)
assert result.status == 1
# 8. Set 'Unauthenticated Pull' option to true
LifecycleEnvironment.update(
{
'registry-unauthenticated-pull': 'true',
'id': lce['id'],
'organization-id': module_org.id,
}
)
# 9. Pull in docker image
result = docker_host.execute(docker_pull_command)
assert result.status == 0
@pytest.mark.stubbed
@pytest.mark.skip_if_not_set('docker')
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_upload_image(self, module_org):
"""A Docker-enabled client can create a new ``Dockerfile``
pointing to an existing Docker image from a Satellite 6 and modify it.
Then, using ``docker build`` generate a new image which can then be
uploaded back onto the Satellite 6 as a new repository.
:id: 2c47559c-b27f-436e-9b1e-df5c3633b007
        :steps:
1. Create a local docker compute resource
2. Create a container and start it
3. [on docker host] Commit a new image from the container
4. [on docker host] Export the image to tar
5. scp the image to satellite box
6. create a new docker repo
7. upload the image to the new repo
:expectedresults: Client can create a new image based off an existing
Docker image from a Satellite 6 instance, add a new package and
upload the modified image (plus layer) back to the Satellite 6.
"""
try:
"""
These functions were removed, but let's leave them here
to maintain overall test logic - in case required functionality
is eventually implemented
compute_resource = make_compute_resource({
'organization-ids': [module_org.id],
'provider': 'Docker',
'url': f'http://{docker_host.ip_addr}:2375',
})
container = make_container({
'compute-resource-id': compute_resource['id'],
'organization-ids': [module_org.id],
})
Docker.container.start({'id': container['id']})
"""
container = {'uuid': 'stubbed test'}
repo_name = gen_string('alphanumeric').lower()
# Commit a new docker image and verify image was created
image_name = f'{repo_name}/{CONTAINER_UPSTREAM_NAME}'
result = docker_host.execute(
f'docker commit {container["uuid"]} {image_name}:latest && '
f'docker images --all | grep {image_name}'
)
assert result.status == 0
# Save the image to a tar archive
result = docker_host.execute(f'docker save -o {repo_name}.tar {image_name}')
assert result.status == 0
tar_file = f'{repo_name}.tar'
ssh.download_file(tar_file, hostname=docker_host.ip_addr)
ssh.upload_file(
local_file=tar_file,
remote_file=f'/tmp/{tar_file}',
hostname=settings.server.hostname,
)
# Upload tarred repository
product = make_product_wait({'organization-id': module_org.id})
repo = _repo(product['id'])
Repository.upload_content({'id': repo['id'], 'path': f'/tmp/{repo_name}.tar'})
# Verify repository was uploaded successfully
repo = Repository.info({'id': repo['id']})
            assert settings.server.hostname in repo['published-at']
repo_name = '-'.join((module_org.label, product['label'], repo['label'])).lower()
assert repo_name in repo['published-at']
finally:
# Remove the archive
ssh.command(f'rm -f /tmp/{repo_name}.tar')
| jyejare/robottelo | tests/foreman/cli/test_docker.py | Python | gpl-3.0 | 62,994 |
"""
ldr.py
Display analog data from Arduino using Python (matplotlib)
Author: Mahesh Venkitachalam
Website: electronut.in
"""
import argparse
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
_S_THK = 1.1
_D_DIA = 120
WDY_MAX_POSITION = 30000
WDY_STRAP_DEAD_OFFSET = 0
WDY_DRUM_OFFSET_TURNS = 0
_PI = 3.1415
def spiral_length_to_turns(length):
Nturns = (_S_THK - _D_DIA + math.sqrt(
pow(_D_DIA - _S_THK, 2) + ((4 * _S_THK * (
(WDY_MAX_POSITION - length + WDY_STRAP_DEAD_OFFSET))) / _PI))) / \
(2 * _S_THK)
return Nturns
def spiral_length_to_diameter(length):
Ddiameter = 2 * (spiral_length_to_turns(length) + WDY_DRUM_OFFSET_TURNS) \
* _S_THK + _D_DIA
return Ddiameter
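# Editor's note (added, hedged): the closed form above follows from approximating
# the wound strap as concentric rings: N turns of strap thickness t on a drum of
# diameter D give a wound length of roughly
#     L ~ pi * sum_{k=1..N} (D + (2k - 1) * t) = pi * (t * N**2 + (D - t) * N),
# and spiral_length_to_turns() solves that quadratic for N with
# L = WDY_MAX_POSITION - length + WDY_STRAP_DEAD_OFFSET.
def _check_turns_roundtrip(length=12345):
    """Hypothetical self-check added by the editor; not part of the original tool."""
    remaining = WDY_MAX_POSITION - length + WDY_STRAP_DEAD_OFFSET
    n = spiral_length_to_turns(length)
    wound = _PI * (_S_THK * n ** 2 + (_D_DIA - _S_THK) * n)
    # re-wound length should match the remaining strap length up to float error
    return math.isclose(wound, remaining, rel_tol=1e-9, abs_tol=1e-6)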
# main() function
def main():
# create parser
parser = argparse.ArgumentParser(description="Graph from serial data")
parser.add_argument('--max', '-M', type=int, default=30000)
parser.add_argument('--min', '-m', type=int, default=0)
parser.add_argument('--step', '-s', type=int, default=1)
# parse args
args = parser.parse_args()
# plot parameters
data1 = []
data2 = []
for i in range(args.min, args.max, args.step):
data1.append(spiral_length_to_turns(i))
data2.append(spiral_length_to_diameter(i))
print('Plotting data...')
    # set up plot
ax = plt.axes(xlim=(0, args.max), ylim=(0, data2[0] + 10))
ax.plot(data1, label='Length to turns')
ax.plot(data2, label='Length to diameter')
# show plot
ax.legend()
plt.show()
print('Exiting.')
# call main
if __name__ == '__main__':
main()
| exmachina-dev/WDY-firmware | tools/graph_spiral.py | Python | gpl-3.0 | 1,683 |
####################################################################################################
#
# Patro - A Python library to make patterns for fashion design
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
####################################################################################################
# cf. PEP 396 -- Module Version Numbers https://www.python.org/dev/peps/pep-0396/
__version__ = '0.3.0'
| FabriceSalvaire/PyValentina | Patro/__init__.py | Python | gpl-3.0 | 1,070 |
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
from espressomd import thermostat
from espressomd import integrate
from espressomd import interactions
import numpy
# System parameters
#############################################################
system = espressomd.System()
#if no seed is provided espresso generates a seed
system.time_step = 0.01
system.skin = 0.4
system.box_l = [100, 100, 100]
system.thermostat.set_langevin(1.0, 1.0)
system.cell_system.set_n_square(use_verlet_lists=False)
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
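# NOTE (editor): with the cutoff at 2**(1/6)*sigma and shift="auto", the
# Lennard-Jones potential is truncated at its minimum, i.e. it acts as the purely
# repulsive WCA potential commonly used for bead-spring polymer models.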
fene = interactions.FeneBond(k=10, d_r_max=2)
system.bonded_inter.add(fene)
poly = system.polymer
poly(N_P = 1, bond_length = 1.0, MPC=50, bond_id=0)
#############################################################
# Integration #
#############################################################
for i in range(20):
integrate.integrate(1000)
energies = system.analysis.energy()
    print(energies)
| tbereau/espresso | samples/python/minimal-polymer.py | Python | gpl-3.0 | 1,764 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
#
# Copyright (C) Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import json
import logging
from dateutil import parser
from grimoire.elk.enrich import Enrich, metadata
from .utils import get_time_diff_days
class KitsuneEnrich(Enrich):
def get_field_author(self):
return "creator"
def get_elastic_mappings(self):
mapping = """
{
"properties": {
"content_analyzed": {
"type": "string",
"index":"analyzed"
},
"tags_analyzed": {
"type": "string",
"index":"analyzed"
}
}
} """
return {"items":mapping}
def get_sh_identity(self, item, identity_field=None):
identity = {}
user = item
if 'data' in item and type(item) == dict:
user = item['data'][identity_field]
elif identity_field in item:
# for answers
user = item[identity_field]
identity['username'] = user['username']
identity['email'] = None
identity['name'] = user['username']
if user['display_name']:
identity['name'] = user['display_name']
return identity
def get_identities(self, item):
""" Return the identities from an item """
identities = []
item = item['data']
for identity in ['creator']:
# Todo: questions has also involved and solved_by
if identity in item and item[identity]:
user = self.get_sh_identity(item[identity])
identities.append(user)
if 'answers_data' in item:
for answer in item['answers_data']:
user = self.get_sh_identity(answer[identity])
identities.append(user)
return identities
@metadata
def get_rich_item(self, item, kind='question'):
eitem = {}
# Fields common in questions and answers
common_fields = ["product", "topic", "locale", "is_spam", "title"]
if kind == 'question':
eitem['type'] = kind
# metadata fields to copy
copy_fields = ["metadata__updated_on","metadata__timestamp","ocean-unique-id","origin","offset"]
for f in copy_fields:
if f in item:
eitem[f] = item[f]
else:
eitem[f] = None
# The real data
question = item['data']
# data fields to copy
copy_fields = ["content", "num_answers", "solution"]
copy_fields += common_fields
for f in copy_fields:
if f in question:
eitem[f] = question[f]
else:
eitem[f] = None
eitem["content_analyzed"] = question['content']
# Fields which names are translated
map_fields = {
"id": "question_id",
"num_votes": "score"
}
for fn in map_fields:
eitem[map_fields[fn]] = question[fn]
tags = ''
for tag in question['tags']:
tags += tag['slug'] + ","
tags = tags[0:-1] # remove last ,
eitem["tags"] = tags
eitem["tags_analyzed"] = tags
# Enrich dates
eitem["creation_date"] = parser.parse(question["created"]).isoformat()
eitem["last_activity_date"] = parser.parse(question["updated"]).isoformat()
eitem['lifetime_days'] = \
get_time_diff_days(question['created'], question['updated'])
eitem.update(self.get_grimoire_fields(question['created'], "question"))
eitem['author'] = question['creator']['username']
if question['creator']['display_name']:
eitem['author'] = question['creator']['display_name']
if self.sortinghat:
eitem.update(self.get_item_sh(item))
elif kind == 'answer':
answer = item
eitem['type'] = kind
# data fields to copy
copy_fields = ["content", "solution"]
copy_fields += common_fields
for f in copy_fields:
if f in answer:
eitem[f] = answer[f]
else:
eitem[f] = None
eitem["content_analyzed"] = answer['content']
# Fields which names are translated
map_fields = {
"id": "answer_id",
"question": "question_id",
"num_helpful_votes": "score",
"num_unhelpful_votes":"unhelpful_answer"
}
for fn in map_fields:
eitem[map_fields[fn]] = answer[fn]
eitem["helpful_answer"] = answer['num_helpful_votes']
# Enrich dates
eitem["creation_date"] = parser.parse(answer["created"]).isoformat()
eitem["last_activity_date"] = parser.parse(answer["updated"]).isoformat()
eitem['lifetime_days'] = \
get_time_diff_days(answer['created'], answer['updated'])
eitem.update(self.get_grimoire_fields(answer['created'], "answer"))
eitem['author'] = answer['creator']['username']
if answer['creator']['display_name']:
eitem['author'] = answer['creator']['display_name']
if self.sortinghat:
# date field must be the same than in question to share code
answer[self.get_field_date()] = answer['updated']
eitem[self.get_field_date()] = answer[self.get_field_date()]
eitem.update(self.get_item_sh(answer))
return eitem
def enrich_items(self, items):
max_items = self.elastic.max_items_bulk
current = 0
bulk_json = ""
total = 0
url = self.elastic.index_url+'/items/_bulk'
logging.debug("Adding items to %s (in %i packs)", url, max_items)
for item in items:
if current >= max_items:
self.requests.put(url, data=bulk_json)
bulk_json = ""
current = 0
rich_item = self.get_rich_item(item)
data_json = json.dumps(rich_item)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
(item[self.get_field_unique_id()])
bulk_json += data_json +"\n" # Bulk document
current += 1
total += 1
            # Time to also enrich the answers
if 'answers_data' in item['data']:
for answer in item['data']['answers_data']:
# Add question title in answers
answer['title'] = item['data']['title']
answer['solution'] = 0
if answer['id'] == item['data']['solution']:
answer['solution'] = 1
rich_answer = self.get_rich_item(answer, kind='answer')
data_json = json.dumps(rich_answer)
bulk_json += '{"index" : {"_id" : "%s_%i" } }\n' % \
(item[self.get_field_unique_id()],
rich_answer['answer_id'])
bulk_json += data_json +"\n" # Bulk document
current += 1
total += 1
self.requests.put(url, data = bulk_json)
return total
| sanacl/GrimoireELK | grimoire/elk/kitsune.py | Python | gpl-3.0 | 8,313 |
###########################################################################
#
# Copyright (c) 2010 Davide Pesavento <davidepesa@gmail.com>
#
# This file is part of FORSE.
#
# FORSE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FORSE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FORSE. If not, see <http://www.gnu.org/licenses/>.
#
###########################################################################
from PyQt4.Qt import Qt
from PyQt4.QtCore import QAbstractTableModel, QVariant
from PyQt4.QtGui import QIcon
from OTPApplication import OTPApplication
class WeatherModel(QAbstractTableModel):
def __init__(self):
QAbstractTableModel.__init__(self)
self.__changes = {}
self.__weather = {}
self.__sun = QIcon(":/icons/sun.png")
self.__lightRain = QIcon(":/icons/light-rain.png")
self.__rain = QIcon(":/icons/rain.png")
self.__heavyRain = QIcon(":/icons/heavy-rain.png")
self.__icons = [self.__sun]
self.__icons[1:3] = [self.__lightRain] * 3
self.__icons[4:7] = [self.__rain] * 4
self.__icons[8:10] = [self.__heavyRain] * 3
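        # Resulting lookup table: intensity 0 -> sun, 1-3 -> light rain,
        # 4-7 -> rain, 8-10 -> heavy rain (the slice assignments extend the list).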
handlers = {('init', 'weather'): self._setWeather,
('update', 'weather'): self._setWeather}
OTPApplication.registerMsgHandlers(handlers)
def changes(self):
return self.__changes.items()
def discardChanges(self):
self.__changes = {}
self.reset()
def columnCount(self, _parent):
return 3
def rowCount(self, _parent):
return len(self.__weather)
def flags(self, index):
flags = QAbstractTableModel.flags(self, index)
if index.column() == 2:
flags |= Qt.ItemIsEditable
return flags
def data(self, index, role):
if role == Qt.DisplayRole:
try:
if index.column() == 0:
return QVariant(index.row())
elif index.column() == 1:
return QVariant(self.__weather[index.row()])
elif index.column() == 2:
return QVariant(self.__changes[index.row()])
except KeyError:
pass
elif role == Qt.DecorationRole and index.column() == 1:
return QVariant(self.__icons[self.__weather[index.row()]])
elif role == Qt.EditRole and index.column() == 2:
try:
return QVariant(self.__changes[index.row()])
except KeyError:
return QVariant(self.__weather[index.row()])
elif role == Qt.TextAlignmentRole:
return QVariant(Qt.AlignCenter)
return QVariant()
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
if section == 0:
return QVariant("Sector")
elif section == 1:
return QVariant("Current")
elif section == 2:
return QVariant("Local changes")
return QVariant()
def setData(self, index, value, role):
changed = False
if index.column() == 2 and role == Qt.EditRole:
r = index.row()
if value != self.__weather[r]:
self.__changes[r] = value
changed = True
elif r in self.__changes:
del self.__changes[r]
changed = True
if changed:
self.dataChanged.emit(index, index)
return changed
def _setWeather(self, weather):
for sectId, rain in weather:
self.__weather[sectId] = rain
try:
if self.__changes[sectId] == rain:
del self.__changes[sectId]
except KeyError:
pass
self.reset()
| Pesa/forse | src/forse/weather_station/WeatherModel.py | Python | gpl-3.0 | 4,250 |
# -*- coding: utf-8 -*-
#
# Grid cell modeling and data analysis documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 31 21:47:10 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
print('Inside RTD environment!')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../simtools'))
sys.path.insert(0, os.path.abspath('../noisefigs'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.inheritance_diagram',
'numpydoc'
]
# Include todos?
todo_include_todos = True
#
# Fix an issue with nonexistent documents:
# https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ei-attractor'
copyright = u'2010-2015, Lukas Solanka'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'data_descriptions/*']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css',
],
}
else:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
def setup(app):
app.add_stylesheet('theme_overrides.css')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gridcellmodelinganddataanalysisdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Gridcellmodelinganddataanalysis.tex', u'Grid cell modeling and data analysis Documentation',
u'Lukas Solanka', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gridcellmodelinganddataanalysis', u'Grid cell modeling and data analysis Documentation',
[u'Lukas Solanka'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Gridcellmodelinganddataanalysis', u'Grid cell modeling and data analysis Documentation',
u'Lukas Solanka', 'Gridcellmodelinganddataanalysis', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# inheritance
inheritance_graph_attrs = dict(rankdir="TB", fontsize=14, ratio='compress')
graphviz_output_format = 'svg'
##############################################################################
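# NOTE (editor, hedged): the Mock classes below stub out heavy compiled
# dependencies (numpy, scipy, matplotlib, nest, ...) so that Sphinx autodoc can
# import the project's modules on Read the Docs without those packages being
# installed; attribute access on a Mock just returns another Mock, or a fake
# type for capitalized names so that subclassing keeps working.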
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
class NestMock(Mock):
def __init__(self, *args, **kwargs):
pass
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return NestMock()
def Install(self, *args, **kwargs):
pass
MOCK_MODULES = [
'numpy', 'numpy.ma', 'numpy.ma.core', 'numpy.fft', 'numpy.fft.fftpack',
'numpy.random', 'numpy.core', 'numpy.core.umath',
'scipy', 'scipy.integrate', 'scipy.signal', 'scipy.ndimage', 'scipy.stats',
'scipy.ndimage.interpolation', 'scipy.optimize', 'scipy.interpolate',
'scipy.io',
'matplotlib', 'matplotlib.axes', 'matplotlib.pyplot', 'matplotlib.patches',
'matplotlib.ticker', 'matplotlib.colors', 'matplotlib.transforms',
'matplotlib.colorbar', 'matplotlib.gridspec', 'matplotlib.backends',
'matplotlib.backends.backend_pdf',
'grid_cell_model.analysis.Wavelets',
'gridcells', 'gridcells.analysis', 'gridcells.analysis.signal',
'pyentropy', 'minepy',
'configobj',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
sys.modules['nest'] = NestMock()
| MattNolanLab/ei-attractor | doc/conf.py | Python | gpl-3.0 | 11,016 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import range
import warnings
import unittest
import sys
import itertools as it
import copy
import time
import math
import logging
import os.path
import os
import shutil
import contextlib
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import numpy as np
import numpy.testing as nptest
import forgi.threedee.model.coarse_grain as ftmc
import forgi.graph.bulge_graph as fgb
import forgi.threedee.model.similarity as ftme
import forgi.threedee.utilities.graph_pdb as ftug
import forgi.threedee.utilities.vector as ftuv
import forgi.utilities.debug as fud
from forgi.utilities.stuff import make_temp_directory
from ...graph import bulge_graph_test as tfgb
log = logging.getLogger(__name__)
@contextlib.contextmanager
def ignore_warnings():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield None
def cg_from_sg(cg, sg):
'''
Create a coarse-grain structure from a subgraph.
@param cg: The original structure
@param sg: The list of elements that are in the subgraph
'''
new_cg = ftmc.cg_from_sg(cg, sg)
return new_cg
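    # NOTE (editor): everything below this early return is unreachable; it appears
    # to be the older manual subgraph-copy logic, presumably kept for reference.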
for d in sg:
new_cg.defines[d] = cg.defines[d]
if d in cg.coords.keys():
new_cg.coords[d] = cg.coords[d]
if d in cg.twists.keys():
new_cg.twists[d] = cg.twists[d]
if d in cg.longrange.keys():
new_cg.longrange[d] = cg.longrange[d]
for x in cg.edges[d]:
if x in new_cg.defines.keys():
new_cg.edges[d].add(x)
new_cg.edges[x].add(d)
return new_cg
def mock_run_mc_annotate(original_function):
"""
Caching of MC-Annotate output for speedup
"""
def mocked_run_mc_annotate(filename, subprocess_kwargs):
new_fn = os.path.split(filename)[1]
new_fn += ".mcAnnotate.out"
try:
with open(os.path.join("test", "forgi", "threedee", "data", new_fn)) as f:
lines = f.readlines()
log.error("Using cached MC-Annotate output")
except IOError: # on py3 this is an alias of oserror
lines = original_function(filename, subprocess_kwargs)
with open(os.path.join("test", "forgi", "threedee", "data", new_fn), "w") as f:
print("\n".join(lines), file=f)
log.info("Returning lines: %s", lines)
return lines
return mocked_run_mc_annotate
def mocked_read_config():
"""
Require MC-Annotate for consistency. If not installed, tests should be skipped.
"""
if not ftmc.which("MC-Annotate"):
raise unittest.SkipTest("This Test requires MC-Annotate for consistency.")
else:
return {"PDB_ANNOTATION_TOOL": "MC-Annotate"}
@patch('forgi.config.read_config', mocked_read_config)
@patch('forgi.threedee.model.coarse_grain._run_mc_annotate',
mock_run_mc_annotate(ftmc._run_mc_annotate))
class CoarseGrainIoTest(tfgb.GraphVerification):
def check_cg_integrity(self, cg):
self.assertGreater(len(list(cg.stem_iterator())), 0)
for s in cg.stem_iterator():
edges = list(cg.edges[s])
if len(edges) < 2:
continue
multiloops = False
for e in edges:
if e[0] != 'i':
multiloops = True
if multiloops:
continue
self.assertFalse(np.allclose(cg.coords[edges[0]][0],
cg.coords[edges[1]][0]))
self.assertFalse(np.allclose(cg.coords[edges[0]][0],
cg.coords[edges[1]][1]))
self.assertFalse(np.allclose(cg.coords[edges[0]][1],
cg.coords[edges[1]][0]))
self.assertFalse(np.allclose(cg.coords[edges[0]][1],
cg.coords[edges[1]][1]))
def test_dssr_backslash_in_filename(self):
"""
DSSR puts the input filename in the JSON, which makes the JSON invalid,
if a backslash is in it. We patch the DSSR JSON before parsing.
"""
with make_temp_directory() as d:
# On Windows, bla is a directory, and the backslash is
# part of the path,
# on decent operating systems,
# the backslash is part of the filename.
filename=os.path.join(d, "bla\\something.pdb")
dir, rest = os.path.split(filename)
# On Windows, make the directory bla, on Linux do nothing
try:
os.makedirs(dir)
except OSError:
# Directory exists
pass
shutil.copy('test/forgi/threedee/data/1y26.pdb', filename)
try:
# Make sure we do not raise any error.
cg, = ftmc.CoarseGrainRNA.from_pdb(filename,
annotation_tool="DSSR")
except ftmc.AnnotationToolNotInstalled:
self.skipTest("This Test requires DSSR")
self.check_graph_integrity(cg)
self.assertGreater(len(cg.defines), 2)
def test_from_mmcif(self):
import Bio.PDB as bpdb
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1Y26.cif')
cg2, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/1y26.pdb')
self.assertEqual(cg.defines, cg2.defines)
self.assertGreater(len(cg.defines), 3)
for d in cg.defines:
nptest.assert_almost_equal(cg.coords[d], cg2.coords[d])
def test_from_mmcif_missing_residues(self):
import Bio.PDB as bpdb
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/2x1f.cif', load_chains="B")
cg2, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/2X1F.pdb', load_chains="B")
log.error(cg.seq._missing_nts)
self.assertEqual(len(cg.seq._missing_nts), 3)
self.assertEqual(len(cg2.seq._missing_nts), 3)
self.assertEqual(cg.seq, cg2.seq)
def test_from_pdb(self):
import time
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains='E')
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/RS_363_S_5.pdb')
log.error(time.time() - now)
now = time.time()
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/RS_118_S_0.pdb')
log.error(time.time() - now)
now = time.time()
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
self.assertTrue(len(cg.defines) > 1)
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/ideal_1_4_5_8.pdb')
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/ideal_1_4_5_8.pdb')
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/1y26_missing.pdb')
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1y26_two_chains.pdb',
load_chains='Y')
self.assertEqual(len(cg.defines), 1)
self.assertIn("f0", cg.defines)
self.assertEqual(cg.seq, "U")
log.error(time.time() - now)
now = time.time()
        # commented out for a ~3 sec speedup
# cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1X8W.pdb',
# load_chains='A')
# self.check_cg_integrity(cg)
#log.error (time.time()-now); now=time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/1FJG_reduced.pdb')
self.check_cg_integrity(cg)
log.error(time.time() - now)
now = time.time()
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1y26.pdb')
log.error(time.time() - now)
now = time.time()
def test_file_with_numeric_chain_id(self):
# Numeric chain ids
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/3J7A_part.pdb', load_chains=["7"])
self.check_cg_integrity(cg)
self.assertEqual(cg.seq._seqids[0].chain, '7')
def test_from_pdb_cofold(self):
# 1FUF triggers the if fromA.chain != fromB.chain clause in _are_adjacent_basepairs
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1FUF.pdb',
dissolve_length_one_stems=True)
self.check_cg_integrity(cg)
def verify_multiple_chains(self, cg, single_chain_cgs):
log.warning("Backbone in %s breaks after %s",
cg.name, cg.backbone_breaks_after)
self.assertEqual(len(cg.backbone_breaks_after),
len(single_chain_cgs) - 1)
self.assertEqual(cg.seq_length, sum(
x.seq_length for x in single_chain_cgs))
# There might be stems spanning multiple chains.
self.assertGreaterEqual(len([s for s in cg.defines if s[0] == "s"]), len(
[s for c in single_chain_cgs for s in c.defines if s[0] == "s"]))
self.assertEqual(cg.seq, "&".join(str(x.seq)
for x in single_chain_cgs))
def test_from_pdb_f_in_second_chain(self):
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains=None)
self.assertEqual(set(cg.defines.keys()), set(["t0", "s0", "f0"]))
def test_from_pdb_multiple(self):
cgE, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains='E')
cgF, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains='F')
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains=None)
self.assertEqual(set(cg.chains.keys()), set(["E", "F"]))
self.assertEqual(len(cg.backbone_breaks_after), 1)
bp = cg.backbone_breaks_after[0]
self.assertEqual(bp, 3)
self.assertEqual(cg.seq[:bp], cgE.seq)
self.assertEqual(cg.seq[1:bp], cgE.seq)
self.assertEqual(cg.seq[bp + 1:], cgF.seq)
self.verify_multiple_chains(cg, [cgE, cgF])
cgA, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/3CQS.pdb', load_chains='A')
cgB, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/3CQS.pdb', load_chains='B')
cgC, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/3CQS.pdb', load_chains='C')
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/3CQS.pdb', load_chains=None)
log.warning("cg now has %s cutpoints: %s", len(
cg.seq._breaks_after), cg.backbone_breaks_after)
self.verify_multiple_chains(cg, [cgA, cgB, cgC])
def test_multiple_chain_to_cg(self):
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/4GV9.pdb', load_chains=None)
log.debug("======= FIRST IS LOADED =========")
cg_str = cg.to_cg_string()
log.debug("\n" + cg_str)
print(cg_str)
cg2 = ftmc.CoarseGrainRNA.from_bg_string(cg_str)
self.assertEqual(cg.defines, cg2.defines)
self.assertLess(ftme.cg_rmsd(cg, cg2), 10**-6)
self.assertEqual(cg.backbone_breaks_after, cg2.backbone_breaks_after)
cg, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/3CQS.pdb', load_chains=None)
cg.log(logging.WARNING)
cg_str = cg.to_cg_string()
cg2 = ftmc.CoarseGrainRNA.from_bg_string(cg_str)
self.assertEqual(cg.defines, cg2.defines)
# This only looks at stems
self.assertLess(ftme.cg_rmsd(cg, cg2), 10**-6)
self.assertEqual(cg.backbone_breaks_after, cg2.backbone_breaks_after)
def test_connected_cgs_from_pdb(self):
cgs = ftmc.CoarseGrainRNA.from_pdb("test/forgi/threedee/data/1DUQ.pdb")
self.assertEqual(len(cgs), 4)
# This PDB file contains 4 similar RNA dimers
self.assertEqual(cgs[0].name, "1DUQ_A-B")
self.assertEqual(cgs[1].name, "1DUQ_C-D")
self.assertEqual(cgs[2].name, "1DUQ_E-F")
self.assertEqual(cgs[3].name, "1DUQ_G-H")
self.assertEqual(cgs[0].defines, cgs[2].defines)
self.assertEqual(cgs[1].defines, cgs[3].defines)
def test_multiple_models_in_file(self):
with self.assertWarns(UserWarning) if hasattr(self, 'assertWarns') else ignore_warnings():
cgs = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1byj.pdb')
self.assertEqual(len(cgs), 1) # Only look at first model!
def test_annotating_with_dssr(self):
pass
class CoarseGrainTest(tfgb.GraphVerification):
'''
Simple tests for the BulgeGraph data structure.
For now the main objective is to make sure that a graph is created
and nothing crashes in the process. In the future, test cases for
bugs should be added here.
'''
def setUp(self):
self.longMessage = True
def check_cg_integrity(self, cg):
for s in cg.stem_iterator():
edges = list(cg.edges[s])
if len(edges) < 2:
continue
multiloops = False
for e in edges:
if e[0] != 'i':
multiloops = True
if multiloops:
continue
self.assertFalse(np.allclose(cg.coords[edges[0]][0],
cg.coords[edges[1]][0]))
self.assertFalse(np.allclose(cg.coords[edges[0]][0],
cg.coords[edges[1]][1]))
self.assertFalse(np.allclose(cg.coords[edges[0]][1],
cg.coords[edges[1]][0]))
self.assertFalse(np.allclose(cg.coords[edges[0]][1],
cg.coords[edges[1]][1]))
def compare_bg_to_cg(self, bg, cg):
for d in bg.defines.keys():
self.assertTrue(d in cg.defines.keys())
self.assertTrue(bg.defines[d] == cg.defines[d])
for e in bg.edges.keys():
self.assertTrue(e in cg.edges.keys())
self.assertTrue(bg.edges[e] == cg.edges[e])
def test_get_node_from_residue_num(self):
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1X8W.pdb',
load_chains='A', dissolve_length_one_stems=True)
self.check_cg_integrity(cg)
elem_name = cg.get_node_from_residue_num(1)
cg.log()
self.assertEqual(elem_name, "f0")
def test_from_cg(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
self.check_graph_integrity(cg)
self.check_cg_integrity(cg)
# self.assertEqual(len(cg.coords), 8)
for key in cg.defines.keys():
self.assertTrue(key in cg.coords)
def test_from_and_to_cgstring(self):
cg1 = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
cg1.project_from = np.array([1, 2, 3.5])
stri = cg1.to_cg_string()
cg2 = ftmc.CoarseGrainRNA.from_bg_string(stri)
for key in set(cg1.defines):
self.assertTrue(key in cg2.defines)
self.assertTrue(key in cg2.coords)
nptest.assert_allclose(cg1.defines[key], cg2.defines[key])
nptest.assert_allclose(cg1.coords[key][0], cg2.coords[key][0])
nptest.assert_allclose(cg1.coords[key][1], cg2.coords[key][1])
for key in set(cg2.defines):
self.assertTrue(key in cg1.defines)
self.assertTrue(key in cg1.coords)
nptest.assert_allclose(cg1.defines[key], cg2.defines[key])
nptest.assert_allclose(cg1.coords[key][0], cg2.coords[key][0])
nptest.assert_allclose(cg1.coords[key][1], cg2.coords[key][1])
nptest.assert_allclose(cg1.project_from, cg2.project_from)
def test_to_and_from_cgstring_vres(self):
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2mis.pdb')
cg.add_all_virtual_residues()
cgstri = cg.to_cg_string()
self.assertIn("vres", cgstri)
cg2 = ftmc.CoarseGrainRNA.from_bg_string(cgstri)
self.assertEqual(
len(cg2.vposs["h0"]), cg2.defines["h0"][1] - cg2.defines["h0"][0] + 1)
self.assertLess(ftuv.vec_distance(
cg.vposs["h0"][0], cg2.vposs["h0"][0]), 10**-8)
self.assertLess(ftuv.vec_distance(
cg.vposs["i0"][2], cg2.vposs["i0"][2]), 10**-8)
def test_get_bulge_angle_stats_core(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
self.check_graph_integrity(cg)
for d in cg.mloop_iterator():
cg.get_bulge_angle_stats(d)
def test_get_bulge_angle_stats_for_start(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
s1, s2 = cg.get_bulge_angle_stats("start")
def test_read_longrange_interactions(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
self.check_graph_integrity(cg)
self.assertGreater(len(cg.longrange), 0)
def test_radius_of_gyration(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
self.check_graph_integrity(cg)
rog = cg.radius_of_gyration()
self.assertGreater(rog, 0.)
maxDist = max(ftuv.vec_distance(p0, p1) for p0, p1 in it.combinations(cg.coords._coordinates, 2))
        estimated_radius_circum_circle = maxDist / 2
# NOTE: The ROG is 0.77 times the radius of the circumcircle, for m->inf many points
# in a 3D unit sphere with the nth point placed at radius (n/m)**1/3
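        # (for a uniformly filled sphere of radius R, ROG = sqrt(3/5)*R ~ 0.775*R)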
        self.assertLess(rog, estimated_radius_circum_circle * 0.77)
def test_radius_of_gyration_different_methods(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
rog_fast = cg.radius_of_gyration(method="fast")
rog_vres = cg.radius_of_gyration(method="vres")
print(rog_fast, rog_vres, rog_fast - rog_vres, file=sys.stderr)
self.assertGreater(abs(rog_fast - rog_vres), 0, msg="Different methods for ROG calculation "
"producting the exactly same result? Something seems to be wrong.")
self.assertLess(abs(rog_fast - rog_vres), 3, msg="Different methods for ROG calculation "
"should produce roughly the same result.")
def test_radius_of_gyration_no_stems(self):
cg, = ftmc.CoarseGrainRNA.from_fasta_text("AUCG\n....")
cg.coords["f0"] = [0, 0, 0.], [12., 1, 1]
self.assertTrue(math.isnan(cg.radius_of_gyration()))
def test_get_sides(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1gid.cg')
self.check_graph_integrity(cg)
self.check_cg_integrity(cg)
log.info(cg.to_dotbracket_string())
log.info(cg.to_element_string(True))
# The file 1gid.cg still starts with f1, not f0
(s1b, s1e) = cg.get_sides('s0', 'f1')
(s1b, s1e) = cg.get_sides('s8', 't1')
'''
def test_cg_from_sg(self):
bg = ftmc.CoarseGrainRNA(
dotbracket_str='.(((((..(((.(((((((.((.((((..((((((....))))))..)))).)).))........(((((.....((((...((((....))))...))))...))))).))))).)))...)))))')
self.check_graph_integrity(bg)
# bg = cgb.BulgeGraph(dotbracket_str='.(((((........)))))..((((((((.(((.((...........((((((..(((((.((((((((..(((..)))...((((....)))).....))))))))..)))))................((((((...........))))))..((...(((((((...((((((..)))))).....((......))....)))))))...(((((((((.........))))))))).(((....))).))..........(((((.(((((.......))))))))))..........))))..))............(((.((((((((...((.......))...))))))..))))).........((((((((((((..(((((((((......))))))..))).((((.......)))).....)))))..))..))).))....((...............))....))..)))))))))))...')
for j in range(40):
sg = bg.random_subgraph()
new_cg = cg_from_sg(bg, sg)
for i in it.chain(new_cg.iloop_iterator(), new_cg.mloop_iterator()):
c = new_cg.connections(i)
if len(c) != 2:
self.assertEqual(len(c), 2)
'''
def test_get_stem_stats(self):
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2mis.pdb')
cg.get_stem_stats("s0")
def test_get_angle_stats(self):
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2mis.pdb')
for d in cg.defines:
if d[0] in "mi":
cg.get_bulge_angle_stats(d)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1byj.pdb')
for d in cg.defines:
if d[0] in "mi":
cg.get_bulge_angle_stats(d)
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2QBZ.pdb')
for d in cg.defines:
if d[0] in "mi":
cg.get_bulge_angle_stats(d)
def test_get_loop_stat(self):
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2mis.pdb')
cg.get_loop_stat("h0")
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/4GXY_A.cg') # Contains a loop with r=0
self.check_graph_integrity(cg)
self.check_cg_integrity(cg)
cg.get_loop_stat('h3')
def test_length_one_stems(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1byj.pdb',
remove_pseudoknots=False)
self.check_graph_integrity(cg)
self.check_cg_integrity(cg)
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2QBZ.pdb',
remove_pseudoknots=False)
self.check_graph_integrity(cg)
self.check_cg_integrity(cg)
def test_pseudoknot(self):
#cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/1ymo.pdb')
# self.check_graph_integrity(cg)
# self.check_cg_integrity(cg)
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/3D0U_A.cg')
self.check_graph_integrity(cg)
self.check_cg_integrity(cg)
cg.traverse_graph()
self.assertEqual(cg.get_angle_type("i3"), 1)
def test_small_molecule(self):
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2X1F.pdb')
log.info(cg.to_dotbracket_string())
log.info(cg.to_element_string(True))
log.info("COORDS {}".format(cg.coords))
self.assertTrue('f0' in cg.coords)
def test_longrange_iterator(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
interactions = list(cg.longrange_iterator())
self.assertEqual(len(interactions), 4)
self.assertTrue(('i0', 's0') in interactions)
def test_longrange_distance(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
dist = cg.element_physical_distance('h0', 'h1')
self.assertTrue(dist < 10)
def test_total_length(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
self.assertEqual(cg.total_length(), cg.seq_length)
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2X1F.pdb')
self.assertEqual(cg.total_length(), cg.seq_length)
cg = ftmc.CoarseGrainRNA.from_dotbracket('..((..((...))..))..((..))..')
self.assertEqual(cg.total_length(), cg.seq_length)
self.assertEqual(cg.total_length(), 27)
def test_get_load_coordinates(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
coords = cg.get_coordinates_array()
new_cg = copy.deepcopy(cg)
for key in new_cg.coords:
new_cg.coords[key] = [0, 0, 0], [0, 0, 0]
new_cg.load_coordinates_array(coords)
for key in new_cg.coords:
for i in range(len(new_cg.coords[key])):
nptest.assert_allclose(new_cg.coords[key][i],
cg.coords[key][i])
"""
def test_is_stacking(self):
cg = ftmc.CoarseGrainRNA.from_bg_file('test/forgi/threedee/data/3way.cg')
self.assertFalse(cg.is_stacking("m0")) #Distance
self.assertFalse(cg.is_stacking("m1")) #distance
self.assertFalse(cg.is_stacking("m2")) #shear angle
def test_is_stacking2(self):
cg = ftmc.CoarseGrainRNA.from_bg_file('test/forgi/threedee/data/1I9V_noPK.cg')
self.assertFalse(cg.is_stacking("m0"))
self.assertFalse(cg.is_stacking("m2"))
self.assertTrue(cg.is_stacking("m1"))
"""
def test_coords_from_direction(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1I9V_noPK.cg')
cg_old = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1I9V_noPK.cg')
coords = cg.get_coordinates_array()
directions = coords[1::2] - coords[0::2]
cg._init_coords()
cg.coords_from_directions(directions)
# self.assertAlmostEqual(ftme.cg_rmsd(cg, cg_old), 0) #This only looks at stems
# The coordinates should be the same as before except for a constant offset
new_coords = cg.get_coordinates_array()
offset = (coords - new_coords)
print(offset)
# I use allclose, because it uses broadcasting
assert np.allclose(offset, offset[0])
def test_coords_from_direction_with_pseudoknot(self):
# This tests the case where the link is inserted from reverse direction.
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/3D0U_A.cg')
cg_old = copy.deepcopy(cg)
coords = cg.get_coordinates_array()
directions = cg.coords_to_directions()
cg._init_coords()
cg.twists = cg_old.twists
log.info("len(coords):{}, len(directions):{}, len(defines):{}".format(
len(coords), len(directions), len(cg.defines)))
cg.coords_from_directions(directions)
self.assertLess(ftme.cg_rmsd(cg, cg_old), 10**-6)
new_coords = cg.get_coordinates_array()
offset = (coords - new_coords)
assert np.allclose(offset, offset[0])
@unittest.skip("It is hard to do the subgraph thing correctly in a way consistent with the RNA model. Thus it has been disabled in the current release!")
def test_cg_from_sg_invalid_subgraph_breaking_m(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/3D0U_A.cg')
"""
/s3 --h1
m1 |
/ |
s0 m2
\ |
m0 |
\s1--i0--s2--h0
"""
split_ml = ["s0", "m0", "s1"]
with self.assertRaises(Exception):
ftmc.cg_from_sg(cg, split_ml)
@unittest.skip("It is hard to do the subgraph thing correctly in a way consistent with the RNA model. Thus it has been disabled in the current release!")
def test_cg_from_sg_breaking_after_i(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/3D0U_A.cg')
"""
/s3 --h1
m1 |
/ |
s0 m2
\ |
m0 |
\s1--i0--s2--h0
"""
split_ml = ["s0", "m0", "s1", "m2", "s3", "m1", "h1", "i0"]
sg = ftmc.cg_from_sg(cg, split_ml)
self.check_graph_integrity(sg)
@unittest.skip("It is hard to do the subgraph thing correctly in a way consistent with the RNA model. Thus it has been disabled in the current release!")
def test_cg_from_sg_breaking_after_s(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/3D0U_A.cg')
"""
/s3 --h1
m1 |
/ |
s0 m2
\ |
m0 |
\s1--i0--s2--h0
"""
split_ml = ["s0", "m0", "s1", "m2", "s3", "m1", "h1"]
sg = ftmc.cg_from_sg(cg, split_ml)
self.check_graph_integrity(sg)
class TestVirtualAtoms(unittest.TestCase):
def setUp(self):
self.longMessage = True
@unittest.skip("This test currently fails. Should be fixed in version 0.5")
def test_virtual_atoms_only_single_stranded(self):
cg, = ftmc.CoarseGrainRNA.from_pdb('test/forgi/threedee/data/2X1F.pdb')
va = cg.virtual_atoms(1)
self.assertIn("C1'", va) # C1' should be always present
def test_virtual_atoms_stem_distance_to_pairing_partner(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
va1 = cg.virtual_atoms(1)
va2 = cg.virtual_atoms(cg.pairing_partner(1))
self.assertLess(ftuv.vec_distance(
va1["C1'"], va2["C1'"]), 25, msg="Virtual atoms too far apart")
self.assertGreater(ftuv.vec_distance(
va1["C1'"], va2["C1'"]), 8, msg="Virtual atoms too close")
def test_virtual_atoms_stem_distance_to_stacked_base(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
va1 = cg.virtual_atoms(1)
va2 = cg.virtual_atoms(2)
self.assertLess(ftuv.vec_distance(
va1["C1'"], va2["C1'"]), 10, msg="Virtual atoms too far apart")
self.assertGreater(ftuv.vec_distance(
va1["C1'"], va2["C1'"]), 2, msg="Virtual atoms too close")
    def test_virtual_atom_caching_is_reset(self):
cg = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
va_old = cg.virtual_atoms(1)["C1'"]
# Stay orthogonal to twists
cg.coords["s0"] = cg.coords["s0"][0] + \
(cg.coords["s0"][1] - cg.coords["s0"][0]) * 0.5, cg.coords["s0"][1]
va_new = cg.virtual_atoms(1)["C1'"]
self.assertTrue(np.any(np.not_equal(va_old, va_new)),
msg="A stale virtual atom position was used.")
class RotationTranslationTest(unittest.TestCase):
def setUp(self):
self.cg1 = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.cg2, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/1byj.pdb')
def test_rotate_keeps_RMSD_zero0(self):
cg1_rot = copy.deepcopy(self.cg1)
cg1_rot.rotate(30, unit="degrees")
cg1_rot.rotate(-30, unit="degrees")
self.assertLess(ftme.cg_rmsd(self.cg1, cg1_rot), 10**-6)
def test_rotate_keeps_RMSD_zero(self):
cg1_rot = copy.deepcopy(self.cg1)
cg1_rot.rotate(30, unit="degrees")
# This currently uses virtual atoms, thus takes twists into account.
self.assertLess(ftme.cg_rmsd(self.cg1, cg1_rot), 10**-6)
cg2_rot = copy.deepcopy(self.cg2)
cg2_rot.rotate(45, unit="degrees")
a,b = self.cg2.get_ordered_virtual_residue_poss(True)
log.warning("------------------------")
c,d = cg2_rot.get_ordered_virtual_residue_poss(True)
c2 = np.dot(c, ftuv.rotation_matrix("x", math.radians(-45)).T)
log.warning("==================================")
for i, coord in enumerate(a):
if any(abs(coord-c2[i])>10**-4):
log.warning("%s %s %s %s",coord, b[i], c2[i], d[i])
self.assertLess(ftme.cg_rmsd(self.cg2, cg2_rot), 10**-6)
class StericValueTest(unittest.TestCase):
def setUp(self):
self.cg1 = ftmc.CoarseGrainRNA.from_bg_file(
'test/forgi/threedee/data/1y26.cg')
self.cg2, = ftmc.CoarseGrainRNA.from_pdb(
'test/forgi/threedee/data/1byj.pdb')
@unittest.skip("Manual test")
def test_stericValue_1(self):
print("m0, m1, m2", self.cg1.steric_value(["m0", "m1", "m2"]))
from_ = np.amin(self.cg1.coords._coordinates)
to_ = np.amax(self.cg1.coords._coordinates)
x, y, z = np.mgrid[from_:to_:4, from_:to_:4, from_:to_:4]
from mayavi import mlab
s = np.zeros_like(x)
for i, j, k in np.ndindex(x.shape):
s[i, j, k] = self.cg1.steric_value(
np.array([x[i, j, k], y[i, j, k], z[i, j, k]]), "r**-3")
#mlab.contour3d(x,y,z,s, contours= [0.5, 1, 2, 5], opacity=0.3)
src = mlab.pipeline.scalar_field(x, y, z, s)
mlab.pipeline.volume(src)
#mlab.pipeline.iso_surface(src, contours=[0.1, ], opacity=0.3)
#mlab.pipeline.iso_surface(src, contours=[0.5, ], opacity=0.7)
#mlab.pipeline.iso_surface(src, contours=[1, ])
colors = {"s": (0, 1, 0), "h": (0, 0, 1), "m": (1, 0, 0), "i": (
1, 1, 0), "f": (0.5, 0.5, 0.5), "t": (0.5, 0.5, 0.5)}
for d in self.cg1.defines:
x = self.cg1.coords[d][0][0], self.cg1.coords[d][1][0]
y = self.cg1.coords[d][0][1], self.cg1.coords[d][1][1]
z = self.cg1.coords[d][0][2], self.cg1.coords[d][1][2]
mlab.plot3d(x, y, z, tube_radius=2, color=colors[d[0]])
mlab.show()
assert False
|
ViennaRNA/forgi
|
test/forgi/threedee/model/coarse_grain_test.py
|
Python
|
gpl-3.0
| 34,193
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'hackYourOwn.ui'
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(718, 475)
MainWindow.setMinimumSize(QtCore.QSize(370, 0))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.SCF2 = QtGui.QWidget(MainWindow)
self.SCF2.setStyleSheet(_fromUtf8(""))
self.SCF2.setObjectName(_fromUtf8("SCF2"))
self.horizontalLayout = QtGui.QHBoxLayout(self.SCF2)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.frame = QtGui.QFrame(self.SCF2)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.WidgetLayout = QtGui.QVBoxLayout(self.frame)
self.WidgetLayout.setSpacing(0)
self.WidgetLayout.setMargin(0)
self.WidgetLayout.setObjectName(_fromUtf8("WidgetLayout"))
self.pushButton = QtGui.QPushButton(self.frame)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.WidgetLayout.addWidget(self.pushButton)
self.horizontalLayout.addWidget(self.frame)
self.scrollArea_4 = QtGui.QScrollArea(self.SCF2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollArea_4.sizePolicy().hasHeightForWidth())
self.scrollArea_4.setSizePolicy(sizePolicy)
self.scrollArea_4.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.scrollArea_4.setStyleSheet(_fromUtf8(""))
self.scrollArea_4.setWidgetResizable(True)
self.scrollArea_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.scrollArea_4.setObjectName(_fromUtf8("scrollArea_4"))
self.SCF1 = QtGui.QWidget()
self.SCF1.setGeometry(QtCore.QRect(0, 0, 532, 473))
self.SCF1.setStyleSheet(_fromUtf8(""))
self.SCF1.setObjectName(_fromUtf8("SCF1"))
self.gridLayout_5 = QtGui.QGridLayout(self.SCF1)
self.gridLayout_5.setMargin(0)
self.gridLayout_5.setSpacing(0)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.frame_5 = QtGui.QFrame(self.SCF1)
self.frame_5.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_5.setFrameShadow(QtGui.QFrame.Raised)
self.frame_5.setObjectName(_fromUtf8("frame_5"))
self.gridLayout_7 = QtGui.QGridLayout(self.frame_5)
self.gridLayout_7.setSpacing(5)
self.gridLayout_7.setContentsMargins(0, 5, 0, 0)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.ExperimentLayout = QtGui.QGridLayout()
self.ExperimentLayout.setMargin(5)
self.ExperimentLayout.setSpacing(7)
self.ExperimentLayout.setObjectName(_fromUtf8("ExperimentLayout"))
self.gridLayout_7.addLayout(self.ExperimentLayout, 1, 0, 1, 1)
self.gridLayout_5.addWidget(self.frame_5, 0, 0, 1, 1)
self.scrollArea_4.setWidget(self.SCF1)
self.horizontalLayout.addWidget(self.scrollArea_4)
self.horizontalLayout.setStretch(0, 1)
self.horizontalLayout.setStretch(1, 3)
MainWindow.setCentralWidget(self.SCF2)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.run)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "DIY programs", None))
self.pushButton.setToolTip(_translate("MainWindow", "run the code", None))
self.pushButton.setText(_translate("MainWindow", "Run", None))
self.SCF1.setProperty("class", _translate("MainWindow", "PeripheralCollectionInner", None))
self.frame_5.setToolTip(_translate("MainWindow", "Widgets specific to detected sensors will be displayed\n"
"here after you click the button below", None))
|
jithinbp/pslab-desktop-apps
|
psl_res/GUI/E_MISCELLANEOUS/B/templates/hackYourOwn.py
|
Python
|
gpl-3.0
| 4,707
|
# -*- coding: utf-8 -*-
'''
DynamoDB KeyConditionExpression and FilterExpression
http://boto3.readthedocs.io/en/stable/reference/customizations/dynamodb.html#ref-dynamodb-conditions
'''
from __future__ import print_function
from decimal import Decimal
from boto3.dynamodb.conditions import Key, Attr
from .errors import ValidationException
from .helpers import smart_unicode
__all__ = ['Expression']
class Expression(object):
def set(self, value,
set_path=None,
attr_label=None,
if_not_exists=None,
list_append=None):
'''
parameters:
- value: value
- set_path: attr path if not use attr name
- attr_label: string attr label ex: label=':p'
- if_not_exists: string path ex: Price
- list_append: (tuple) path, index
ex: (#pr.FiveStar, -1) to last
(#pr.FiveStar, 0) to first
examples:
Test(realname='gs', score=100).update(Test.order_score.set(100))
            Test(realname='gs', score=100).update(
                Test.order_score.set(5, label=':p'))
            Test(realname='gs', score=100).update(
                Test.order_score.set(100, if_not_exists=('order_score', 50)))
Test(realname='gs', score=100).update(
Test.ids.set(100, list_append=('ids')))
or
Test(realname='gs', score=100).update(
Test.ids.list_append(100))
return exp, {label: value}
'''
path = attr_label or self.name
label = ":{name}".format(name=path)
attr_name = "#{name}".format(name=path)
# ExpressionAttributeValues
if isinstance(value, float) or self.use_decimal_types:
value = Decimal(str(value))
eav = {label: value}
ean = {}
if if_not_exists:
no_path, operand = if_not_exists, value
if isinstance(operand, float):
operand = Decimal(str(operand))
eav[label] = operand
ean[attr_name] = path
exp = '{name} = if_not_exists({path}, {label})'.format(
name=attr_name, path=no_path, label=label)
elif list_append:
list_path, index = list_append
if index == 0:
exp = "{path} = list_append({label}, {path})".format(
path=list_path, label=label)
elif index == -1:
exp = "{path} = list_append({path}, {label})".format(
path=list_path, label=label)
else:
raise ValidationException('index error')
else:
path = set_path or self.name
attr_name = "#{name}".format(name=attr_label or path)
ean[attr_name] = path
exp = '{path} = {label}'.format(path=attr_name, label=label)
exp_attr = {
'name': ean,
'value': eav
}
return exp, exp_attr, 'SET'
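    # A minimal illustrative sketch (not part of the original module; the field
    # name 'order_score' and the plain-int value are assumptions): for a simple
    # SET on a non-decimal attribute, set() returns roughly
    #
    #     exp, exp_attr, op = order_score.set(100)
    #     # exp      == '#order_score = :order_score'
    #     # exp_attr == {'name': {'#order_score': 'order_score'},
    #     #              'value': {':order_score': 100}}
    #     # op       == 'SET'
    #
    # which the caller can splice into an UpdateExpression together with the
    # ExpressionAttributeNames / ExpressionAttributeValues dicts.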
def list_append(self, value, path=None, index=-1,
attr_label=None):
path = attr_label or path or self.name
label = ":{name}".format(name=path)
attr_name = "#{name}".format(name=path)
if index == 0:
exp = "{path} = list_append({label}, {path})".format(
path=attr_name, label=label)
elif index == -1:
exp = "{path} = list_append({path}, {label})".format(
path=attr_name, label=label)
else:
raise ValidationException('index error')
exp_attr = {
'value': {label: value},
'name': {attr_name: path}
}
return exp, exp_attr, 'SET'
def remove(self, path=None, indexes=None):
'''
parameters:
path: attr path
index: (list) index ex: [2, 4]
'''
exp = ''
path = path or self.name
attr_name = "#{name}".format(name=path)
ean = {attr_name: path}
if self.field_type == 'list':
for index in indexes:
sub_exp = '{name}[{index}]'.format(name=attr_name,
index=index)
if not exp:
exp = '{sub_exp}'.format(sub_exp=sub_exp)
else:
exp = '{exp}, {sub_exp}'.format(exp=exp,
sub_exp=sub_exp)
return exp, {'name': ean}, 'REMOVE'
else:
exp = '{path}'.format(path=path)
return exp, {}, 'REMOVE'
def add(self, value, path=None, attr_label=None):
'''
support num and set
ADD Price :n price += n
ADD Color :c
'''
if self.field_type not in ('integer', 'float', 'set', 'dict'):
raise ValidationException('Incorrect data type, only [integer, float, set, dict]')
exp_attr = {}
if not path:
attr_name = "#{name}".format(name=attr_label or self.name)
exp_attr['name'] = {attr_name: self.name}
else:
attr_name = attr_label or path
label = ":{name}".format(name=self.name)
exp = '{name} {label}'.format(name=attr_name, label=label)
exp_attr['value'] = {label: value}
return exp, exp_attr, 'ADD'
def typecast_for_storage(self, value):
return smart_unicode(value)
def _expression_func(self, op, *values, **kwargs):
# for use by index ... bad
values = map(self.typecast_for_storage, values)
self.op = op
self.express_args = values
use_key = kwargs.get('use_key', False)
if self.hash_key and op != 'eq':
raise ValidationException('Query key condition not supported')
elif self.hash_key or self.range_key or use_key:
use_key = True
func = getattr(Key(self.name), op, None)
else:
func = getattr(Attr(self.name), op, None)
if not func:
raise ValidationException('Query key condition not supported')
return self, func(*values), use_key
def _expression(self, op, value):
if self.use_decimal_types:
value = Decimal(str(value))
label = ':%s' % self.name
exp = '{name} {op} {value}'.format(name=self.name, op=op, value=label)
return exp, label, value
def eq(self, value): # ==
# Creates a condition where the attribute is equal to the value.
# Attr & Key
return self._expression_func('eq', value)
def ne(self, value): # !=
# Creates a condition where the attribute is not equal to the value
# Attr
return self._expression_func('ne', value)
def lt(self, value): # <
# Creates a condition where the attribute is less than the value.
# Attr & Key
return self._expression_func('lt', value)
def lte(self, value): # <=
# Creates a condition where the attribute is less than or
# equal to the value.
# Attr & Key
return self._expression_func('lte', value)
def gt(self, value): # >
# Creates a condition where the attribute is greater than the value.
# Attr & Key
return self._expression_func('gt', value)
def gte(self, value): # >=
# Creates a condition where the attribute is greater than or equal to
# the value.
# Attr & Key
return self._expression_func('gte', value)
def between(self, low_value, high_value):
# Creates a condition where the attribute is greater than or equal to
# the low value and less than or equal to the high value.
# Attr & Key
return self._expression_func('between', low_value, high_value)
def begins_with(self, value):
# Creates a condition where the attribute begins with the value
# Attr & Key
return self._expression_func('begins_with', value)
def is_in(self, value):
# Creates a condition where the attribute is in the value
# Attr
if self.hash_key or self.range_key:
# ValidationException
raise ValidationException('Query key condition not supported')
return self.name, Attr(self.name).is_in(value), False
def contains(self, value):
# Creates a condition where the attribute contains the value.
# Attr
if self.hash_key or self.range_key:
# ValidationException
raise ValidationException('Query key condition not supported')
return self.name, Attr(self.name).contains(value), False
def exists(self):
# Creates a condition where the attribute exists.
# Attr
if self.hash_key or self.range_key:
# ValidationException
raise ValidationException('Query key condition not supported')
return self.name, Attr(self.name).exists(), False
def not_exists(self):
        # Creates a condition where the attribute does not exist.
# Attr
if self.hash_key or self.range_key:
# ValidationException
raise ValidationException('Query key condition not supported')
return self.name, Attr(self.name).not_exists(), False
|
gusibi/dynamodb-py
|
dynamodb/expression.py
|
Python
|
gpl-3.0
| 9,252
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import calendar
from frappe import _
from frappe.desk.form import assign_to
from dateutil.relativedelta import relativedelta
from frappe.utils.user import get_system_managers
from frappe.utils import cstr, getdate, split_emails, add_days, today
from frappe.model.document import Document
month_map = {'Monthly': 1, 'Quarterly': 3, 'Half-yearly': 6, 'Yearly': 12}
class Subscription(Document):
def validate(self):
self.update_status()
self.validate_dates()
self.validate_next_schedule_date()
self.validate_email_id()
def before_submit(self):
self.set_next_schedule_date()
def on_submit(self):
self.update_subscription_id()
def on_update_after_submit(self):
self.validate_dates()
self.set_next_schedule_date()
def validate_dates(self):
if self.end_date and getdate(self.start_date) > getdate(self.end_date):
frappe.throw(_("End date must be greater than start date"))
def validate_next_schedule_date(self):
if self.repeat_on_day and self.next_schedule_date:
next_date = getdate(self.next_schedule_date)
if next_date.day != self.repeat_on_day:
# if the repeat day is the last day of the month (31)
# and the current month does not have as many days,
# then the last day of the current month is a valid date
lastday = calendar.monthrange(next_date.year, next_date.month)[1]
if self.repeat_on_day < lastday:
					# the scheduled day of the month is neither the repeat day
					# nor the last day of the month
frappe.throw(_("Next Date's day and Repeat on Day of Month must be equal"))
def validate_email_id(self):
if self.notify_by_email:
if self.recipients:
email_list = split_emails(self.recipients.replace("\n", ""))
from frappe.utils import validate_email_add
for email in email_list:
if not validate_email_add(email):
frappe.throw(_("{0} is an invalid email address in 'Recipients'").format(email))
else:
frappe.throw(_("'Recipients' not specified"))
def set_next_schedule_date(self):
self.next_schedule_date = get_next_schedule_date(self.start_date,
self.frequency, self.repeat_on_day)
def update_subscription_id(self):
doc = frappe.get_doc(self.reference_doctype, self.reference_document)
if not doc.meta.get_field('subscription'):
frappe.throw(_("Add custom field Subscription Id in the doctype {0}").format(self.reference_doctype))
doc.db_set('subscription', self.name)
def update_status(self):
self.status = {
'0': 'Draft',
'1': 'Submitted',
'2': 'Cancelled'
}[cstr(self.docstatus or 0)]
def get_next_schedule_date(start_date, frequency, repeat_on_day):
mcount = month_map.get(frequency)
if mcount:
next_date = get_next_date(start_date, mcount, repeat_on_day)
else:
days = 7 if frequency == 'Weekly' else 1
next_date = add_days(start_date, days)
return next_date
def make_subscription_entry(date=None):
date = date or today()
for data in get_subscription_entries(date):
schedule_date = getdate(data.next_schedule_date)
while schedule_date <= getdate(today()):
create_documents(data, schedule_date)
schedule_date = get_next_schedule_date(schedule_date,
data.frequency, data.repeat_on_day)
if schedule_date:
frappe.db.set_value('Subscription', data.name, 'next_schedule_date', schedule_date)
def get_subscription_entries(date):
return frappe.db.sql(""" select * from `tabSubscription`
where docstatus = 1 and next_schedule_date <=%s
and reference_document is not null and reference_document != ''
and next_schedule_date <= ifnull(end_date, '2199-12-31')
and ifnull(disabled, 0) = 0""", (date), as_dict=1)
def create_documents(data, schedule_date):
try:
doc = make_new_document(data, schedule_date)
if data.notify_by_email:
send_notification(doc, data.print_format, data.recipients)
frappe.db.commit()
except Exception:
frappe.db.rollback()
frappe.db.begin()
frappe.log_error(frappe.get_traceback())
frappe.db.commit()
if data.reference_document and not frappe.flags.in_test:
notify_error_to_user(data)
def notify_error_to_user(data):
party = ''
party_type = ''
if data.reference_doctype in ['Sales Order', 'Sales Invoice', 'Delivery Note']:
party_type = 'customer'
elif data.reference_doctype in ['Purchase Order', 'Purchase Invoice', 'Purchase Receipt']:
party_type = 'supplier'
if party_type:
party = frappe.db.get_value(data.reference_doctype, data.reference_document, party_type)
notify_errors(data.reference_document, data.reference_doctype, party, data.owner)
def make_new_document(args, schedule_date):
doc = frappe.get_doc(args.reference_doctype, args.reference_document)
new_doc = frappe.copy_doc(doc, ignore_no_copy=False)
update_doc(new_doc, doc , args, schedule_date)
new_doc.insert(ignore_permissions=True)
if args.submit_on_creation:
new_doc.submit()
return new_doc
def update_doc(new_document, reference_doc, args, schedule_date):
new_document.docstatus = 0
if new_document.meta.get_field('set_posting_time'):
new_document.set('set_posting_time', 1)
if new_document.meta.get_field('subscription'):
new_document.set('subscription', args.name)
new_document.run_method("on_recurring", reference_doc=reference_doc, subscription_doc=args)
for data in new_document.meta.fields:
if data.fieldtype == 'Date' and data.reqd:
new_document.set(data.fieldname, schedule_date)
def get_next_date(dt, mcount, day=None):
dt = getdate(dt)
dt += relativedelta(months=mcount, day=day)
return dt
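# A minimal illustrative sketch (not part of the original module; the dates are
# made up): get_next_date() leans on dateutil's relativedelta, which clamps
# `day` to the last day of the target month, e.g.
#
#     get_next_date('2017-01-31', 1, 31)    # -> 2017-02-28
#     get_next_date('2017-01-15', 3, None)  # -> 2017-04-15
#
# This clamping is why validate_next_schedule_date() above only raises when
# repeat_on_day is strictly smaller than the last day of the scheduled month.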
def send_notification(new_rv, print_format='Standard', recipients=None):
"""Notify concerned persons about recurring document generation"""
recipients = recipients or new_rv.notification_email_address
print_format = print_format or new_rv.recurring_print_format
frappe.sendmail(recipients,
subject= _("New {0}: #{1}").format(new_rv.doctype, new_rv.name),
message = _("Please find attached {0} #{1}").format(new_rv.doctype, new_rv.name),
attachments = [frappe.attach_print(new_rv.doctype, new_rv.name, file_name=new_rv.name, print_format=print_format)])
def notify_errors(doc, doctype, party, owner):
recipients = get_system_managers(only_name=True)
frappe.sendmail(recipients + [frappe.db.get_value("User", owner, "email")],
subject="[Urgent] Error while creating recurring %s for %s" % (doctype, doc),
message = frappe.get_template("templates/emails/recurring_document_failed.html").render({
"type": doctype,
"name": doc,
"party": party or ""
}))
assign_task_to_owner(doc, doctype, "Recurring Invoice Failed", recipients)
def assign_task_to_owner(doc, doctype, msg, users):
for d in users:
args = {
'assign_to' : d,
'doctype' : doctype,
'name' : doc,
'description' : msg,
'priority' : 'High'
}
assign_to.add(args)
@frappe.whitelist()
def make_subscription(doctype, docname):
doc = frappe.new_doc('Subscription')
doc.reference_doctype = doctype
doc.reference_document = docname
return doc
|
mbauskar/erpnext
|
erpnext/subscription/doctype/subscription/subscription.py
|
Python
|
gpl-3.0
| 7,116
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file ...
"""
class RuledVariable(object):
def __init__(self, model):
self.__model = model
self.__isRuled = False
self.__isRuledBy = None
def setRuledBy(self, rule, shift=0):
self.__isRuled = True
self.__isRuledBy = rule.objId + shift
def unsetRuledBy(self):
self.__isRuled = False
		self.__isRuledBy = None
def isRuled(self):
return self.__isRuled
def isRuledBy(self):
if self.isRuled():
return self.__model.listOfRules[self.__isRuledBy]
else:
return None
def isRateRuled(self):
""" Tests is the compartment size is computed with a rate rule """
return self.isRuled() and self.__model.listOfRules[self.__isRuledBy].isRate()
def isAssignmentRuled(self):
""" Tests is the compartment size is computed with a rate rule """
return self.isRuled() and self.__model.listOfRules[self.__isRuledBy].isAssignment()
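# A minimal usage sketch (not part of the original module; `model`, `rule` and
# `variable` are assumed objects exposing the attributes used above):
#
#     variable.setRuledBy(rule)   # remembers rule.objId in the variable
#     variable.isRuled()          # -> True
#     variable.isRuledBy()        # -> model.listOfRules[rule.objId]
#     variable.unsetRuledBy()     # clears the flag again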
|
vincent-noel/libSigNetSim
|
libsignetsim/model/sbml/RuledVariable.py
|
Python
|
gpl-3.0
| 1,666
|
#standard packages
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import sqlite3
import pickle
import re
from flask import json
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.model_selection import ShuffleSplit, RandomizedSearchCV
from sklearn.base import BaseEstimator
from scipy.stats import randint, expon, norm
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score
from sklearn.metrics import make_scorer
from nltk.corpus import stopwords
from nltk.stem.snowball import DutchStemmer
from functools import partial,reduce
import os
localdir = '/app'
path_database = localdir + '/static/data/databases/'
filename_database = 'database_NA_v1.db'
path_thresholds =localdir + '/static/Images/'
filename_thresholds = 'thresholds.npy'
path_confusion_matrix = localdir + '/static/Images/confusion_matrices_NA/'
path_wordcloud = localdir + '/static/Images/wordcloud_NA/'
path_pies = localdir + '/static/Images/pies_NA/'
path_rocs = localdir + '/static/Images/rocs_NA/'
path_models = localdir + '/static/Images/models_NA/'
path_emails_feature_importance = localdir + '/static/Images/Emails/feature_importance_email_NA/'
path_emails_pie_prob = localdir + '/static/Images/Emails/pie_probability_NA/'
path_info_images = localdir + '/static/Images/'
filename_info_images = 'filenames_imagesNA.npy'
path_json_info_email_images = localdir + '/static/Images/Emails/'
filename_json_info_email_images = 'json_email_data_NA.txt'
path_user_email_images = localdir + '/static/Images/Emails/Users/'
def tokenize(text, stop, stemmer):
"""Converts text to tokens."""
# tokens = word_tokenize(text, language='dutch')
tokens = [word.lower() for word in text.split()]
tokens = [i for i in tokens if i not in stop]
tokens = ["".join(re.findall("[a-zA-Z]+", word)) for word in tokens]
tokens = list(filter(lambda x: len(x) > 2, tokens))
# tokens = [stemmer.stem(word) for word in tokens]
return tokens
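# A minimal illustrative sketch (not part of the original module; the sentence is
# made up and assumes 'dit', 'is' and 'een' appear in the NLTK Dutch stop list):
#
#     tokenize("Dit is een belangrijke taak 2017", stop, stemmer)
#     # -> ['belangrijke', 'taak']
#
# i.e. lower-case, drop stop words, strip non-letter characters and keep only
# tokens longer than two characters.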
model_dict = {'mnb': MultinomialNB(fit_prior=False), 'rf': RandomForestClassifier(n_estimators=50),
'etr': ExtraTreesClassifier(n_estimators=50)}
def load_filenames_images():
filenames_dict = np.load(path_info_images+filename_info_images).item()
return filenames_dict
def get_threshold_dic():
return np.load(path_thresholds+filename_thresholds ).item()
def set_threshold_dic(name_model,new_thres):
old_thresholds = np.load(path_thresholds+filename_thresholds ).item()
print('delete old thresholds...')
os.remove(path_thresholds+filename_thresholds)
old_thresholds[name_model] = new_thres
np.save(path_thresholds+filename_thresholds , old_thresholds)
threshold_dic = get_threshold_dic()
def get_estimator(model_name = 'mnb'):
    # Use a distinct local name so the imported nltk 'stopwords' corpus module is
    # not shadowed (rebinding 'stopwords' here would raise UnboundLocalError).
    dutch_stopwords = set(stopwords.words('dutch'))
    dutch_stemmer = DutchStemmer()
model = model_dict[model_name]
estimator = Pipeline(steps=[
('vectorizer', TfidfVectorizer(input=u'content', encoding=u'latin1', decode_error=u'strict', strip_accents=None,
lowercase=True,
preprocessor=None,
                                       tokenizer=partial(tokenize, stop=dutch_stopwords, stemmer=dutch_stemmer),
analyzer=u'word', # stop_words=(stopwords.words('dutch')),
ngram_range=(1, 3), # max_df=0.9, min_df=0.005,
max_features=10000, vocabulary=None, binary=False,
norm=u'l1', use_idf=True, smooth_idf=True, sublinear_tf=False)),
('classifier', model)
]
)
return estimator
def get_train_test(path_database,filename_database,test_size=0.3):
df = pd.DataFrame()
index_ = 0
conn = sqlite3.connect(path_database+filename_database)
c = conn.cursor()
c.execute('SELECT mail_id,body,truth_class FROM TABLE_MAILS ')
for x in c:
df = pd.concat([df, pd.DataFrame({'Id': x[0], 'body': x[1], 'Target': x[2]}, index=[index_])])
index_ += 1
conn.close()
df = df.loc[(df['body'].notnull()) & (df['Target'].notnull()), :]
X = df['body'].astype(str).values
y = df['Target'].astype(int).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=0)
return X_train, X_test, y_train, y_test
#
# def get_mail_test(mail_id):
# X,y,df = get_mail(path_database,filename_database,mail_id)
# return df
def get_mail(path_database,filename_database,mail_id):
df = pd.DataFrame()
index_ = 0
conn = sqlite3.connect(path_database+filename_database)
c = conn.cursor()
c.execute('SELECT mail_id,body,truth_class,date_sent,from_email_address,subject FROM TABLE_MAILS where mail_id=?',[(mail_id)])
for x in c:
df = pd.concat([df, pd.DataFrame({'Id': x[0], 'body': x[1], 'Target': x[2], 'Date':x[3],'From':x[4],'Subject':x[5]}, index=[index_])])
index_ += 1
break
conn.close()
X = df['body'].astype(str).values
target = df['Target'].astype(str).values[0]
return X,target,df
def get_n_mails_of(path_database,filename_database, nmails=10, address=''):
index_ = 0
df = pd.DataFrame()
conn = sqlite3.connect(path_database + filename_database)
c = conn.cursor()
c.execute('SELECT mail_id,body,truth_class,date_sent,from_email_address,subject FROM TABLE_MAILS ')
for x in c:
df = pd.concat([df, pd.DataFrame({'Id': x[0], 'body': x[1], 'Target': x[2], 'Date':x[3],'From':x[4],'Subject':x[5]}, index=[index_])])
index_ += 1
if(index_>=nmails):
break
conn.close()
print(df.columns)
return df
def fit_model(X,y,estimator,weights = [0.49,0.5]):
sample_weights = (y == 0) * weights[0] + (y == 1) * weights[1]
estimator.fit(X, y, **{'classifier__sample_weight': sample_weights} )
return estimator
def predict_target(X,name_model,estimator):
th = threshold_dic[name_model]
y_score = estimator.predict_proba(X)
y_pred = (y_score[:, 0] < th).astype(int)
return y_pred
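# A minimal illustrative sketch (not part of the original module; the threshold
# and scores are assumptions): predict_proba() yields [P(TAAK), P(NON_TAAK)] per
# row, so with a threshold of 0.4 a row scoring [0.35, 0.65] gets label 1
# (NON_TAAK), because 0.35 < 0.4.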
def fit_grid_search(X,y,name_model='mnb',n_splits=3,n_iter=10):
class weightEst(BaseEstimator):
def __init__(self, w_0, w_1, thres):
self.w_0 = w_0
self.w_1 = w_1
self.thres = thres
self.estimator = get_estimator(name_model)
def fit(self, X, y):
weight = self.w_0 * (y == 0) + self.w_1 * (y == 1)
self.estimator.fit(X, y, **{'classifier__sample_weight': weight} )
return self
def predict(self, X):
score = self.estimator.predict_proba(X)
ypred = (score[:, 0] < self.thres).astype(int)
return ypred
def predict_proba(self, X):
score = self.estimator.predict_proba(X)
return score
def get_params(self, deep=True):
params = {'w_0': self.w_0, 'w_1': self.w_1, 'thres': self.thres}
return params
def set_params(self, **params):
self.w_0 = params['w_0']
self.w_1 = params['w_1']
self.thres = params['thres']
return self
estimator = weightEst(0.5, 0.5, 0.5)
cv_dev = ShuffleSplit(n_splits=n_splits, test_size=0.3)
scorer = make_scorer(accuracy_score)
grid_search = RandomizedSearchCV(estimator,
scoring=scorer,
refit=True,
cv=cv_dev,
n_iter=n_iter,
param_distributions={'w_0': norm(0.5, 0.1), 'w_1': norm(0.5, 0.1),
'thres': norm(0.5, 0.1)},
verbose=4
)
grid_search.fit(X, y)
clf = grid_search.best_estimator_
print('Best Parameters...')
print(grid_search.best_params_)
print('Best Score...')
print(grid_search.best_score_)
return {'opt_estimator':clf.estimator,'opt_weight_taak':clf.w_0,'opt_weight_non_taak':clf.w_1,'opt_thres':clf.thres}
########################################################################################################################
# MODEL PROPERTIES #
########################################################################################################################
def get_logProb(estimator,name_model,class_label):
if (name_model == 'mnb'):
logProb = estimator.named_steps['classifier'].feature_log_prob_
if(class_label == 'NON_TAAK'):
return logProb[1,:]
elif(class_label == 'TAAK'):
return logProb[0,:]
else:
return None
elif(name_model == 'rf'):
p = estimator.named_steps['classifier'].feature_importances_
logProb = np.log( 1e-10 + p/np.sum(p) )
return logProb
elif(name_model == 'etr'):
p = estimator.named_steps['classifier'].feature_importances_
logProb = np.log( 1e-10 + p/np.sum(p) )
return logProb
else:
return None
def get_model_properties(estimator,name_model,class_label):
log_probs = get_logProb(estimator,name_model,class_label)
words_key = estimator.named_steps['vectorizer'].vocabulary_
key_words = dict(zip([item[1] for item in words_key.items()],[item[0] for item in words_key.items()]))
return log_probs,words_key,key_words
########################################################################################################################
# FIGURES #
########################################################################################################################
def add_new_email_images(mail_id,user='Mette'):
spam_ham_dic = {'0': 'TAAK', '1': 'NON_TAAK'}
def shorten_word(word,MAX_LEN=35):
if len(word)>MAX_LEN:
return word[:MAX_LEN]+'...'
return word
with open(path_json_info_email_images + "Users/"+user+'/'+ filename_json_info_email_images, 'r') as outfile:
json_email_data = json.load(outfile)
print(json_email_data.keys())
X,target,df = get_mail(path_database, filename_database, mail_id)
for name_model in model_dict.keys():
for filename in os.listdir(path_models+name_model+'/'):
if ( filename.split('.')[1]== 'pkl'):
filename_model = filename
break
with open(path_models + name_model+'/'+filename_model, 'rb') as fid:
estimator = pickle.load(fid)
log_probs, words_key, key_words = get_model_properties(estimator, name_model, 'TAAK')
body = X
date = df['Date']
_from = df['From']
subject = df['Subject']
X_transformed = estimator.named_steps['vectorizer'].transform(body)
word_list = create_word_list(X_transformed, estimator, name_model, key_words)
score = estimator.predict_proba(body)
y_pred = int(score[0][0] < threshold_dic[name_model])
print(X_transformed.shape)
html_body = return_html_body(body[0], word_list, y_pred, top_n_words=20)
extra_info = 'email_' + mail_id.replace('.','').replace('>','').replace('<','').replace('/','').replace('\\','')
create_prob_pie_email(name_model, score[0][0], extra_info , user,threshold_dic[name_model])
create_feature_importance_email(name_model, word_list, extra_info ,user, top_n_words=5)
print('here...')
#print(y)
email_data = {'pred': spam_ham_dic[str(y_pred)],
'truth': spam_ham_dic.get(target,'NONE'),
'date': date[0],
'from': _from[0],
'subject': shorten_word(subject[0]),
'html_body': html_body,
'eFimp': "/static/Images/Emails/Users/"+user+'/feature_importance_email_NA/' + name_model + '/' + "efeature_imp_" + extra_info + '.png',
'epie': "/static/Images/Emails/Users/" +user+'/pie_probability_NA/'+ name_model + '/' + "epie_prob_" + extra_info + '.png'}
if name_model not in json_email_data.keys():
json_email_data[name_model] = list([email_data])
else:
json_email_data[name_model]+= [email_data]
print('Remove old file...')
os.remove(path_json_info_email_images + "Users/"+user+'/'+ filename_json_info_email_images)
print('Create new file')
with open( path_json_info_email_images + "Users/"+user+'/'+ filename_json_info_email_images, 'w') as outfile:
json.dump(json_email_data, outfile)
def clean_dir(pathdir,extra_dir = ''):
'''
:param pathdir:
:return: deletes all .png and .txt within the dir
'''
for filename in os.listdir(pathdir+extra_dir):
if (filename.split('.')[1]== 'txt') or (filename.split('.')[1]== 'png')or (filename.split('.')[1]== 'pkl'):
print('Deleting File: '+str(filename) )
os.remove(pathdir+extra_dir+filename)
def clean_file(pathdir,selectFilename):
'''
:param pathdir:
:return: deletes all .png and .txt within the dir
'''
for filename in os.listdir(pathdir):
if (filename== selectFilename):
print('Deleting File: '+str(filename) )
os.remove(pathdir+filename)
if __name__ == '__main__':
X_train, X_test, y_train, y_test = get_train_test(path_database, filename_database, test_size=0.3)
fit_grid_search(X_train,y_train,name_model='etr',n_splits=3,n_iter=10)
|
discipl/NAML
|
app/ml_model_v1.py
|
Python
|
gpl-3.0
| 14,098
|
from django.db import models
from django.contrib.auth.models import User
class Announcement(models.Model):
author = models.ForeignKey(
User,
related_name='+',
verbose_name='Reporter'
)
title = models.CharField(
verbose_name='Title',
max_length=256
)
created = models.DateTimeField(
verbose_name='Created',
auto_now_add=True,
editable=False
)
body = models.TextField(
verbose_name='body'
)
|
freieslabor/info-display
|
info_display/screens/announcer/models.py
|
Python
|
mpl-2.0
| 498
|
# -*- coding: utf-8 -*-
"""Tests for the Source class."""
from __future__ import unicode_literals
import mock
import pytest
from kuma.scrape.sources import Source
from . import mock_requester, mock_storage
class FakeSource(Source):
"""A Fake source for testing shared Source functionality."""
PARAM_NAME = 'name'
OPTIONS = {
'pressed': ('bool', False),
'length': ('int', 0),
'unbounded': ('int_all', 0),
'flavor': ('text', ''),
}
def test_init_param():
"""Omitted Source parameters are initialized to defaults."""
source = FakeSource('param')
assert source.name == 'param'
assert source.length == 0
assert source.pressed is False
assert source.unbounded == 0
assert source.flavor == ''
@pytest.mark.parametrize(
'option,value',
(('pressed', True),
('length', 1),
('unbounded', 'all'),
('flavor', 'curry'),
), ids=('bool', 'int', 'int_all', 'text'))
def test_init_options(option, value):
"""Source parameters are initialized by name."""
source = FakeSource('popcorn', **{option: value})
assert source.name == 'popcorn'
assert getattr(source, option) == value
def test_init_invalid_option():
"""An invalid parameter name raises an exception."""
with pytest.raises(Exception):
FakeSource('param', unknown=1)
def test_merge_none():
"""An empty merge does not change the Source state."""
source = FakeSource('merge')
source.state = source.STATE_PREREQ
assert source.merge_options() == {}
assert source.state == source.STATE_PREREQ
@pytest.mark.parametrize(
'option,lesser_value,greater_value',
(('pressed', False, True),
('length', 1, 2),
('unbounded', 2, 3),
), ids=('bool', 'int', 'int_all'))
def test_merge_less(option, lesser_value, greater_value):
"""A merge to smaller parameters keeps the current values and state."""
source = FakeSource('merge', **{option: greater_value})
source.state = source.STATE_PREREQ
assert source.merge_options(**{option: lesser_value}) == {}
assert getattr(source, option) == greater_value
assert source.state == source.STATE_PREREQ
@pytest.mark.parametrize(
'option,value',
(('pressed', True),
('length', 2),
('unbounded', 1),
('flavor', 'country'),
), ids=('bool', 'int', 'int_all', 'text'))
def test_merge_same(option, value):
"""A merge with the current values keeps the current state."""
source = FakeSource('merge', **{option: value})
source.state = source.STATE_PREREQ
assert source.merge_options(**{option: value}) == {}
assert getattr(source, option) == value
assert source.state == source.STATE_PREREQ
@pytest.mark.parametrize(
'option,lesser_value,greater_value',
(('pressed', False, True),
('length', 1, 2),
('unbounded', 2, 3),
), ids=('bool', 'int', 'int_all'))
def test_merge_upgrade(option, lesser_value, greater_value):
"""An updating merge updates the values and resets the state."""
source = FakeSource('merge', **{option: lesser_value})
source.state = source.STATE_PREREQ
result = source.merge_options(**{option: greater_value})
assert result == {option: greater_value}
assert getattr(source, option) == greater_value
assert source.state == source.STATE_INIT
def test_merge_more_multiple():
"""Multiple parameters can be updated in one merge call."""
source = FakeSource('merge')
res = source.merge_options(
length=1, pressed=True, unbounded=1, flavor='salty')
assert res == {
'length': 1, 'pressed': True, 'unbounded': 1, 'flavor': 'salty'}
def test_merge_int_all():
"""For the 'int_all' parameter type, 'all' is a valid and maximum value."""
source = FakeSource('merge')
assert source.merge_options(unbounded='all') == {'unbounded': 'all'}
assert source.merge_options(unbounded='all') == {}
def test_merge_text():
"""For the 'text' parameter type, any non-empty change is an update."""
source = FakeSource('merge')
assert source.merge_options(flavor='sweet') == {'flavor': 'sweet'}
assert source.merge_options(flavor='sour') == {'flavor': 'sour'}
assert source.merge_options(flavor='sour') == {}
assert source.merge_options(flavor='sweet') == {'flavor': 'sweet'}
assert source.merge_options(flavor='') == {}
def test_current_options_default():
"""current_options returns empty dict for default options."""
source = FakeSource('default')
assert source.current_options() == {}
@pytest.mark.parametrize(
'option,value',
(('pressed', True),
('length', 1),
('unbounded', 'all'),
('flavor', 'curry'),
), ids=('bool', 'int', 'int_all', 'text'))
def test_current_options_nondefault(option, value):
"""current_options returns the non-default options as a dict."""
source = FakeSource('default', **{option: value})
assert source.current_options() == {option: value}
@pytest.mark.parametrize(
'option_type,option,bad_value',
(('bool', 'pressed', 1),
('int', 'length', '0'),
('int_all', 'unbounded', '1'),
('text', 'flavor', 1),
), ids=('bool', 'int', 'int_all', 'text'))
def test_invalid_values(option_type, option, bad_value):
"""Invalid parameter values raise a ValueError."""
with pytest.raises(ValueError) as err:
FakeSource('fails', **{option: bad_value})
assert option_type in str(err.value)
@pytest.mark.parametrize(
"href,decoded", [
(b'binary', u'binary'),
(b'%E7%A7%BB%E8%A1%8C%E4%BA%88%E5%AE%9A', u'移行予定'),
(u'Slug#Anchor_\u2014_With_Dash', u'Slug#Anchor_\u2014_With_Dash'),
])
def test_decode_href(href, decoded):
"""Source.decode_href() turns URL-encoded hrefs into unicode strings."""
source = FakeSource('conversions')
assert decoded == source.decode_href(href)
def test_source_error_str():
"""The Source.Error exception can be turned into a string."""
error1 = Source.SourceError('A simple error')
assert "%s" % error1 == 'A simple error'
error2 = Source.SourceError('A formatted error, like "%s" and %d.',
"a string", 123)
assert "%s" % error2 == 'A formatted error, like "a string" and 123.'
def test_gather_done_is_done():
"""A source that is done can still be gathered."""
source = FakeSource('existing')
source.state = source.STATE_DONE
assert source.gather(mock_requester(), mock_storage()) == []
assert source.state == source.STATE_DONE
assert source.freshness == source.FRESH_UNKNOWN
def test_gather_load_storage_existing():
"""A source that is already in storage loads quickly."""
source = FakeSource('existing')
source.load_and_validate_existing = mock.Mock(
return_value=(True, ['next']))
ret = source.gather(mock_requester(), mock_storage())
assert ret == ['next']
assert source.state == source.STATE_DONE
assert source.freshness == source.FRESH_NO
def test_gather_load_storage_error():
"""A source can raise an error when loading from storage."""
source = FakeSource('existing')
source.load_and_validate_existing = mock.Mock(
side_effect=source.SourceError('Storage complained.'))
ret = source.gather(mock_requester(), mock_storage())
assert ret == []
assert source.state == source.STATE_ERROR
assert source.freshness == source.FRESH_UNKNOWN
def test_gather_load_prereqs_more_needed():
"""A source can request other sources as prerequisites."""
source = FakeSource('needs_prereqs')
data = {'needs': ['bonus']}
source.load_prereqs = mock.Mock(return_value=(False, data))
ret = source.gather(mock_requester(), mock_storage())
assert ret == ['bonus']
assert source.state == source.STATE_PREREQ
assert source.freshness == source.FRESH_UNKNOWN
def test_gather_load_prereqs_error():
"""A source may raise an error when loading prerequisites."""
source = FakeSource('bad_prereqs')
source.load_prereqs = mock.Mock(side_effect=source.SourceError('bad'))
ret = source.gather(mock_requester(), mock_storage())
assert ret == []
assert source.state == source.STATE_ERROR
assert source.freshness == source.FRESH_UNKNOWN
def test_gather_save_data_error():
"""A source can fail when saving the data."""
source = FakeSource('needs_prereqs')
source.load_prereqs = mock.Mock(return_value=(True, {}))
source.save_data = mock.Mock(side_effect=source.SourceError('failed'))
ret = source.gather(mock_requester(), mock_storage())
assert ret == []
assert source.state == source.STATE_ERROR
assert source.freshness == source.FRESH_YES
def test_gather_success_with_more_sources():
"""A source with all prereqs can request further sources."""
source = FakeSource('needs_prereqs')
source.load_prereqs = mock.Mock(return_value=(True, {}))
source.save_data = mock.Mock(return_value=['bonus'])
ret = source.gather(mock_requester(), mock_storage())
assert ret == ['bonus']
assert source.state == source.STATE_DONE
assert source.freshness == source.FRESH_YES
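# A rough summary sketch of the gather() contract exercised above (not part of
# the original test module):
#
#     load_and_validate_existing() -> (True, next_sources)   => STATE_DONE, FRESH_NO
#     load_prereqs() -> (False, {'needs': [...]})             => STATE_PREREQ
#     load_prereqs() -> (True, data); save_data() -> sources  => STATE_DONE, FRESH_YES
#     any step raising SourceError                            => STATE_ERROR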
|
jwhitlock/kuma
|
kuma/scrape/tests/test_source.py
|
Python
|
mpl-2.0
| 9,151
|
# -*- coding: utf-8 -*-
'''
Library for JSON RPC 2.0 and BSON RPC
'''
from bsonrpc.exceptions import BsonRpcError
from bsonrpc.framing import (
JSONFramingNetstring, JSONFramingNone, JSONFramingRFC7464)
from bsonrpc.interfaces import (
notification, request, rpc_notification, rpc_request, service_class)
from bsonrpc.options import NoArgumentsPresentation, ThreadingModel
from bsonrpc.rpc import BSONRpc, JSONRpc
from bsonrpc.util import BatchBuilder
__version__ = '0.2.1'
__license__ = 'http://mozilla.org/MPL/2.0/'
__all__ = [
'BSONRpc',
'BatchBuilder',
'BsonRpcError',
'JSONFramingNetstring',
'JSONFramingNone',
'JSONFramingRFC7464',
'JSONRpc',
'NoArgumentsPresentation',
'ThreadingModel',
'notification',
'request',
'rpc_notification',
'rpc_request',
'service_class',
]
|
seprich/py-bson-rpc
|
bsonrpc/__init__.py
|
Python
|
mpl-2.0
| 843
|
# -*- coding: utf-8 -*-
params = {
'volume': 19,
'title': u'全新影音平台「Firefox Vision 狐電視」正式上線、Firefox OS App 開發入門系列影片 (中文字幕)',
}
|
elin-moco/bedrock
|
bedrock/newsletter/templates/newsletter/2014-04-14/config.py
|
Python
|
mpl-2.0
| 191
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.firefox.new.download import DownloadPage
# ?v=a param added temporarily to evade a traffic cop experiment
# See https://github.com/mozilla/bedrock/issues/9019
@pytest.mark.sanity
@pytest.mark.nondestructive
def test_download_button_displayed(base_url, selenium):
page = DownloadPage(selenium, base_url, params='?v=a').open()
assert page.download_button.is_displayed
# Firefox and Internet Explorer don't cope well with file prompts whilst using Selenium.
@pytest.mark.skip_if_firefox(reason='http://saucelabs.com/jobs/5a8a62a7620f489d92d6193fa67cf66b')
@pytest.mark.skip_if_internet_explorer(reason='https://github.com/SeleniumHQ/selenium/issues/448')
@pytest.mark.nondestructive
def test_click_download_button(base_url, selenium):
page = DownloadPage(selenium, base_url, params='?v=a').open()
thank_you_page = page.download_firefox()
assert thank_you_page.seed_url in selenium.current_url
@pytest.mark.nondestructive
def test_other_platforms_modal(base_url, selenium):
page = DownloadPage(selenium, base_url, params='?v=a').open()
modal = page.open_other_platforms_modal()
assert modal.is_displayed
modal.close()
@pytest.mark.nondestructive
@pytest.mark.skip_if_not_firefox(reason='Join Firefox form is only displayed to Firefox users')
def test_firefox_account_modal(base_url, selenium):
page = DownloadPage(selenium, base_url, params='?v=a').open()
modal = page.open_join_firefox_modal()
assert modal.is_displayed
modal.close()
|
ericawright/bedrock
|
tests/functional/firefox/new/test_download.py
|
Python
|
mpl-2.0
| 1,718
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""This is an extension to configman for Socorro. It creates a ValueSource
object that is also a 'from_string_converter'. It is tailored to work with
the Socorro 'application' configuration parameter. Once configman has made
a final determination as to which application to actually run, this class
allows Configman to go to that application and fetch its preferred defaults
for the rest of options required by that application."""
from configman.converters import str_to_python_object
from configman.dotdict import DotDict
#==============================================================================
class ApplicationDefaultsProxy(object):
"""a placeholder class that will induce configman to query the application
object for the application's preferred defaults. """
def __init__(self):
self.application_defaults = DotDict()
self.apps = self.find_all_the_apps()
#--------------------------------------------------------------------------
def str_to_application_class(self, an_app_key):
"""a configman compatible str_to_* converter"""
try:
app_class = str_to_python_object(self.apps[an_app_key])
except KeyError:
app_class = str_to_python_object(an_app_key)
try:
self.application_defaults = DotDict(
app_class.get_application_defaults()
)
except AttributeError:
# no get_application_defaults, skip this step
pass
return app_class
#--------------------------------------------------------------------------
@staticmethod
def find_all_the_apps():
"""in the future, re-implement this as an automatic discovery service
"""
return {
'collector': 'collector.collector_app.CollectorApp',
'collector2015': 'collector.collector_app.Collector2015App',
'crashmover': 'collector.crashmover_app.CrashMoverApp',
'fetch': 'collector.external.fetch_app.FetchApp',
'copy_processed': 'collector.collector.crashmover_app.ProcessedCrashCopierApp',
'copy_raw_and_processed': 'collector.collector.crashmover_app.RawAndProcessedCopierApp',
'reprocess_crashlist': 'collector.external.rabbitmq.reprocess_crashlist.ReprocessCrashlistApp',
'purge_rmq': 'collector.external.rabbitmq.purge_queue_app.PurgeRabbitMQQueueApp',
}
can_handle = (
ApplicationDefaultsProxy
)
#==============================================================================
class ValueSource(object):
"""This is meant to be used as both a value source and a from string
converter. An instance, as a value source, always returns an empty
dictionary from its 'get_values' method. However, if it gets used as
a 'from string' converter, the 'get_values' behavior changes. Just before
the 'from string' converter returns the conversion result, this class calls
the method 'get_application_defaults' on it and saves the result. That
saved result becomes the new value for 'get_values' to return.
    The end result is that an app that has a preferred set of defaults can still
get them loaded and used even if the app was itself loaded through
Configman.
"""
#--------------------------------------------------------------------------
def __init__(self, source, the_config_manager=None):
self.source = source
#--------------------------------------------------------------------------
def get_values(self, config_manager, ignore_mismatches, obj_hook=DotDict):
if isinstance(self.source.application_defaults, obj_hook):
return self.source.application_defaults
return obj_hook(self.source.application_defaults)
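# A minimal illustrative sketch (not part of the original module; 'collector' is
# one of the keys registered in find_all_the_apps()):
#
#     proxy = ApplicationDefaultsProxy()
#     app_class = proxy.str_to_application_class('collector')
#     # proxy.application_defaults now holds app_class.get_application_defaults()
#     # (when the app class defines that hook)
#     ValueSource(proxy).get_values(config_manager=None, ignore_mismatches=True)
#     # -> those same defaults, returned as a DotDict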
|
willkg/socorro-collector
|
collector/app/for_application_defaults.py
|
Python
|
mpl-2.0
| 3,980
|
#!/usr/bin/env python
#
# Copyright (c) 2011-2013, Shopkick Inc.
# All rights reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ---
# Author: John Egan <jwegan@gmail.com>
from flawless.lib.config.config import get
from flawless.lib.config.config import init_config
from flawless.lib.config.config import default_path
from flawless.lib.config.config import OPTIONS
|
shopkick/flawless
|
flawless/lib/config/__init__.py
|
Python
|
mpl-2.0
| 530
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import os
import re
from mozharness.mozilla.testing.errors import TinderBoxPrintRe
from mozharness.base.log import OutputParser, WARNING, INFO, CRITICAL, ERROR
from mozharness.mozilla.buildbot import TBPL_WARNING, TBPL_FAILURE, TBPL_RETRY
from mozharness.mozilla.buildbot import TBPL_SUCCESS, TBPL_WORST_LEVEL_TUPLE
SUITE_CATEGORIES = ['mochitest', 'reftest', 'xpcshell']
def tbox_print_summary(pass_count, fail_count, known_fail_count=None,
crashed=False, leaked=False):
emphasize_fail_text = '<em class="testfail">%s</em>'
if pass_count < 0 or fail_count < 0 or \
(known_fail_count is not None and known_fail_count < 0):
summary = emphasize_fail_text % 'T-FAIL'
elif pass_count == 0 and fail_count == 0 and \
(known_fail_count == 0 or known_fail_count is None):
summary = emphasize_fail_text % 'T-FAIL'
else:
str_fail_count = str(fail_count)
if fail_count > 0:
str_fail_count = emphasize_fail_text % str_fail_count
summary = "%d/%s" % (pass_count, str_fail_count)
if known_fail_count is not None:
summary += "/%d" % known_fail_count
# Format the crash status.
if crashed:
summary += " %s" % emphasize_fail_text % "CRASH"
# Format the leak status.
if leaked is not False:
summary += " %s" % emphasize_fail_text % (
(leaked and "LEAK") or "L-FAIL")
return summary
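# A minimal illustrative sketch (not part of the original module; the counts are
# made up):
#
#     tbox_print_summary(95, 0, 3)
#     # -> '95/0/3'
#     tbox_print_summary(90, 2, 3, crashed=True)
#     # -> '90/<em class="testfail">2</em>/3 <em class="testfail">CRASH</em>'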
class TestSummaryOutputParserHelper(OutputParser):
def __init__(self, regex=re.compile(r'(passed|failed|todo): (\d+)'), **kwargs):
self.regex = regex
self.failed = 0
self.passed = 0
self.todo = 0
self.last_line = None
self.tbpl_status = TBPL_SUCCESS
self.worst_log_level = INFO
super(TestSummaryOutputParserHelper, self).__init__(**kwargs)
def parse_single_line(self, line):
super(TestSummaryOutputParserHelper, self).parse_single_line(line)
self.last_line = line
m = self.regex.search(line)
if m:
try:
setattr(self, m.group(1), int(m.group(2)))
except ValueError:
# ignore bad values
pass
def evaluate_parser(self, return_code, success_codes=None):
if return_code == 0 and self.passed > 0 and self.failed == 0:
self.tbpl_status = TBPL_SUCCESS
elif return_code == 10 and self.failed > 0:
self.tbpl_status = TBPL_WARNING
else:
self.tbpl_status = TBPL_FAILURE
self.worst_log_level = ERROR
return (self.tbpl_status, self.worst_log_level)
def print_summary(self, suite_name):
# generate the TinderboxPrint line for TBPL
emphasize_fail_text = '<em class="testfail">%s</em>'
failed = "0"
if self.passed == 0 and self.failed == 0:
self.tsummary = emphasize_fail_text % "T-FAIL"
else:
if self.failed > 0:
failed = emphasize_fail_text % str(self.failed)
self.tsummary = "%d/%s/%d" % (self.passed, failed, self.todo)
self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, self.tsummary))
def append_tinderboxprint_line(self, suite_name):
self.print_summary(suite_name)
class DesktopUnittestOutputParser(OutputParser):
"""
A class that extends OutputParser such that it can parse the number of
passed/failed/todo tests from the output.
"""
def __init__(self, suite_category, **kwargs):
# worst_log_level defined already in DesktopUnittestOutputParser
# but is here to make pylint happy
self.worst_log_level = INFO
super(DesktopUnittestOutputParser, self).__init__(**kwargs)
self.summary_suite_re = TinderBoxPrintRe.get('%s_summary' % suite_category, {})
self.harness_error_re = TinderBoxPrintRe['harness_error']['minimum_regex']
self.full_harness_error_re = TinderBoxPrintRe['harness_error']['full_regex']
self.harness_retry_re = TinderBoxPrintRe['harness_error']['retry_regex']
self.fail_count = -1
self.pass_count = -1
# known_fail_count does not exist for some suites
self.known_fail_count = self.summary_suite_re.get('known_fail_group') and -1
self.crashed, self.leaked = False, False
self.tbpl_status = TBPL_SUCCESS
def parse_single_line(self, line):
if self.summary_suite_re:
summary_m = self.summary_suite_re['regex'].match(line) # pass/fail/todo
if summary_m:
message = ' %s' % line
log_level = INFO
# remove all the none values in groups() so this will work
# with all suites including mochitest browser-chrome
summary_match_list = [group for group in summary_m.groups()
if group is not None]
r = summary_match_list[0]
if self.summary_suite_re['pass_group'] in r:
if len(summary_match_list) > 1:
self.pass_count = int(summary_match_list[-1])
else:
# This handles suites that either pass or report
# number of failures. We need to set both
# pass and fail count in the pass case.
self.pass_count = 1
self.fail_count = 0
elif self.summary_suite_re['fail_group'] in r:
self.fail_count = int(summary_match_list[-1])
if self.fail_count > 0:
message += '\n One or more unittests failed.'
log_level = WARNING
# If self.summary_suite_re['known_fail_group'] == None,
                # then r should not match it, so this test is fine as is.
elif self.summary_suite_re['known_fail_group'] in r:
self.known_fail_count = int(summary_match_list[-1])
self.log(message, log_level)
return # skip harness check and base parse_single_line
harness_match = self.harness_error_re.match(line)
if harness_match:
self.warning(' %s' % line)
self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
self.tbpl_status = self.worst_level(TBPL_WARNING, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
full_harness_match = self.full_harness_error_re.match(line)
if full_harness_match:
r = full_harness_match.group(1)
if r == "application crashed":
self.crashed = True
elif r == "missing output line for total leaks!":
self.leaked = None
else:
self.leaked = True
return # skip base parse_single_line
if self.harness_retry_re.search(line):
self.critical(' %s' % line)
self.worst_log_level = self.worst_level(CRITICAL, self.worst_log_level)
self.tbpl_status = self.worst_level(TBPL_RETRY, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
return # skip base parse_single_line
super(DesktopUnittestOutputParser, self).parse_single_line(line)
def evaluate_parser(self, return_code, success_codes=None):
success_codes = success_codes or [0]
if self.num_errors: # mozharness ran into a script error
self.tbpl_status = self.worst_level(TBPL_FAILURE, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
# I have to put this outside of parse_single_line because this checks not
        # only if fail_count was more than 0 but also if fail_count is still -1
# (no fail summary line was found)
if self.fail_count != 0:
self.worst_log_level = self.worst_level(WARNING, self.worst_log_level)
self.tbpl_status = self.worst_level(TBPL_WARNING, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
# Account for the possibility that no test summary was output.
if self.pass_count <= 0 and self.fail_count <= 0 and \
(self.known_fail_count is None or self.known_fail_count <= 0):
self.error('No tests run or test summary not found')
self.worst_log_level = self.worst_level(WARNING,
self.worst_log_level)
self.tbpl_status = self.worst_level(TBPL_WARNING,
self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
if return_code not in success_codes:
self.tbpl_status = self.worst_level(TBPL_FAILURE, self.tbpl_status,
levels=TBPL_WORST_LEVEL_TUPLE)
# we can trust in parser.worst_log_level in either case
return (self.tbpl_status, self.worst_log_level)
def append_tinderboxprint_line(self, suite_name):
# We are duplicating a condition (fail_count) from evaluate_parser and
        # parse_single_line, but at little cost since we are not parsing
        # the log more than once. I figured this method should stay isolated as
# it is only here for tbpl highlighted summaries and is not part of
# buildbot evaluation or result status IIUC.
summary = tbox_print_summary(self.pass_count,
self.fail_count,
self.known_fail_count,
self.crashed,
self.leaked)
self.info("TinderboxPrint: %s<br/>%s\n" % (suite_name, summary))
class EmulatorMixin(object):
""" Currently dependent on both TooltoolMixin and TestingMixin)"""
def install_emulator_from_tooltool(self, manifest_path, do_unzip=True):
dirs = self.query_abs_dirs()
if self.tooltool_fetch(manifest_path, output_dir=dirs['abs_work_dir'],
cache=self.config.get("tooltool_cache", None)
):
self.fatal("Unable to download emulator via tooltool!")
if do_unzip:
unzip = self.query_exe("unzip")
unzip_cmd = [unzip, '-q', os.path.join(dirs['abs_work_dir'], "emulator.zip")]
self.run_command(unzip_cmd, cwd=dirs['abs_emulator_dir'], halt_on_failure=True,
fatal_exit_code=3)
def install_emulator(self):
dirs = self.query_abs_dirs()
self.mkdir_p(dirs['abs_emulator_dir'])
if self.config.get('emulator_url'):
self.download_unzip(self.config['emulator_url'], dirs['abs_emulator_dir'])
elif self.config.get('emulator_manifest'):
manifest_path = self.create_tooltool_manifest(self.config['emulator_manifest'])
do_unzip = True
if 'unpack' in self.config['emulator_manifest']:
do_unzip = False
self.install_emulator_from_tooltool(manifest_path, do_unzip)
elif self.buildbot_config:
props = self.buildbot_config.get('properties')
url = 'https://hg.mozilla.org/%s/raw-file/%s/b2g/test/emulator.manifest' % (
props['repo_path'], props['revision'])
manifest_path = self.download_file(url,
file_name='tooltool.tt',
parent_dir=dirs['abs_work_dir'])
if not manifest_path:
self.fatal("Can't download emulator manifest from %s" % url)
self.install_emulator_from_tooltool(manifest_path)
else:
self.fatal("Can't get emulator; set emulator_url or emulator_manifest in the config!")
if self.config.get('tools_manifest'):
manifest_path = self.create_tooltool_manifest(self.config['tools_manifest'])
do_unzip = True
if 'unpack' in self.config['tools_manifest']:
do_unzip = False
self.install_emulator_from_tooltool(manifest_path, do_unzip)
|
cstipkovic/spidermonkey-research
|
testing/mozharness/mozharness/mozilla/testing/unittest.py
|
Python
|
mpl-2.0
| 12,660
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver import Wait
from firefox_ui_harness.decorators import skip_if_e10s, skip_under_xvfb
from firefox_ui_harness import FirefoxTestCase
class TestSSLStatusAfterRestart(FirefoxTestCase):
def setUp(self):
FirefoxTestCase.setUp(self)
self.test_data = (
{
'url': 'https://ssl-dv.mozqa.com',
'identity': '',
'type': 'verifiedDomain'
},
{
'url': 'https://ssl-ev.mozqa.com/',
'identity': 'Mozilla Corporation',
'type': 'verifiedIdentity'
},
{
'url': 'https://ssl-ov.mozqa.com/',
'identity': '',
'type': 'verifiedDomain'
}
)
# Set browser to restore previous session
self.prefs.set_pref('browser.startup.page', 3)
self.identity_popup = self.browser.navbar.locationbar.identity_popup
def tearDown(self):
try:
self.windows.close_all([self.browser])
self.browser.tabbar.close_all_tabs([self.browser.tabbar.tabs[0]])
self.browser.switch_to()
self.identity_popup.close(force=True)
finally:
FirefoxTestCase.tearDown(self)
@skip_if_e10s
@skip_under_xvfb
def test_ssl_status_after_restart(self):
for item in self.test_data:
with self.marionette.using_context('content'):
self.marionette.navigate(item['url'])
self.verify_certificate_status(item)
self.browser.tabbar.open_tab()
self.restart()
for index, item in enumerate(self.test_data):
self.browser.tabbar.tabs[index].select()
self.verify_certificate_status(item)
def verify_certificate_status(self, item):
url, identity, cert_type = item['url'], item['identity'], item['type']
# Check the favicon
# TODO: find a better way to check, e.g., mozmill's isDisplayed
favicon_hidden = self.marionette.execute_script("""
return arguments[0].hasAttribute("hidden");
""", script_args=[self.browser.navbar.locationbar.favicon])
self.assertFalse(favicon_hidden)
self.identity_popup.box.click()
Wait(self.marionette).until(lambda _: self.identity_popup.is_open)
        # Check the type shown on the identity popup doorhanger
self.assertEqual(self.identity_popup.popup.get_attribute('className'),
cert_type,
'Certificate type is verified for ' + url)
# Check the identity label
self.assertEqual(self.identity_popup.organization_label.get_attribute('value'),
identity,
'Identity name is correct for ' + url)
# Get the information from the certificate
cert = self.browser.tabbar.selected_tab.certificate
# Open the Page Info window by clicking the More Information button
page_info = self.browser.open_page_info_window(
lambda _: self.identity_popup.more_info_button.click())
# Verify that the current panel is the security panel
self.assertEqual(page_info.deck.selected_panel, page_info.deck.security)
# Verify the domain listed on the security panel
# If this is a wildcard cert, check only the domain
if cert['commonName'].startswith('*'):
self.assertIn(self.security.get_domain_from_common_name(cert['commonName']),
page_info.deck.security.domain.get_attribute('value'),
'Expected domain found in certificate for ' + url)
else:
self.assertEqual(page_info.deck.security.domain.get_attribute('value'),
cert['commonName'],
'Domain value matches certificate common name.')
# Verify the owner listed on the security panel
if identity != '':
owner = cert['organization']
else:
owner = page_info.get_property('securityNoOwner')
self.assertEqual(page_info.deck.security.owner.get_attribute('value'), owner,
'Expected owner label found for ' + url)
# Verify the verifier listed on the security panel
self.assertEqual(page_info.deck.security.verifier.get_attribute('value'),
cert['issuerOrganization'],
'Verifier matches issuer of certificate for ' + url)
page_info.close()
|
sr-murthy/firefox-ui-tests
|
firefox_ui_tests/remote/security/test_ssl_status_after_restart.py
|
Python
|
mpl-2.0
| 4,773
|
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0
"""
Base RPC client class
Authors:
* Brian Granger
* Alexander Glyzov
* Axel Voitier
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov,
# Axel Voitier
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from sys import exc_info
from random import randint
from logging import getLogger
import zmq
from zmq.utils import jsonapi
from .base import RPCBase
from .errors import RemoteRPCError, RPCError
from .utils import RemoteMethod
#-----------------------------------------------------------------------------
# RPC Client base
#-----------------------------------------------------------------------------
class RPCClientBase(RPCBase):
"""An RPC Client (base class)"""
logger = getLogger('netcall.client')
def _create_socket(self):
super(RPCClientBase, self)._create_socket()
self.socket = self.context.socket(zmq.DEALER)
self.socket.setsockopt(zmq.IDENTITY, self.identity)
def _build_request(self, method, args, kwargs, ignore=False, req_id=None):
req_id = req_id or ('%x' % randint(0, 0xFFFFFFFF)).encode()
method = method.encode()
msg_list = [b'|', req_id, method]
data_list = self._serializer.serialize_args_kwargs(args, kwargs)
msg_list.extend(data_list)
msg_list.append(str(int(ignore)).encode())
return req_id, msg_list
def _send_request(self, request):
self.logger.debug('sending %r', request)
self.socket.send_multipart(request)
def _parse_reply(self, msg_list):
"""
Parse a reply from service
(should not raise an exception)
The reply is received as a multipart message:
[b'|', req_id, type, payload ...]
Returns either None or a dict {
'type' : <message_type:bytes> # ACK | OK | YIELD | FAIL
'req_id' : <id:bytes>, # unique message id
'srv_id' : <service_id:bytes> | None # only for ACK messages
'result' : <object>
}
"""
logger = self.logger
if len(msg_list) < 4 or msg_list[0] != b'|':
logger.error('bad reply %r', msg_list)
return None
msg_type = msg_list[2]
data = msg_list[3:]
result = None
srv_id = None
if msg_type == b'ACK':
srv_id = data[0]
elif msg_type in (b'OK', b'YIELD'):
try:
result = self._serializer.deserialize_result(data)
except Exception as e:
msg_type = b'FAIL'
result = e
elif msg_type == b'FAIL':
try:
error = jsonapi.loads(msg_list[3])
if error['ename'] == 'StopIteration':
result = StopIteration()
elif error['ename'] == 'GeneratorExit':
result = GeneratorExit()
else:
result = RemoteRPCError(error['ename'], error['evalue'], error['traceback'])
except Exception as e:
logger.error('unexpected error while decoding FAIL', exc_info=True)
result = RPCError('unexpected error while decoding FAIL: %s' % e)
else:
result = RPCError('bad message type: %r' % msg_type)
return dict(
type = msg_type,
req_id = msg_list[1],
srv_id = srv_id,
result = result,
)
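        # Illustrative sketch (not in the original source): shape of the data
        # handled above. The byte values are invented for illustration only.
        #
        #   msg_list = [b'|', b'3fa9c2d1', b'OK', b'<serialized result>']
        #   self._parse_reply(msg_list)
        #   # -> {'type': b'OK', 'req_id': b'3fa9c2d1', 'srv_id': None,
        #   #     'result': <deserialized object>}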
def _generator(self, req_id, get_val_exc):
""" Mirrors a service generator on a client side
"""
#logger = self.logger
def _send_cmd(cmd, args):
_, msg_list = self._build_request(
cmd, args, None, ignore=False, req_id=req_id
)
self._send_request(msg_list)
_send_cmd('_SEND', None)
while True:
val, exc = get_val_exc()
if exc is not None:
raise exc
try:
res = yield val
except GeneratorExit:
_send_cmd('_CLOSE', None)
except:
etype, evalue, _ = exc_info()
_send_cmd('_THROW', [etype.__name__, evalue])
else:
_send_cmd('_SEND', res)
def __getattr__(self, name):
return RemoteMethod(self, name)
def call(self, proc_name, args=[], kwargs={}, result='sync', timeout=None):
"""
Call the remote method with *args and **kwargs
(may raise an exception)
Parameters
----------
proc_name : <bytes> name of the remote procedure to call
args : <tuple> positional arguments of the remote procedure
kwargs : <dict> keyword arguments of the remote procedure
result : 'sync' | 'async' | 'ignore'
timeout : <float> | None
Number of seconds to wait for a reply.
RPCTimeoutError is raised in case of timeout.
Set to None, 0 or a negative number to disable.
Returns
-------
<result:object> if result is 'sync'
<Future> if result is 'async'
None if result is 'ignore'
If remote call fails:
- raises <RemoteRPCError> if result is 'sync'
- sets <RemoteRPCError> into the <Future> if result is 'async'
"""
assert result in ('sync', 'async', 'ignore'), \
'expected any of "sync", "async", "ignore" -- got %r' % result
if not (timeout is None or isinstance(timeout, (int, float))):
raise TypeError("timeout param: <float> or None expected, got %r" % timeout)
if not self._ready:
raise RuntimeError('bind or connect must be called first')
ignore = result == 'ignore'
req_id, msg_list = self._build_request(proc_name, args, kwargs, ignore)
self._send_request(msg_list)
if ignore:
return None
future = self._tools.Future()
self._futures[req_id] = future
if result == 'sync':
# block waiting for a reply passed by _reader
return future.result(timeout=timeout)
else:
# async
return future
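    # Illustrative usage sketch (not part of the original module). A concrete
    # RPCClientBase subclass and a remote procedure named 'echo' are assumed
    # here purely for illustration; the address is hypothetical.
    #
    #   client = SomeRPCClient()              # concrete subclass of RPCClientBase
    #   client.connect('tcp://127.0.0.1:5555')
    #   client.call('echo', args=('hello',), result='sync', timeout=5)  # -> 'hello'
    #   future = client.call('echo', args=('hello',), result='async')
    #   client.echo('hello')                  # same call via __getattr__/RemoteMethod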
|
Alidron/demo-nao
|
alidron-env/lib/python2.7/site-packages/netcall/base_client.py
|
Python
|
mpl-2.0
| 6,703
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import re
from collections import OrderedDict
from webob.multidict import MultiDict
from zope.interface import implementer
from pyramid.view import view_config
import pyramid.httpexceptions as exc
from pyramid import renderers
from pyramid.renderers import get_renderer
from pyramid_layout.layout import Structure
from substanced.util import get_oid
from dace.processinstance.core import Error, ValidationError
from pontus.interfaces import IView
from pontus.core import Step
from pontus.util import copy_dict, update_resources
from pontus.resources import (
BehaviorViewErrorPrincipalmessage,
BehaviorViewErrorSolutions)
from pontus import _, log
class ViewError(Error):
principalmessage = u""
causes = []
solutions = []
type = 'danger'
template = 'pontus:templates/views_templates/alert_message.pt'
def render_message(self, request, subject=None):
content_message = renderers.render(
self.template,
{'error': self, 'subject': subject}, request)
return content_message
EMPTY_TEMPLATE = 'templates/views_templates/empty.pt'
@implementer(IView)
class View(Step):
"""Abstract view"""
viewid = None
title = _('View')
description = ""
name = 'view'
    coordinates = 'main'  # default value
validators = []
wrapper_template = 'templates/views_templates/view_wrapper.pt'
template = None
requirements = None
css_class = "pontus-main-view"
container_css_class = ""
def render_item(self, item, coordinates, parent):
body = renderers.render(
self.wrapper_template,
{'coordinates': coordinates,
'subitem': item,
'parent': parent}, self.request)
return Structure(body)
def __init__(self,
context,
request,
parent=None,
wizard=None,
stepid=None,
**kwargs):
super(View, self).__init__(wizard, stepid)
self.context = context
self.request = request
self.parent = parent
if self.viewid is None:
self.viewid = self.name
if self.parent is not None:
self.viewid = self.parent.viewid + '_' + self.viewid
if self.context is not None:
self.viewid = self.viewid + '_' + str(get_oid(self.context, ''))
self._original_view_id = self.viewid
self._request_configuration()
def _request_configuration(self):
coordinates = self.params('coordinates')
if coordinates is not None:
self.coordinates = coordinates
@property
def requirements_copy(self):
if self.requirements is None:
return {'css_links': [], 'js_links': []}
else:
return copy_dict(self.requirements)
def has_id(self, id):
return self.viewid == id
def get_view_requirements(self):
return self.requirements_copy
def validate(self):
for validator in self.validators:
try:
validator.validate(self.context, self.request)
except ValidationError as error:
view_error = ViewError()
view_error.principalmessage = BehaviorViewErrorPrincipalmessage
if getattr(error, 'principalmessage', ''):
view_error.causes = [error.principalmessage]
view_error.solutions = BehaviorViewErrorSolutions
raise view_error
return True
def params(self, key=None):
result = []
if key is None:
return self.request.params
islist = False
list_key = key + '[]'
if list_key in self.request.params:
islist = True
if key in self.request.params or list_key in self.request.params:
dict_copy = self.request.params.copy()
dict_copy = MultiDict([(k.replace('[]', ''), value)
for (k, value) in dict_copy.items()])
while key in dict_copy:
result.append(dict_copy.pop(key))
len_result = len(result)
if not islist and len_result == 1:
return result[0]
elif islist or len_result > 1:
return result
return None
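    # Illustrative note (not in the original source): with a query string such
    # as "?tags[]=a&tags[]=b&name=test", params('tags') is expected to return
    # ['a', 'b'], params('name') returns 'test' and params('missing') returns
    # None. The parameter names are invented for illustration.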
def before_update(self):
self.bind()
def update(self):
pass
def after_update(self):
pass
def __call__(self):
result = None
try:
self.validate()
self.before_update()
result = self.update()
self.after_update()
except ViewError as error:
log.warning(error)
raise error
except Exception as http_error:
log.exception(http_error)
raise exc.HTTPInternalServerError()
if isinstance(result, dict):
if 'js_links' not in result:
result['js_links'] = []
if 'css_links' not in result:
result['css_links'] = []
update_resources(self.request, result)
return result
def content(self, args, template=None, main_template=None):
if template is None:
template = self.template
if main_template is None:
main_template = get_renderer(EMPTY_TEMPLATE).implementation()
if isinstance(args, dict):
args['main_template'] = main_template
body = renderers.render(template, args, self.request)
return {'body': body,
'args': args}
def adapt_item(self, render, id, isactive=True):
if self.parent is not None:
isactive = False
item = {'view': self, 'id': id, 'isactive': isactive}
if isinstance(render, list):
item['items'] = render
else:
item['body'] = render
return item
def setviewid(self, viewid):
self.viewid = viewid
self._original_view_id = viewid
def failure(self, error, subject=None):
error_body = error.render_message(self.request, subject)
item = self.adapt_item('', self.viewid, True)
item['messages'] = {error.type: [error_body]}
result = {'js_links': [],
'css_links': [],
'coordinates': {self.coordinates: [item]}}
return result
def success(self, validated=None):
pass
def bind(self):
setattr(self, '_bindings', {})
@property
def bindings(self):
bindings = getattr(self, '_bindings', {}).copy()
if self.parent:
bindings.update(self.parent.bindings)
return bindings
def get_binding(self, key):
return self.bindings.get(key, None)
class ElementaryView(View):
"""Abstract view"""
behaviors = []
validate_behaviors = True
def __init__(self,
context,
request,
parent=None,
wizard=None,
stepid=None,
**kwargs):
super(ElementaryView, self).__init__(context, request, parent,
wizard, stepid, **kwargs)
self._all_validators = list(self.validators)
self.specific_behaviors_instances = []
self.behaviors_instances = OrderedDict()
self.errors = []
if 'behaviors' in kwargs:
bis = kwargs['behaviors']
self.specific_behaviors_instances = [bi for bi in bis
if bi._class_ in self.behaviors]
specific_behaviors = [b._class_ for b in
self.specific_behaviors_instances]
if self.validate_behaviors:
self._all_validators.extend([behavior.get_validator()
for behavior in self.behaviors
if behavior not in specific_behaviors])
self._init_behaviors(specific_behaviors)
def validate(self):
try:
for validator in self._all_validators:
validator.validate(self.context, self.request)
if self.validate_behaviors and self.specific_behaviors_instances:
for init_v in self.specific_behaviors_instances:
init_v.validate(self.context, self.request)
except ValidationError as error:
view_error = ViewError()
view_error.principalmessage = BehaviorViewErrorPrincipalmessage
if error.principalmessage:
view_error.causes = [error.principalmessage]
view_error.solutions = BehaviorViewErrorSolutions
raise view_error
return True
def _add_behaviorinstance(self, behaviorinstance):
key = re.sub(r'\s', '_', behaviorinstance.title)
self.behaviors_instances[key] = behaviorinstance
try:
self.viewid = self.viewid+'_'+str(get_oid(behaviorinstance))
except Exception:
pass
def _init_behaviors(self, specific_behaviors):
self.viewid = self._original_view_id
self.behaviors_instances = OrderedDict()
behaviors = [behavior for behavior in self.behaviors
if behavior not in specific_behaviors]
behaviors_instances = []
for behavior in behaviors:
try:
wizard_behavior = None
if self.wizard:
wizard_behavior = self.wizard.behaviorinstance
behaviorinstance = behavior.get_instance(self.context,
self.request,
wizard=wizard_behavior,
validate=False)
if behaviorinstance:
behaviors_instances.append(behaviorinstance)
except ValidationError as error:
self.errors.append(error)
for behaviorinstance in self.specific_behaviors_instances:
behaviors_instances.append(behaviorinstance)
behaviors_instances = sorted(
behaviors_instances,
key=lambda e:
self.behaviors.index(e.__class__))
for behaviorinstance in behaviors_instances:
self._add_behaviorinstance(behaviorinstance)
def before_update(self):
self.bind()
for behavior in self.behaviors_instances.values():
behavior.before_execution(self.context, self.request)
def execute(self, appstruct=None):
results = []
for behavior in self.behaviors_instances.values():
results.append(behavior.execute(
self.context, self.request, appstruct))
return results
def after_update(self):
pass
class BasicView(ElementaryView):
"""Basic view"""
isexecutable = False
def __init__(self,
context,
request,
parent=None,
wizard=None,
stepid=None,
**kwargs):
super(BasicView, self).__init__(context, request, parent,
wizard, stepid, **kwargs)
self.finished_successfully = True
def update(self):
return {}
@view_config(
context=ViewError,
renderer='pontus:templates/views_templates/grid.pt',
)
class ViewErrorView(BasicView):
title = _('An error has occurred!')
name = 'viewerrorview'
template = 'pontus:templates/views_templates/alert_message.pt'
def update(self):
self.title = self.request.localizer.translate(self.title)
result = {}
body = self.content(
args={'error': self.context}, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
|
ecreall/pontus
|
pontus/view.py
|
Python
|
agpl-3.0
| 12,126
|
# -*- coding: utf-8 -*-
import time
import datetime
from report import report_sxw
class diseases_report(report_sxw.rml_parse):
_name = 'report.patient.diseases'
def __init__(self, cr, uid, name, context):
super(diseases_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw('report.patient.diseases', 'medical.patient', 'addons/hms_lite/medical/report/patient_diseases.rml', parser=diseases_report, header=True)
|
eneldoserrata/marcos_openerp
|
oemedical/hms_lite/medical/report/patient_diseases.py
|
Python
|
agpl-3.0
| 554
|
############################################################################
#
# Copyright (C) 2013 tele <tele@rhizomatica.org>
#
# REST API Interface to RCCN Modules
# This file is part of RCCN
#
# RCCN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RCCN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from corepost import Response, NotFoundException, AlreadyExistsException
from corepost.web import RESTResource, route, Http
from config import *
class SubscriberRESTService:
path = '/subscriber'
# get all subscribers
@route('/')
def getAll(self, request):
api_log.info('%s - [GET] %s' % (request.getHost().host, self.path))
try:
sub = Subscriber()
data = json.dumps(sub.get_all(), cls=PGEncoder)
except SubscriberException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# get subscriber
@route('/<msisdn>')
def get(self, request, msisdn):
api_log.info('%s - [GET] %s/%s' % (request.getHost().host, self.path, msisdn))
try:
sub = Subscriber()
if msisdn == 'all_connected':
data = json.dumps(sub.get_all_connected(), cls=PGEncoder)
elif msisdn == 'unpaid_subscription':
data = json.dumps(sub.get_unpaid_subscription(), cls=PGEncoder)
elif msisdn == 'paid_subscription':
data = json.dumps(sub.get_paid_subscription(), cls=PGEncoder)
elif msisdn == 'unauthorized':
data = json.dumps(sub.get_unauthorized(), cls=PGEncoder)
elif msisdn == 'online':
data = json.dumps(sub.get_online(), cls=PGEncoder)
elif msisdn == 'offline':
data = json.dumps(sub.get_offline(), cls=PGEncoder)
else:
data = json.dumps(sub.get(msisdn), cls=PGEncoder)
except SubscriberException as e:
data = {'status': 'failed', 'error': str(e)}
if msisdn != 'all_connected':
api_log.info(data)
return data
# get msisdn
@route('/extension/<imsi>')
def extension(self, request, imsi):
api_log.info('%s - [GET] %s/%s' % (request.getHost().host, self.path, imsi))
try:
            sub = Subscriber()
data = json.dumps(sub.get_local_extension(imsi), cls=PGEncoder)
except SubscriberException as e:
data = {'status': 'failed', 'error': str(e)}
return data
# add new subscriber
@route('/', Http.POST)
def post(self, request, msisdn, name, balance, location):
api_log.info('%s - [POST] %s Data: msisdn:"%s" name:"%s" balance:"%s" location:"%s"' % (request.getHost().host, self.path, msisdn, name, balance, location))
try:
sub = Subscriber()
num = sub.add(msisdn, name, balance, location)
if num != msisdn:
data = {'status': 'success', 'error': num}
else:
data = {'status': 'success', 'error': ''}
except SubscriberException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# put subscriber offline
@route('/offline', Http.PUT)
def offline(self, request, imsi=''):
api_log.info('%s - [PUT] %s/offline Data: imsi:"%s"' % (request.getHost().host, self.path, imsi))
try:
sub = Subscriber()
sub.set_lac(imsi, 0)
data = {'status': 'success', 'error': ''}
except SubscriberException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# edit subscriber
@route('/<msisdn>', Http.PUT)
def put(self, request, msisdn='', name='', balance='', authorized='', subscription_status='', location=''):
api_log.info('%s - [PUT] %s/%s Data: name:"%s" balance:"%s" authorized:"%s" subscription_status:"%s" location:"%s"' % (request.getHost().host, self.path,
msisdn, name, balance, authorized, subscription_status, location))
try:
sub = Subscriber()
if authorized != '':
sub.authorized(msisdn, authorized)
if subscription_status != '':
sub.subscription(msisdn, subscription_status)
if msisdn != '' and name != '' or balance != '':
sub.edit(msisdn, name, balance, location)
data = {'status': 'success', 'error': ''}
except SubscriberException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# delete subscriber
@route('/<msisdn>', Http.DELETE)
def delete(self, request, msisdn):
api_log.info('%s - [DELETE] %s/%s' % (request.getHost().host, self.path, msisdn))
try:
sub = Subscriber()
sub.delete(msisdn)
data = {'status': 'success', 'error': ''}
except SubscriberException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
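    # Illustrative sketch (not part of the original source): the routes above
    # translate to HTTP requests along these lines (run_rapi() at the bottom of
    # this file serves the API on port 8085; the host is hypothetical).
    #
    #   GET    http://127.0.0.1:8085/subscriber/               -> all subscribers
    #   GET    http://127.0.0.1:8085/subscriber/all_connected  -> connected subscribers
    #   POST   http://127.0.0.1:8085/subscriber/   msisdn=...&name=...&balance=...&location=...
    #   PUT    http://127.0.0.1:8085/subscriber/<msisdn>   name=...&balance=...&location=...
    #   DELETE http://127.0.0.1:8085/subscriber/<msisdn>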
class ResellerRESTService:
path = '/reseller'
# get all resellers
@route('/')
def getAll(self, request):
        api_log.info('%s - [GET] %s' % (request.getHost().host, self.path))
try:
            reseller = Reseller()
data = json.dumps(reseller.get_all(), cls=PGEncoder)
except ResellerException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# get reseller
@route('/<msisdn>')
def get(self, request, msisdn):
        api_log.info('%s - [GET] %s/%s' % (request.getHost().host, self.path, msisdn))
try:
reseller = Reseller()
if msisdn == 'messages':
data = json.dumps(reseller.get_messages(), cls=PGEncoder)
else:
data = json.dumps(reseller.get(msisdn), cls=PGEncoder)
except ResellerException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# add new reseller
@route('/<msisdn>', Http.POST)
def post(self, request, msisdn, pin, balance):
api_log.info('%s - [POST] %s Data: msisdn:"%s" pin:"%s" balance:"%s"' % (request.getHost().host, self.path, msisdn, pin, balance))
try:
reseller = Reseller()
reseller.add(msisdn, pin, balance)
data = {'status': 'success', 'error': ''}
except ResellerException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# edit reseller
@route('/', Http.PUT)
def put(self, request, msisdn='', pin='', balance=''):
api_log.info('%s - [PUT] %s Data: msisdn:"%s" pin:"%s" balance:"%s"' % (request.getHost().host, self.path, msisdn, pin, balance))
try:
reseller = Reseller()
if msisdn != '' and pin != '' or balance != '':
reseller.edit(msisdn, pin, balance)
data = {'status': 'success', 'error': ''}
except ResellerException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# edit reseller notification messages
@route('/edit_messages', Http.PUT)
    def edit_messages(self, request, mess1, mess2, mess3, mess4, mess5, mess6):
api_log.info('%s - [PUT] %s/edit_messages Data: mess1:"%s" mess2:"%s" mess3:"%s" mess4:"%s" mess5:"%s" mess6:"%s"' % (request.getHost().host, self.path,
mess1, mess2, mess3, mess4, mess5, mess6))
try:
reseller = Reseller()
reseller.edit_messages(mess1, mess2, mess3, mess4, mess5, mess6)
data = {'status': 'success', 'error': ''}
except ResellerException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# delete reseller
@route('/<msisdn>', Http.DELETE)
def delete(self, request, msisdn):
api_log.info('%s - [DELETE] %s/%s' % (request.getHost().host, self.path, msisdn))
try:
reseller = Reseller()
reseller.delete(msisdn)
data = {'status': 'success', 'error': ''}
except ResellerException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
class CreditRESTService:
path = '/credit'
@route('/', Http.POST)
def post(self, request, msisdn, amount):
api_log.info('%s - [POST] %s/add Data: msisdn:"%s" amount:"%s"' % (request.getHost().host, self.path, msisdn, amount))
try:
credit = Credit()
credit.add(msisdn, amount)
data = {'status': 'success', 'error': ''}
except CreditException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/reseller', Http.POST)
def reseller(self, request, msisdn, amount):
api_log.info('%s - [POST] %s/reseller Data: msisdn:"%s" amount:"%s"' % (request.getHost().host, self.path, msisdn, amount))
try:
credit = Credit()
credit.add_to_reseller(msisdn, amount)
data = {'status': 'success', 'error': ''}
except CreditException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
class SMSRESTService:
path = '/sms'
@route('/', Http.POST)
def receive(self, request, source, destination, charset, coding, text):
api_log.info('%s - [POST] %s Data: source:"%s" destination:"%s" charset:"%s" coding: "%s" text:"%s"' % (request.getHost().host, self.path, source,
destination, charset, coding, text))
try:
sms = SMS()
sms.receive(source, destination, text, charset, coding)
data = {'status': 'success', 'error': ''}
except SMSException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/send', Http.POST)
def send(self, request, source, destination, text):
api_log.info('%s - [POST] %s/send Data: source:"%s" destination:"%s" text:"%s"' % (request.getHost().host, self.path, source, destination, text))
try:
sms = SMS()
sms.send(source, destination, text)
data = {'status': 'success', 'error': ''}
except SMSException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/send_broadcast', Http.POST)
def send_broadcast(self, request, text, btype):
api_log.info('%s - [POST] %s/send_broadcast Data: text:"%s" btype:"%s"' % (request.getHost().host, self.path, text, btype))
try:
sms = SMS()
sms.send_broadcast(text, btype)
data = {'status': 'success', 'error': ''}
except SMSException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
class StatisticsRESTService:
path = '/statistics'
# Calls statistics
@route('/calls/total_calls')
def total_calls(self, request):
api_log.info('%s - [GET] %s/calls/total_calls' % (request.getHost().host, self.path))
try:
stats = CallsStatistics()
data = stats.get_total_calls()
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/calls/total_minutes')
def total_minutes(self, request):
api_log.info('%s - [GET] %s/calls/total_minutes' % (request.getHost().host, self.path))
try:
stats = CallsStatistics()
data = stats.get_total_minutes()
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/calls/average_call_duration')
def average_call_duration(self, request):
api_log.info('%s - [GET] %s/calls/average_call_duration' % (request.getHost().host, self.path))
try:
stats = CallsStatistics()
data = json.dumps(stats.get_average_call_duration(), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
    @route('/calls/total_calls_by_context', Http.POST)
def total_calls_by_context(self, request, context):
api_log.info('%s - [POST] %s/calls/total_calls_by_context Data: context:"%s"' % (request.getHost().host, self.path, context))
try:
stats = CallsStatistics()
data = stats.get_total_calls_by_context(context)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
    @route('/calls/calls', Http.POST)
def calls(self, request, period):
api_log.info('%s - [POST] %s/calls/calls Data: period:"%s"' % (request.getHost().host, self.path, period))
try:
stats = CallsStatistics()
data = json.dumps(stats.get_calls_stats(period), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
    @route('/calls/calls_minutes', Http.POST)
def calls_minutes(self, request, period):
api_log.info('%s - [POST] %s/calls/calls_minutes Data: period:"%s"' % (request.getHost().host, self.path, period))
try:
stats = CallsStatistics()
data = json.dumps(stats.get_calls_minutes_stats(period), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
    @route('/calls/calls_context', Http.POST)
def calls_context(self, request, period):
api_log.info('%s - [POST] %s/calls/calls_context Data: period:"%s"' % (request.getHost().host, self.path, period))
try:
stats = CallsStatistics()
data = json.dumps(stats.get_calls_context_stats(period), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
# Costs/Credits statistics
@route('/costs/total_spent')
def total_spent(self, request):
api_log.info('%s - [GET] %s/costs/total_spent' % (request.getHost().host, self.path))
try:
stats = CostsStatistics()
data = json.dumps(stats.get_total_spent(), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/costs/average_call_cost')
def average_call_cost(self, request):
api_log.info('%s - [GET] %s/costs/average_call_cost' % (request.getHost().host, self.path))
try:
stats = CostsStatistics()
data = json.dumps(stats.get_average_call_cost(), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/costs/total_spent_credits')
def total_spent_credits(self, request):
api_log.info('%s - [GET] %s/costs/total_spent_credits' % (request.getHost().host, self.path))
try:
stats = CostsStatistics()
data = json.dumps(stats.get_total_spent_credits(), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/costs/top_destinations')
def top_destinations(self, request):
api_log.info('%s - [GET] %s/top_destinations' % (request.getHost().host, self.path))
try:
stats = CostsStatistics()
data = json.dumps(stats.get_top_destinations(), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/costs/costs_stats', Http.POST)
def costs_stats(self, request, period):
api_log.info('%s - [POST] %s/costs/costs_stats Data: period:"%s"' % (request.getHost().host, self.path, period))
try:
stats = CostsStatistics()
data = json.dumps(stats.get_costs_stats(period), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
    @route('/costs/credits_stats', Http.POST)
def credits_stats(self, request, period):
api_log.info('%s - [POST] %s/costs/credits_stats Data: period:"%s"' % (request.getHost().host, self.path, period))
try:
stats = CostsStatistics()
data = json.dumps(stats.get_credits_stats(period), cls=PGEncoder)
except StatisticException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
class ConfigurationRESTService:
path = '/configuration'
@route('/site', Http.GET)
def site(self, request):
api_log.info('%s - [GET] %s/site' % (request.getHost().host, self.path))
try:
config = Configuration()
data = json.dumps(config.get_site(), cls=PGEncoder)
except ConfigurationException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/locations', Http.GET)
def locations(self, request):
api_log.info('%s - [GET] %s/locations' % (request.getHost().host, self.path))
try:
config = Configuration()
data = json.dumps(config.get_locations(), cls=PGEncoder)
except ConfigurationException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
@route('/config', Http.GET)
def config(self, request):
api_log.info('%s - [GET] %s/config' % (request.getHost().host, self.path))
try:
config = Configuration()
data = json.dumps(config.get_site_config(), cls=PGEncoder)
except ConfigurationException as e:
data = {'status': 'failed', 'error': str(e)}
api_log.info(data)
return data
def run_rapi():
api_log.info('Starting up RCCN API manager')
app = RESTResource((SubscriberRESTService(), ResellerRESTService(), CreditRESTService(), StatisticsRESTService(), SMSRESTService(), ConfigurationRESTService()))
app.run(8085)
if __name__ == "__main__":
run_rapi()
|
saycel/saycel
|
rccn/rapi.py
|
Python
|
agpl-3.0
| 19,438
|
# https://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python
def modular_sqrt(a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
0 is returned is no square root exists for
these a and p.
The Tonelli-Shanks algorithm is used (except
for some simple cases in which the solution
is known from an identity). This algorithm
runs in polynomial time (unless the
generalized Riemann hypothesis is false).
"""
# Simple cases
#
if legendre_symbol(a, p) != 1:
return 0
elif a == 0:
return 0
elif p == 2:
return 0
elif p % 4 == 3:
return pow(a, (p + 1) / 4, p)
# Partition p-1 to s * 2^e for an odd s (i.e.
# reduce all the powers of 2 from p-1)
#
s = p - 1
e = 0
while s % 2 == 0:
s /= 2
e += 1
# Find some 'n' with a legendre symbol n|p = -1.
# Shouldn't take long.
#
n = 2
while legendre_symbol(n, p) != -1:
n += 1
# Here be dragons!
# Read the paper "Square roots from 1; 24, 51,
# 10 to Dan Shanks" by Ezra Brown for more
# information
#
# x is a guess of the square root that gets better
# with each iteration.
# b is the "fudge factor" - by how much we're off
# with the guess. The invariant x^2 = ab (mod p)
# is maintained throughout the loop.
# g is used for successive powers of n to update
# both a and b
# r is the exponent - decreases with each update
#
x = pow(a, (s + 1) / 2, p)
b = pow(a, s, p)
g = pow(n, s, p)
r = e
while True:
t = b
m = 0
for m in xrange(r):
if t == 1:
break
t = pow(t, 2, p)
if m == 0:
return x
gs = pow(g, 2 ** (r - m - 1), p)
g = (gs * gs) % p
x = (x * gs) % p
b = (b * g) % p
r = m
def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) / 2, p)
return -1 if ls == p - 1 else ls
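# Illustrative usage (not part of the original file); this module targets
# Python 2 (it relies on xrange and integer '/' division), so the sketch
# assumes Python 2 as well.
if __name__ == '__main__':
    print(modular_sqrt(10, 13))   # -> 7 (7*7 = 49 = 10 mod 13; 6 is the other root)
    print(modular_sqrt(5, 13))    # -> 0 (5 is not a quadratic residue mod 13)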
|
aweinstock314/aweinstock-ctf-writeups
|
cccamp_2019/prejudiced/bendersky_modsqrt.py
|
Python
|
agpl-3.0
| 2,433
|
# Copyright (C) 2010 Wil Mahan <wmahan+fatics@gmail.com>
#
# This file is part of FatICS.
#
# FatICS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FatICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with FatICS. If not, see <http://www.gnu.org/licenses/>.
#
import time
import copy
import var
import game
import timeseal
import partner
from game_list import GameList
# user state that is per-session and not saved to persistent storage
class Session(object):
def __init__(self, conn):
self.conn = conn
self.login_time = time.time()
self.last_command_time = time.time()
self.last_tell_user = None
self.last_tell_ch = None
self.last_opp = None
self.use_timeseal = False
self.ping_sent = []
self.ping_time = []
self.move_sent_timestamp = None
self.use_zipseal = False
self.check_for_timeseal = True
self.offers_sent = []
self.offers_received = []
self.game = None
self.ivars = var.varlist.get_default_ivars()
self.lag = 0
self.observed = GameList()
self.closed = False
self.seeks = []
self.partner = None
self.following = None
self.followed_by = set()
self.idlenotifying = set()
self.idlenotified_by = set()
def set_user(self, user):
self.user = user
self.conn.write(_('**** Starting FICS session as %s ****\n\n') % user.get_display_name())
def get_idle_time(self):
""" returns seconds """
assert(self.last_command_time is not None)
return time.time() - self.last_command_time
def get_online_time(self):
""" returns seconds """
assert(self.login_time is not None)
return time.time() - self.login_time
def close(self):
assert(not self.closed)
self.closed = True
# XXX this will not remove draw offers; game-related offers
# should probably be saved when a game is adjourned
for v in self.offers_sent[:]:
assert(v.a == self.user)
v.withdraw_logout()
for v in self.offers_received[:]:
assert(v.b == self.user)
v.decline_logout()
if self.partner:
#self.conn.write(_('Removing partnership with %s.\n') %
# partner.name)
self.partner.write_('\nYour partner, %s, has departed.\n',
self.user.name)
partner.end_partnership(self.partner, self.user)
if self.game:
self.game.leave(self.user)
assert(self.game == None)
del self.offers_received[:]
del self.offers_sent[:]
for u in self.idlenotified_by:
u.write_("\nNotification: %s, whom you were idlenotifying, has departed.\n", (self.user.name,))
u.session.idlenotifying.remove(self.user)
self.idlenotified_by.clear()
if self.followed_by:
for p in self.followed_by.copy():
p.write_('\n%s, whose games you were following, has logged out.\n', self.user.name)
self.followed_by = set()
# unobserve games
assert(self.user.session == self)
for g in self.observed.copy():
g.unobserve(self.user)
assert(not self.observed)
# remove seeks
if self.seeks:
for s in self.seeks[:]:
s.remove()
self.conn.write(_('Your seeks have been removed.\n'))
assert(not self.seeks)
def set_ivars_from_str(self, s):
"""Parse a %b string sent by Jin to set ivars before logging in."""
for (i, val) in enumerate(s):
self.ivars[var.ivar_number[i].name] = int(val)
self.conn.write("#Ivars set.\n")
def set_ivar(self, v, val):
if val is not None:
self.ivars[v.name] = val
else:
if v.name in self.ivars:
del self.ivars[v.name]
def ping(self, for_move=False):
# don't send another ping if one is already pending
assert(self.use_timeseal or self.use_zipseal)
# Always send a ping with a move in a game being played.
        # Otherwise, send a ping if one is not already pending.
if for_move or not self.ping_sent:
if self.use_zipseal:
self.conn.write(timeseal.ZIPSEAL_PING)
else:
self.conn.write(timeseal.TIMESEAL_1_PING)
self.ping_sent.append((time.time(), for_move))
def pong(self, t):
assert(self.ping_sent)
sent_time, for_move = self.ping_sent.pop(0)
reply_time = time.time() - sent_time
if len(self.ping_time) > 9:
self.ping_time.pop(0)
self.ping_time.append(reply_time)
if for_move:
self.move_sent_timestamp = t
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 smarttab autoindent
|
ecolitan/fatics
|
src/session.py
|
Python
|
agpl-3.0
| 5,365
|
from flask import Blueprint, render_template, abort, request, jsonify
from sqlalchemy import func, distinct
from skylines.database import db
from skylines.lib.dbutil import get_requested_record
from skylines.lib.vary import vary
from skylines.model import User, Club, Flight, Airport
statistics_blueprint = Blueprint('statistics', 'skylines')
@statistics_blueprint.route('/')
@statistics_blueprint.route('/<page>/<id>')
@vary('accept')
def index(page=None, id=None):
if 'application/json' not in request.headers.get('Accept', ''):
return render_template('ember-page.jinja', active_page='statistics')
name = None
query = db.session.query(Flight.year.label('year'),
func.count('*').label('flights'),
func.count(distinct(Flight.pilot_id)).label('pilots'),
func.sum(Flight.olc_classic_distance).label('distance'),
func.sum(Flight.duration).label('duration'))
pilots_query = db.session.query(func.count(distinct(Flight.pilot_id)))
if page == 'pilot':
pilot = get_requested_record(User, id)
name = unicode(pilot)
query = query.filter(Flight.pilot_id == pilot.id)
elif page == 'club':
club = get_requested_record(Club, id)
name = unicode(club)
query = query.filter(Flight.club_id == club.id)
pilots_query = pilots_query.filter(Flight.club_id == club.id)
elif page == 'airport':
airport = get_requested_record(Airport, id)
name = unicode(airport)
query = query.filter(Flight.takeoff_airport_id == airport.id)
pilots_query = pilots_query.filter(Flight.takeoff_airport_id == airport.id)
elif page is not None:
abort(404)
query = query.filter(Flight.is_rankable())
query = query.group_by(Flight.year).order_by(Flight.year.desc())
if page == 'pilot':
sum_pilots = 0
else:
sum_pilots = pilots_query.scalar()
list = []
for row in query:
row.average_distance = row.distance / row.flights
row.average_duration = row.duration / row.flights
list.append({
'year': row.year,
'flights': row.flights,
'distance': row.distance,
'duration': row.duration.total_seconds(),
'pilots': row.pilots,
'average_distance': row.distance / row.flights,
'average_duration': row.duration.total_seconds() / row.flights,
})
return jsonify(name=name, years=list, sumPilots=sum_pilots)
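# Illustrative note (not part of the original file): the JSON payload produced
# above has roughly this shape (all values invented for illustration):
#
#   {"name": "Some Airport", "sumPilots": 42,
#    "years": [{"year": 2015, "flights": 120, "pilots": 30,
#               "distance": 350000, "duration": 432000.0,
#               "average_distance": 2916, "average_duration": 3600.0}]}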
|
kerel-fs/skylines
|
skylines/frontend/views/statistics.py
|
Python
|
agpl-3.0
| 2,573
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import base64
import time
import logging
import re
from ..wizards.generate_communication_wizard import SMS_CHAR_LIMIT, SMS_COST
from math import ceil
from collections import OrderedDict
from datetime import date, datetime
from io import BytesIO
from dateutil.relativedelta import relativedelta
from odoo.addons.sponsorship_compassion.models.product import GIFT_REF
from odoo import api, models, _, fields
from odoo.exceptions import MissingError, UserError
_logger = logging.getLogger(__name__)
try:
from pyPdf import PdfFileWriter, PdfFileReader
from bs4 import BeautifulSoup
except ImportError:
_logger.warning("Please install pypdf and bs4 for using the module")
class PartnerCommunication(models.Model):
_inherit = 'partner.communication.job'
event_id = fields.Many2one('crm.event.compassion', 'Event')
ambassador_id = fields.Many2one('res.partner', 'Ambassador')
currency_id = fields.Many2one('res.currency', compute='_compute_currency')
utm_campaign_id = fields.Many2one('utm.campaign')
sms_cost = fields.Float()
sms_provider_id = fields.Many2one(
'sms.provider', 'SMS Provider',
default=lambda self: self.env.ref('sms_939.large_account_id', False),
readonly=False)
@api.model
def send_mode_select(self):
modes = super(PartnerCommunication, self).send_mode_select()
modes.append(('sms', _('SMS')))
return modes
@api.multi
def _compute_currency(self):
chf = self.env.ref('base.CHF')
for wizard in self:
wizard.currency_id = chf.id
def get_correspondence_attachments(self):
"""
Include PDF of letters if the send_mode is to print the letters.
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = dict()
# Report is used for print configuration
report = 'report_compassion.b2s_letter'
letters = self.get_objects()
if self.send_mode == 'physical':
for letter in self.get_objects():
try:
attachments[letter.file_name] = [
report, self._convert_pdf(letter.letter_image)]
except MissingError:
_logger.warn("Missing letter image", exc_info=True)
self.send_mode = False
self.auto_send = False
self.message_post(
_('The letter image is missing!'), _("Missing letter"))
continue
else:
# Attach directly a zip in the letters
letters.attach_zip()
return attachments
def get_birthday_bvr(self):
"""
Attach birthday gift slip with background for sending by e-mail
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = dict()
background = self.send_mode and 'physical' not in self.send_mode
sponsorships = self.get_objects().filtered(
lambda s: not s.birthday_paid)
gifts_to = sponsorships[:1].gift_partner_id
if sponsorships and gifts_to == self.partner_id:
birthday_gift = self.env['product.product'].search([
('default_code', '=', GIFT_REF[0])], limit=1)
attachments = sponsorships.get_bvr_gift_attachment(
birthday_gift, background)
return attachments
def get_graduation_bvr(self):
"""
Attach graduation gift slip with background for sending by e-mail
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = dict()
background = self.send_mode and 'physical' not in self.send_mode
sponsorships = self.get_objects()
graduation = self.env['product.product'].search([
('default_code', '=', GIFT_REF[4])], limit=1)
gifts_to = sponsorships[0].gift_partner_id
if sponsorships and gifts_to == self.partner_id:
attachments = sponsorships.get_bvr_gift_attachment(
graduation, background)
return attachments
def get_family_slip_attachment(self):
"""
Attach family gift slip with background for sending by e-mail
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = dict()
background = self.send_mode and 'physical' not in self.send_mode
sponsorships = self.get_objects()
family = self.env['product.product'].search([
('default_code', '=', GIFT_REF[2])], limit=1)
gifts_to = sponsorships[0].gift_partner_id
if sponsorships and gifts_to == self.partner_id:
attachments = sponsorships.get_bvr_gift_attachment(
family, background)
return attachments
def get_reminder_bvr(self):
"""
Attach sponsorship due payment slip with background for sending by
e-mail.
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
sponsorships = self.get_objects()
# Verify big due periods
if len(sponsorships.mapped('months_due')) > 3:
self.need_call = 'before_sending'
payment_mode = sponsorships.with_context(lang='en_US').mapped(
'payment_mode_id.name')[0]
# LSV-DD Waiting reminders special case
if 'Waiting Reminder' in self.config_id.name and (
'LSV' in payment_mode or 'Postfinance' in payment_mode):
if self.partner_id.bank_ids:
# We received the bank info but withdrawal didn't work.
# Mark to call in order to verify the situation.
self.need_call = 'before_sending'
else:
# Don't put payment slip if we just wait the authorization form
return dict()
        # Set the sponsorship product so the payment slip is printed for physical sends.
if self.send_mode and 'physical' in self.send_mode:
self.product_id = self.env['product.product'].search([
('default_code', '=', 'sponsorship')], limit=1)
return dict()
# In other cases, attach the payment slip.
report_name = 'report_compassion.bvr_due'
return {
_('sponsorship due.pdf'): [
report_name,
base64.b64encode(self.env['report'].get_pdf(
sponsorships.ids, report_name,
data={'background': True, 'doc_ids': sponsorships.ids}
))
]
}
def get_label_from_sponsorship(self):
"""
Attach sponsorship labels. Used from communication linked to children.
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
sponsorships = self.get_objects()
return self.get_label_attachment(sponsorships)
def get_label_attachment(self, sponsorships=False):
"""
Attach sponsorship labels. Used from communication linked to children.
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
if not sponsorships:
sponsorships = self.env['recurring.contract']
children = self.get_objects()
for child in children:
sponsorships += child.sponsorship_ids[0]
attachments = dict()
label_print = self.env['label.print'].search([
('name', '=', 'Sponsorship Label')], limit=1)
label_brand = self.env['label.brand'].search([
('brand_name', '=', 'Herma A4')], limit=1)
label_format = self.env['label.config'].search([
('name', '=', '4455 SuperPrint WeiB')], limit=1)
label_wizard = self.env['label.print.wizard'].with_context({
'active_ids': sponsorships.ids,
'active_model': 'recurring.contract',
'label_print': label_print.id,
'must_skip_send_to_printer': True
}).create({
'brand_id': label_brand.id,
'config_id': label_format.id,
'number_of_labels': 33
})
label_data = label_wizard.get_report_data()
report_name = 'label.report_label'
attachments[_('sponsorship labels.pdf')] = [
report_name,
base64.b64encode(
label_wizard.env['report'].get_pdf(
label_wizard.ids, report_name, data=label_data))
]
return attachments
def get_child_picture_attachment(self):
"""
Attach child pictures to communication. It directly attach them
to the communication if sent by e-mail and therefore does
return an empty dictionary.
:return: dict {}
"""
self.ensure_one()
res = dict()
if self.send_mode and 'physical' not in self.send_mode:
# Prepare attachments in case the communication is sent by e-mail
children = self.get_objects()
attachments = self.env['ir.attachment']
for child in children:
name = child.local_id + ' ' + child.last_photo_date + '.jpg'
attachments += attachments.create({
'name': name,
'datas_fname': name,
'res_model': self._name,
'res_id': self.id,
'datas': child.fullshot,
})
self.with_context(no_print=True).ir_attachment_ids = attachments
else:
self.ir_attachment_ids = False
return res
def get_yearly_payment_slips_2bvr(self):
return self.get_yearly_payment_slips(bv_number=2)
def get_yearly_payment_slips(self, bv_number=3):
"""
Attach payment slips
:param bv_number number of BV on a page (switch between 2BV/3BV page)
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
assert bv_number in (2, 3)
sponsorships = self.get_objects()
payment_mode_bvr = self.env.ref(
'sponsorship_switzerland.payment_mode_bvr')
attachments = dict()
# IF payment mode is BVR and partner is paying
# attach sponsorship payment slips
pay_bvr = sponsorships.filtered(
lambda s: s.payment_mode_id == payment_mode_bvr and
s.partner_id == self.partner_id)
report_obj = self.env['report']
if pay_bvr and pay_bvr.must_pay_next_year():
today = date.today()
date_start = today.replace(today.year + 1, 1, 1)
date_stop = date_start.replace(month=12, day=31)
report_name = 'report_compassion.{}bvr_sponsorship'.format(
bv_number)
attachments.update({
_('sponsorship payment slips.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
pay_bvr.ids, report_name,
data={
'doc_ids': pay_bvr.ids,
'date_start': fields.Date.to_string(date_start),
'date_stop': fields.Date.to_string(date_stop),
'background': self.send_mode != 'physical'
}
))
]
})
# Attach gifts for correspondents
pays_gift = self.env['recurring.contract']
for sponsorship in sponsorships:
if sponsorship.mapped(sponsorship.send_gifts_to) == \
self.partner_id:
pays_gift += sponsorship
if pays_gift:
report_name = 'report_compassion.{}bvr_gift_sponsorship'.format(
bv_number)
product_ids = self.env['product.product'].search([
('default_code', 'in', GIFT_REF[:3])
]).ids
attachments.update({
_('sponsorship gifts.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
pays_gift.ids, report_name,
data={
'doc_ids': pays_gift.ids,
'product_ids': product_ids
}
))
]
})
return attachments
def get_childpack_attachment(self):
self.ensure_one()
lang = self.partner_id.lang
sponsorships = self.get_objects()
exit_conf = self.env.ref(
'partner_communication_switzerland.lifecycle_child_planned_exit')
if self.config_id == exit_conf and sponsorships.mapped(
'sub_sponsorship_id'):
sponsorships = sponsorships.mapped('sub_sponsorship_id')
children = sponsorships.mapped('child_id')
# Always retrieve latest information before printing dossier
children.get_infos()
report_name = 'report_compassion.childpack_small'
return {
_('child dossier.pdf'): [
report_name,
base64.b64encode(self.env['report'].get_pdf(
children.ids, report_name, data={
'lang': lang,
'is_pdf': self.send_mode != 'physical',
'type': report_name,
}))
]
}
def get_tax_receipt(self):
self.ensure_one()
res = {}
if self.send_mode == 'digital':
report_name = 'report_compassion.tax_receipt'
data = {
'doc_ids': self.partner_id.ids,
'year': self.env.context.get('year', date.today().year - 1),
'lang': self.partner_id.lang,
}
res = {
_('tax receipt.pdf'): [
report_name,
base64.b64encode(
self.env['report'].with_context(
must_skip_send_to_printer=True).get_pdf(
self.partner_id.ids, report_name, data=data))
]
}
return res
@api.multi
def send(self):
"""
- Prevent sending communication when invoices are being reconciled
- Mark B2S correspondence as read when printed.
- Postpone no money holds when reminders sent.
- Update donor tag
- Sends SMS for sms send_mode
:return: True
"""
sms_jobs = self.filtered(lambda j: j.send_mode == 'sms')
sms_jobs.send_by_sms()
other_jobs = self - sms_jobs
for job in other_jobs.filtered(lambda j: j.model in (
'recurring.contract', 'account.invoice')):
queue_job = self.env['queue.job'].search([
('channel', '=', 'root.group_reconcile'),
('state', '!=', 'done'),
], limit=1)
if queue_job:
invoices = self.env['account.invoice'].browse(
queue_job.record_ids)
if job.partner_id in invoices.mapped('partner_id'):
retry = 0
state = queue_job.state
while state != 'done' and retry < 5:
if queue_job.state == 'failed':
raise UserError(_(
"A reconcile job has failed. Please call "
"an admin for help."
))
_logger.info("Reconcile job is processing! Going in "
"sleep for five seconds...")
time.sleep(5)
state = queue_job.read(['state'])[0]['state']
retry += 1
if queue_job.state != 'done':
                    raise UserError(_(
                        "Some invoices of the partner are just being "
                        "reconciled now. Please wait for the process to "
                        "finish before printing the communication."
                    ))
super(PartnerCommunication, other_jobs).send()
b2s_printed = other_jobs.filtered(
lambda c: c.config_id.model == 'correspondence' and
c.send_mode == 'physical' and c.state == 'done')
if b2s_printed:
letters = b2s_printed.get_objects()
if letters:
letters.write({
'letter_delivered': True,
})
# No money extension
no_money_1 = self.env.ref('partner_communication_switzerland.'
'sponsorship_waiting_reminder_1')
no_money_2 = self.env.ref('partner_communication_switzerland.'
'sponsorship_waiting_reminder_2')
no_money_3 = self.env.ref('partner_communication_switzerland.'
'sponsorship_waiting_reminder_3')
settings = self.env['availability.management.settings']
first_extension = settings.get_param('no_money_hold_duration')
second_extension = settings.get_param('no_money_hold_extension')
for communication in other_jobs:
extension = False
if communication.config_id == no_money_1:
extension = first_extension + 7
elif communication.config_id == no_money_2:
extension = second_extension + 7
elif communication.config_id == no_money_3:
extension = 10
if extension:
holds = communication.get_objects().mapped('child_id.hold_id')
for hold in holds:
expiration = datetime.now() + relativedelta(days=extension)
hold.expiration_date = fields.Datetime.to_string(
expiration)
donor = self.env.ref('partner_compassion.res_partner_category_donor')
partners = other_jobs.filtered(
lambda j: j.config_id.model == 'account.invoice.line' and
donor not in j.partner_id.category_id).mapped('partner_id')
partners.write({'category_id': [(4, donor.id)]})
return True
@api.multi
def send_by_sms(self):
"""
Sends communication jobs with SMS 939 service.
:return: list of sms_texts
"""
link_pattern = re.compile(r'<a href="(.*)">(.*)</a>', re.DOTALL)
sms_medium_id = self.env.ref('sms_sponsorship.utm_medium_sms').id
sms_texts = []
for job in self.filtered('partner_mobile'):
sms_text = job.convert_html_for_sms(link_pattern, sms_medium_id)
sms_texts.append(sms_text)
sms_wizard = self.env['sms.sender.wizard'].with_context(
partner_id=job.partner_id.id).create({
'subject': job.subject,
'text': sms_text,
'sms_provider_id': job.sms_provider_id.id
})
sms_wizard.send_sms_partner()
job.write({
'state': 'done',
'sent_date': fields.Datetime.now(),
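                    # one SMS_COST unit is charged per started
                    # SMS_CHAR_LIMIT-character segment of the message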
'sms_cost': ceil(
float(len(sms_text)) / SMS_CHAR_LIMIT) * SMS_COST
})
return sms_texts
def convert_html_for_sms(self, link_pattern, sms_medium_id):
"""
Converts HTML into simple text for SMS.
First replace links with short links using Link Tracker.
Then clean HTML using BeautifulSoup library.
:param link_pattern: the regex pattern for replacing links
:param sms_medium_id: the associated utm.medium id for generated links
:return: Clean text with short links for SMS use.
"""
self.ensure_one()
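        # Example with made-up content: '<p>Give <a href="https://example.org/give?a=1&amp;b=2">now</a></p>'
        # would come out as 'Give https://sho.rt/xyz' once the link is shortened
        # by the link tracker and the remaining tags are stripped.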
source_id = self.config_id.source_id.id
def _replace_link(match):
            full_link = match.group(1).replace('&amp;', '&')
short_link = self.env['link.tracker'].create({
'url': full_link,
'campaign_id': self.utm_campaign_id.id or self.env.ref(
'partner_communication_switzerland.'
'utm_campaign_communication').id,
'medium_id': sms_medium_id,
'source_id': source_id
})
return short_link.short_url
links_converted_text = link_pattern.sub(_replace_link, self.body_html)
soup = BeautifulSoup(links_converted_text, "lxml")
return soup.get_text().strip()
@api.multi
def open_related(self):
""" Select a better view for invoice lines. """
res = super(PartnerCommunication, self).open_related()
if self.config_id.model == 'account.invoice.line':
res['context'] = self.with_context(
tree_view_ref='sponsorship_compassion'
'.view_invoice_line_partner_tree',
group_by=False
).env.context
return res
def get_new_dossier_attachments(self):
"""
Returns pdfs for the New Dossier Communication, including:
- Sponsorship payment slips (if payment is True)
- Small Childpack
- Sponsorship labels (if correspondence is True)
- Child picture
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = OrderedDict()
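        # Illustrative shape of the mapping built below (placeholder payloads;
        # real entries hold base64-encoded PDF data from the report engine):
        # {
        #     'sponsorship payment slips.pdf': ['report_compassion.3bvr_sponsorship', 'JVBERi0x...'],
        #     'child picture.pdf': ['partner_communication_switzerland.child_picture', 'JVBERi0x...'],
        # }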
report_obj = self.env['report']
account_payment_mode_obj = self.env['account.payment.mode']\
.with_context(lang='en_US')
lsv_dd_modes = account_payment_mode_obj.search(
['|', ('name', 'like', 'Direct Debit'), ('name', 'like', 'LSV')])
permanent_order = self.env.ref(
'sponsorship_switzerland.payment_mode_permanent_order')
sponsorships = self.get_objects()
# Sponsorships included for payment slips
bv_sponsorships = sponsorships.filtered(
# 1. Needs to be payer
lambda s: s.partner_id == self.partner_id and
# 2. Permanent Order are always included
s.payment_mode_id == permanent_order or (
# 3. LSV/DD are never included
s.payment_mode_id not in lsv_dd_modes and
# 4. If already paid they are not included
not s.period_paid)
)
write_sponsorships = sponsorships.filtered(
lambda s: s.correspondent_id == self.partner_id)
# Include all active sponsorships for Permanent Order
bv_sponsorships |= bv_sponsorships\
.filtered(lambda s: s.payment_mode_id == permanent_order)\
.mapped('group_id.contract_ids').filtered(
lambda s: s.state in ('active', 'waiting'))
# Payment slips
if bv_sponsorships:
report_name = 'report_compassion.3bvr_sponsorship'
if bv_sponsorships.mapped('payment_mode_id') == permanent_order:
# One single slip is enough for permanent order.
report_name = 'report_compassion.bvr_sponsorship'
attachments.update({
_('sponsorship payment slips.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
bv_sponsorships.ids, report_name,
data={
'doc_ids': bv_sponsorships.ids,
'background': self.send_mode != 'physical'
}
))
]
})
# Childpack if not a SUB of planned exit.
lifecycle = sponsorships.mapped('parent_id.child_id.lifecycle_ids')
planned_exit = lifecycle and lifecycle[0].type == 'Planned Exit'
if not planned_exit:
attachments.update(self.get_childpack_attachment())
# Labels
if write_sponsorships:
attachments.update(self.get_label_attachment(write_sponsorships))
# Child picture
report_name = 'partner_communication_switzerland.child_picture'
child_ids = sponsorships.mapped('child_id').ids
attachments.update({
_('child picture.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
child_ids, report_name,
data={'doc_ids': child_ids}
))
]
})
# Country information
for field_office in self.get_objects().mapped(
'child_id.field_office_id'):
country_pdf = field_office.country_info_pdf
if country_pdf:
attachments.update({
field_office.name + ".pdf": [
'partner_communication_switzerland.field_office_info',
country_pdf
]
})
return attachments
def get_csp_attachment(self):
self.ensure_one()
attachments = OrderedDict()
report_obj = self.env['report']
account_payment_mode_obj = self.env['account.payment.mode']
csp = self.get_objects()
# Include all active csp for Permanent Order
if 'Permanent Order' in csp.with_context(
lang='en_US').mapped('payment_mode_id.name'):
csp += csp.mapped(
'group_id.contract_ids').filtered(
lambda s: s.state == 'active')
is_payer = self.partner_id in csp.mapped('partner_id')
make_payment_pdf = True
# LSV/DD don't need a payment slip
groups = csp.mapped('group_id')
lsv_dd_modes = account_payment_mode_obj.search(
['|', ('name', 'like', 'Direct Debit'), ('name', 'like', 'LSV')])
lsv_dd_groups = groups.filtered(
lambda r: r.payment_mode_id in lsv_dd_modes)
if len(lsv_dd_groups) == len(groups):
make_payment_pdf = False
# If partner already paid, avoid payment slip
if len(csp.filtered('period_paid')) == len(csp):
make_payment_pdf = False
# Payment slips
if is_payer and make_payment_pdf:
report_name = 'report_compassion.3bvr_sponsorship'
attachments.update({
                _('csp payment slips.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
csp.ids, report_name,
data={
'doc_ids': csp.ids,
'background': self.send_mode != 'physical'
}
))
]
})
return attachments
def _convert_pdf(self, pdf_data):
"""
        Converts every page of the PDF to A4 format when the communication is
        printed.
:param pdf_data: binary data of original pdf
:return: binary data of converted pdf
"""
if self.send_mode != 'physical':
return pdf_data
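        # Each page is merged onto a blank A4 page: oversized pages are
        # rescaled first, undersized ones are centred via a translation offset.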
pdf = PdfFileReader(BytesIO(base64.b64decode(pdf_data)))
convert = PdfFileWriter()
a4_width = 594.48
a4_height = 844.32 # A4 units in PyPDF
for i in xrange(0, pdf.numPages):
# translation coordinates
tx = 0
ty = 0
page = pdf.getPage(i)
corner = [float(x) for x in page.mediaBox.getUpperRight()]
if corner[0] > a4_width or corner[1] > a4_height:
page.scaleBy(max(a4_width / corner[0], a4_height / corner[1]))
elif corner[0] < a4_width or corner[1] < a4_height:
tx = (a4_width - corner[0]) / 2
ty = (a4_height - corner[1]) / 2
convert.addBlankPage(a4_width, a4_height)
convert.getPage(i).mergeTranslatedPage(page, tx, ty)
output_stream = BytesIO()
convert.write(output_stream)
output_stream.seek(0)
return base64.b64encode(output_stream.read())
|
ecino/compassion-switzerland
|
partner_communication_switzerland/models/partner_communication.py
|
Python
|
agpl-3.0
| 28,550
|
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
def page_filters_validator(val):
    """Validate that val is a dict whose values are lists of strings,
    e.g. {"medium": ["print", "web"]} (illustrative value)."""
    if not isinstance(val, dict):
        raise ValidationError
    if not all(isinstance(v, list) for v in val.values()):
        raise ValidationError
    if not all(isinstance(v, str) for vs in val.values() for v in vs):
        raise ValidationError
@deconstructible
class HighchartsCustomizationValidator:
def __init__(self, properties):
self.properties = properties
def __call__(self, value):
errors = []
custom_props = dict(self.properties)
if not isinstance(value, dict):
raise ValidationError("Root element must be a dict, got {}".format(type(value)))
for k, v in value.items():
try:
prop = custom_props[k]
except KeyError:
errors.append(ValidationError("Unknown property {}".format(k)))
continue
if not isinstance(v, prop.type):
raise ValidationError("Invalid type for {}, expected {!s}, got {!s}".format(k, prop.type, type(v)))
if errors:
raise ValidationError(errors)
|
amcat/amcat-dashboard
|
dashboard/util/validators.py
|
Python
|
agpl-3.0
| 1,205
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from datetime import datetime
import os
from pprint import pprint
from dlstats.fetchers.insee import INSEE as Fetcher
from dlstats import constants
import unittest
from unittest import mock
import httpretty
from dlstats.tests.fetchers.base import BaseFetcherTestCase
from dlstats.tests.resources import xml_samples
from dlstats.tests.base import RESOURCES_DIR as BASE_RESOURCES_DIR
RESOURCES_DIR = os.path.abspath(os.path.join(BASE_RESOURCES_DIR, "insee"))
def get_dimensions_from_dsd(self, xml_dsd=None, provider_name=None, dataset_code=None, dsd_id=None):
dimension_keys = ['FREQ', 'NATURE', 'PRODUIT']
dimensions = {
'FREQ': {'M': 'M'},
'NATURE': {},
'PRODUIT': {},
}
return dimension_keys, dimensions
def get_dimensions_from_dsd_CHO_AN_AGE(self, xml_dsd=None, provider_name=None, dataset_code=None, dsd_id=None):
dimension_keys = ['INDICATEUR', 'SEXE', 'AGE']
dimensions = {
'INDICATEUR': {'Nbre': 'Nbre'},
'SEXE': {},
'AGE': {},
}
return dimension_keys, dimensions
LOCAL_DATASETS_UPDATE = {
"IPI-2010-A21": {
"categories_root": ['COMPTA-NAT', 'CONDITIONS-VIE-SOCIETE', 'DEMO-ENT',
'ECHANGES-EXT', 'ENQ-CONJ', 'MARCHE-TRAVAIL',
'POPULATION', 'PRIX', 'PRODUCTION-ENT',
'SALAIRES-REVENUS', 'SERVICES-TOURISME-TRANSPORT',
'SRGDP'],
"concept_keys": ['base-per', 'decimals', 'embargo-time', 'freq', 'idbank', 'last-update', 'nature', 'obs-status', 'produit', 'ref-area', 'time-per-collect', 'title', 'unit-measure', 'unit-mult'],
"codelist_keys": ['base-per', 'decimals', 'embargo-time', 'freq', 'idbank', 'last-update', 'nature', 'obs-status', 'produit', 'ref-area', 'time-per-collect', 'title', 'unit-measure', 'unit-mult'],
"codelist_count": {
"base-per": 0,
"decimals": 0,
"embargo-time": 0,
"freq": 7,
"idbank": 0,
"last-update": 0,
"nature": 25,
"obs-status": 10,
"produit": 30,
"ref-area": 11,
"time-per-collect": 7,
"title": 0,
"unit-measure": 123,
"unit-mult": 0,
},
"dimension_keys": ['freq', 'produit', 'nature'],
"dimension_count": {
"freq": 7,
"produit": 30,
"nature": 25,
},
"attribute_keys": ['idbank', 'title', 'last-update', 'unit-measure', 'unit-mult', 'ref-area', 'decimals', 'base-per', 'time-per-collect', 'obs-status', 'embargo-time'],
"attribute_count": {
"idbank": 0,
"title": 0,
"last-update": 0,
"unit-measure": 123,
"unit-mult": 0,
"ref-area": 11,
"decimals": 0,
"base-per": 0,
"time-per-collect": 7,
"obs-status": 10,
"embargo-time": 0,
},
},
}
DSD_INSEE_CHO_AN_AGE = {
"provider": "INSEE",
"filepaths": deepcopy(xml_samples.DATA_INSEE_SPECIFIC["DSD"]["filepaths"]),
"dataset_code": "CHO-AN-AGE",
"dataset_name": "Unemployment according to the ILO standard (annual average) - By gender and age",
"dsd_id": "CHO-AN-AGE",
"dsd_ids": ["CHO-AN-AGE"],
"dataflow_keys": ['CHO-AN-AGE'],
"is_completed": True,
"concept_keys": ['age', 'base-per', 'decimals', 'embargo-time', 'freq', 'idbank', 'indicateur', 'last-update', 'obs-status', 'ref-area', 'sexe', 'time-per-collect', 'title', 'unit-measure', 'unit-mult'],
"codelist_keys": ['age', 'base-per', 'decimals', 'embargo-time', 'freq', 'idbank', 'indicateur', 'last-update', 'obs-status', 'ref-area', 'sexe', 'time-per-collect', 'title', 'unit-measure', 'unit-mult'],
"codelist_count": {
"age": 73,
"base-per": 0,
"decimals": 0,
"embargo-time": 0,
"freq": 7,
"idbank": 0,
"indicateur": 9,
"last-update": 0,
"obs-status": 10,
"ref-area": 11,
"sexe": 3,
"time-per-collect": 7,
"title": 0,
"unit-measure": 123,
"unit-mult": 0,
},
"dimension_keys": ['indicateur', 'sexe', 'age'],
"dimension_count": {
"indicateur": 9,
"sexe": 3,
"age": 73,
},
"attribute_keys": ['freq', 'idbank', 'title', 'last-update', 'unit-measure', 'unit-mult', 'ref-area', 'decimals', 'base-per', 'time-per-collect', 'obs-status', 'embargo-time'],
"attribute_count": {
"freq": 7,
"idbank": 0,
"title": 0,
"last-update": 0,
"unit-measure": 123,
"unit-mult": 0,
"ref-area": 11,
"decimals": 0,
"base-per": 0,
"time-per-collect": 7,
"obs-status": 10,
"embargo-time": 0,
},
}
DSD_INSEE_CHO_AN_AGE["filepaths"]["datastructure"] = os.path.abspath(os.path.join(RESOURCES_DIR, "insee-datastructure-CHO-AN-AGE.xml"))
DATA_INSEE_CHO_AN_AGE = {
"filepath": os.path.abspath(os.path.join(RESOURCES_DIR, "insee-data-CHO-AN-AGE.xml")),
"klass": "XMLSpecificData_2_1_INSEE",
"DSD": DSD_INSEE_CHO_AN_AGE,
"kwargs": {
"provider_name": "INSEE",
"dataset_code": "CHO-AN-AGE",
"dsd_filepath": DSD_INSEE_CHO_AN_AGE["filepaths"]["datastructure"],
},
"series_accept": 31,
"series_reject_frequency": 0,
"series_reject_empty": 0,
"series_all_values": 1219,
"series_key_first": '001664976',
"series_key_last": '001665006',
"series_sample": {
"provider_name": "INSEE",
"dataset_code": "CHO-AN-AGE",
'key': '001664976',
'name': 'Number - Men - From 15 to 24 years old',
'frequency': 'A',
'last_update': None,
'first_value': {
'value': '143',
'period': '1975',
'attributes': {
"OBS_STATUS": "A"
},
},
'last_value': {
'value': '359',
'period': '2014',
'attributes': {
"OBS_STATUS": "A"
},
},
'dimensions': {
'INDICATEUR': 'Nbre',
'SEXE': '1',
'AGE': '15-24',
},
'attributes': {
'DECIMALS': '0',
'FREQ': 'A',
'LAST_UPDATE': '2016-02-10',
'REF_AREA': 'FM',
'TIME_PER_COLLECT': 'MOYENNE',
'TITLE': 'Nombre de chômeurs au sens du BIT (moyenne annuelle) - Hommes de 15 à 24 ans - France métropolitaine',
'UNIT_MEASURE': 'IND',
'UNIT_MULT': '3'
},
}
}
class FetcherTestCase(BaseFetcherTestCase):
# nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase
FETCHER_KLASS = Fetcher
DATASETS = {
'IPI-2010-A21': deepcopy(xml_samples.DATA_INSEE_SPECIFIC),
'CHO-AN-AGE': DATA_INSEE_CHO_AN_AGE
}
DATASET_FIRST = "ACT-TRIM-ANC"
DATASET_LAST = "TXEMP-AN-FR"
DEBUG_MODE = False
def _load_files(self, dataset_code, data_key=None):
filepaths = self.DATASETS[dataset_code]["DSD"]["filepaths"]
dsd_content_type = 'application/vnd.sdmx.structure+xml;version=2.1'
url = "http://www.bdm.insee.fr/series/sdmx/dataflow/INSEE"
self.register_url(url,
filepaths["dataflow"],
content_type=dsd_content_type,
match_querystring=True)
url = "http://www.bdm.insee.fr/series/sdmx/categoryscheme/INSEE"
self.register_url(url,
filepaths["categoryscheme"],
content_type=dsd_content_type,
match_querystring=True)
url = "http://www.bdm.insee.fr/series/sdmx/categorisation/INSEE"
self.register_url(url,
filepaths["categorisation"],
content_type=dsd_content_type,
match_querystring=True)
url = "http://www.bdm.insee.fr/series/sdmx/conceptscheme/INSEE"
self.register_url(url,
filepaths["conceptscheme"],
content_type=dsd_content_type,
match_querystring=True)
for cl in ["CL_UNIT", "CL_AREA", "CL_TIME_COLLECT", "CL_OBS_STATUS", "CL_UNIT_MULT", "CL_FREQ"]:
url = "http://www.bdm.insee.fr/series/sdmx/codelist/INSEE/%s" % cl
self.register_url(url,
filepaths[cl],
content_type=dsd_content_type,
match_querystring=True)
url = "http://www.bdm.insee.fr/series/sdmx/datastructure/INSEE/%s?reference=children" % dataset_code
self.register_url(url,
filepaths["datastructure"],
content_type=dsd_content_type,
match_querystring=True)
if data_key:
url = "http://www.bdm.insee.fr/series/sdmx/data/%s/%s" % (dataset_code, data_key)
self.register_url(url,
self.DATASETS[dataset_code]['filepath'],
content_type='application/vnd.sdmx.structurespecificdata+xml;version=2.1',
match_querystring=True)
@httpretty.activate
@unittest.skipUnless('FULL_TEST' in os.environ, "Skip - no full test")
def test_load_datasets_first(self):
dataset_code = 'IPI-2010-A21'
self._load_files(dataset_code)
self.assertLoadDatasetsFirst([dataset_code])
@httpretty.activate
@unittest.skipUnless('FULL_TEST' in os.environ, "Skip - no full test")
def test_load_datasets_update(self):
dataset_code = 'IPI-2010-A21'
self._load_files(dataset_code)
self.assertLoadDatasetsUpdate([dataset_code])
@httpretty.activate
def test_build_data_tree(self):
# nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase.test_build_data_tree
dataset_code = 'IPI-2010-A21'
self._load_files(dataset_code)
self.DATASETS[dataset_code]["DSD"].update(LOCAL_DATASETS_UPDATE[dataset_code])
self.assertDataTree(dataset_code)
@httpretty.activate
@mock.patch('dlstats.fetchers.insee.INSEE_Data._get_dimensions_from_dsd', get_dimensions_from_dsd)
def test_upsert_dataset_ipi_2010_a21(self):
# nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase.test_upsert_dataset_ipi_2010_a21
dataset_code = 'IPI-2010-A21'
self.DATASETS[dataset_code]["DSD"].update(LOCAL_DATASETS_UPDATE[dataset_code])
self.DATASETS[dataset_code]["series_sample"]["attributes"].pop("IDBANK", None)
self._load_files(dataset_code, data_key="M..")
self.assertProvider()
dataset = self.assertDataset(dataset_code)
series_list = self.assertSeries(dataset_code)
self.assertTrue(dataset["last_update"] >= datetime(2016, 1, 8))
self.assertEquals(series_list[0]["last_update_ds"], datetime(2016, 1, 8))
self.assertEquals(series_list[-1]["last_update_ds"], datetime(2013, 3, 11))
@httpretty.activate
@mock.patch('dlstats.fetchers.insee.INSEE_Data._get_dimensions_from_dsd', get_dimensions_from_dsd_CHO_AN_AGE)
def test_upsert_dataset_cho_an_age(self):
# nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase.test_upsert_dataset_cho_an_age
dataset_code = 'CHO-AN-AGE'
self._load_files(dataset_code, data_key="Nbre..")
self.assertProvider()
dataset = self.assertDataset(dataset_code)
series_list = self.assertSeries(dataset_code)
self.assertTrue(dataset["last_update"] >= datetime(2016, 2, 10))
self.assertEquals(series_list[0]["last_update_ds"], datetime(2016, 2, 10))
self.assertEquals(series_list[-1]["last_update_ds"], datetime(2016, 2, 12))
@httpretty.activate
@unittest.skipIf(True, "TODO")
def test_is_updated(self):
# nosetests -s -v dlstats.tests.fetchers.test_insee:FetcherTestCase.test_is_updated
"""
use dataset.series.now
"""
dataset_code = 'IPI-2010-A21'
self._load_files(dataset_code)
self.insee.upsert_dataset(dataset_code)
        '''series with a LAST_UPDATE greater than the dataset's'''
query = {
'provider_name': self.insee.provider_name,
"dataset_code": dataset_code
}
new_datetime = datetime(2015, 12, 9)
result = self.db[constants.COL_DATASETS].update_one(query, {"$set": {'last_update': new_datetime}})
pprint(result.raw_result)
self._load_files(dataset_code)
self.insee.upsert_dataset(dataset_code)
_series = self.insee.insee_data._series
#pprint(_series)
for s in _series:
print(s['key'])
d = self.db[constants.COL_DATASETS].find_one(query)
print("dataset : ", d['last_update'])
self.assertEqual(len(_series), 11)
|
Widukind/dlstats
|
dlstats/tests/fetchers/test_insee.py
|
Python
|
agpl-3.0
| 13,284
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd2w#o#(!antcw5e%(#p5*pu(x=zhw60^byh$)ps+4#e8m#-fj!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.sites',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'grappelli.dashboard',
'grappelli',
'django.contrib.admin',
'adminsortable2',
'ckeditor',
'ckeditor_uploader',
'compressor',
'recurrence',
'kombu.transport.django',
'watson',
'sorl.thumbnail',
'django_filters',
'antxetamedia',
'antxetamedia.frontpage',
'antxetamedia.blobs.apps.BlobsConfig',
'antxetamedia.shows',
'antxetamedia.news.apps.NewsConfig',
'antxetamedia.radio.apps.RadioConfig',
'antxetamedia.projects.apps.ProjectsConfig',
'antxetamedia.schedule',
'antxetamedia.widgets',
'antxetamedia.events.apps.EventsConfig',
'antxetamedia.flatpages',
'antxetamedia.archive',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
SITE_ID = 3
ROOT_URLCONF = 'antxetamedia.urls'
TEMPLATES = [
{
'NAME': 'Only project',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join('antxetamedia/templates')],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'antxetamedia.flatpages.context_processors.menu_flatpage_list',
],
},
},
]
WSGI_APPLICATION = 'antxetamedia.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'eu'
LANGUAGES = [('eu', 'Euskara')]
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [os.path.join(BASE_DIR, 'antxetamedia/locale')]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
MEDIA_ROOT = os.path.join(BASE_DIR, '.media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '.assets')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'antxetamedia/static')]
STATICFILES_FINDERS = [
'compressor.finders.CompressorFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
BROKER_URL = 'django://'
CELERY_ALWAYS_EAGER = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
COMPRESS_PRECOMPILERS = [('text/x-sass', 'django_libsass.SassCompiler')]
CKEDITOR_JQUERY_URL = os.path.join(STATIC_URL, 'bower_components/jquery/dist/jquery.min.js')
CKEDITOR_UPLOAD_PATH = 'ckeditor/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_CONFIGS = {
'default': {
'extraPlugins': 'iframe,autoembed',
'toolbar': [
['Format', 'Bold', 'Italic', 'Underline', 'StrikeThrough', '-',
'NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter',
'JustifyRight', 'JustifyBlock'],
['Image', 'Link', 'Iframe', 'Source'],
['Undo', 'Redo', '-', 'Cut', 'Copy', 'Paste', 'Find', 'Replace', '-', 'Print'],
],
}
}
GRAPPELLI_INDEX_DASHBOARD = 'antxetamedia.dashboard.AntxetamediaDashboard'
GRAPPELLI_ADMIN_TITLE = 'Antxetamedia'
GRAPPELLI_SWITCH_USER = True
GRAPPELLI_CLEAN_INPUT_TYPES = False
FORMAT_MODULE_PATH = 'antxetamedia.formats'
DATA_UPLOAD_MAX_MEMORY_SIZE = None # Disable upload size limits
FRONTPAGE_NEWSPODCASTS = 5
FRONTPAGE_RADIOPODCASTS = 10
FRONTPAGE_EVENTS = 5
NEWSCATEGORIES_COOKIE = 'newscategories'
RADIOSHOWS_COOKIE = 'radioshows'
SYNC_BLOBS = False
|
GISAElkartea/amv2
|
antxetamedia/settings.py
|
Python
|
agpl-3.0
| 4,765
|
# Copyright 2017 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api
DIGITS = (16, 2)
class ReportJournalLedger(models.TransientModel):
_name = 'report_journal_ledger'
date_from = fields.Date(
required=True
)
date_to = fields.Date(
required=True
)
company_id = fields.Many2one(
comodel_name='res.company',
required=True,
ondelete='cascade'
)
move_target = fields.Selection(
selection='_get_move_targets',
default='all',
required=True,
)
sort_option = fields.Selection(
selection='_get_sort_options',
default='move_name',
required=True,
)
group_option = fields.Selection(
selection='_get_group_options',
default='journal',
required=True,
)
journal_ids = fields.Many2many(
comodel_name='account.journal',
required=True,
)
report_journal_ledger_ids = fields.One2many(
comodel_name='report_journal_ledger_journal',
inverse_name='report_id',
)
report_move_ids = fields.One2many(
comodel_name='report_journal_ledger_move',
inverse_name='report_id',
)
report_move_line_ids = fields.One2many(
comodel_name='report_journal_ledger_move_line',
inverse_name='report_id',
)
report_journal_ledger_tax_line_ids = fields.One2many(
comodel_name='report_journal_ledger_journal_tax_line',
inverse_name='report_id',
)
report_tax_line_ids = fields.One2many(
comodel_name='report_journal_ledger_report_tax_line',
inverse_name='report_id',
)
foreign_currency = fields.Boolean()
with_account_name = fields.Boolean()
@api.model
def _get_move_targets(self):
return self.env['journal.ledger.report.wizard']._get_move_targets()
@api.model
def _get_sort_options(self):
return self.env['journal.ledger.report.wizard']._get_sort_options()
@api.model
def _get_group_options(self):
return self.env['journal.ledger.report.wizard']._get_group_options()
@api.multi
def compute_data_for_report(self):
self.ensure_one()
self._inject_journal_values()
self._inject_move_values()
self._inject_move_line_values()
self._inject_journal_tax_values()
self._update_journal_report_total_values()
if self.group_option == 'none':
self._inject_report_tax_values()
# Refresh cache because all data are computed with SQL requests
self.invalidate_cache()
@api.multi
def _inject_journal_values(self):
self.ensure_one()
sql = """
DELETE
FROM report_journal_ledger_journal
WHERE report_id = %s
"""
params = (
self.id,
)
self.env.cr.execute(sql, params)
sql = """
INSERT INTO report_journal_ledger_journal (
create_uid,
create_date,
report_id,
journal_id,
name,
code,
company_id,
currency_id
)
SELECT
%s as create_uid,
NOW() as create_date,
%s as report_id,
aj.id as journal_id,
aj.name as name,
aj.code as code,
aj.company_id as company_id,
COALESCE(aj.currency_id, company.currency_id) as currency_id
FROM
account_journal aj
LEFT JOIN
res_company company on (company.id = aj.company_id)
WHERE
aj.id in %s
AND
aj.company_id = %s
ORDER BY
aj.name
"""
params = (
self.env.uid,
self.id,
tuple(self.journal_ids.ids),
self.company_id.id,
)
self.env.cr.execute(sql, params)
@api.multi
def _inject_move_values(self):
self.ensure_one()
sql = """
DELETE
FROM report_journal_ledger_move
WHERE report_id = %s
"""
params = (
self.id,
)
self.env.cr.execute(sql, params)
sql = self._get_inject_move_insert()
sql += self._get_inject_move_select()
sql += self._get_inject_move_where_clause()
sql += self._get_inject_move_order_by()
params = self._get_inject_move_params()
self.env.cr.execute(sql, params)
@api.multi
def _get_inject_move_insert(self):
return """
INSERT INTO report_journal_ledger_move (
create_uid,
create_date,
report_id,
report_journal_ledger_id,
move_id,
name,
company_id
)
"""
@api.multi
def _get_inject_move_select(self):
return """
SELECT
%s as create_uid,
NOW() as create_date,
rjqj.report_id as report_id,
rjqj.id as report_journal_ledger_id,
am.id as move_id,
am.name as name,
am.company_id as company_id
FROM
account_move am
INNER JOIN
report_journal_ledger_journal rjqj
on (rjqj.journal_id = am.journal_id)
"""
@api.multi
def _get_inject_move_where_clause(self):
self.ensure_one()
where_clause = """
WHERE
rjqj.report_id = %s
AND
am.date >= %s
AND
am.date <= %s
"""
if self.move_target != 'all':
where_clause += """
AND
am.state = %s
"""
return where_clause
@api.multi
def _get_inject_move_order_by(self):
self.ensure_one()
order_by = """
ORDER BY
"""
if self.sort_option == 'move_name':
order_by += " am.name"
elif self.sort_option == 'date':
order_by += " am.date, am.name"
return order_by
@api.multi
def _get_inject_move_params(self):
params = [
self.env.uid,
self.id,
self.date_from,
self.date_to
]
if self.move_target != 'all':
params.append(self.move_target)
return tuple(params)
@api.multi
def _inject_move_line_values(self):
self.ensure_one()
sql = """
DELETE
FROM report_journal_ledger_move_line
WHERE report_id = %s
"""
params = (
self.id,
)
self.env.cr.execute(sql, params)
sql = """
INSERT INTO report_journal_ledger_move_line (
create_uid,
create_date,
report_id,
report_journal_ledger_id,
report_move_id,
move_line_id,
account_id,
account,
account_code,
account_type,
partner_id,
partner,
date,
entry,
label,
debit,
credit,
company_currency_id,
amount_currency,
currency_id,
currency_name,
tax_id,
taxes_description,
company_id
)
SELECT
%s as create_uid,
NOW() as create_date,
rjqm.report_id as report_id,
rjqm.report_journal_ledger_id as report_journal_ledger_id,
rjqm.id as report_move_id,
aml.id as move_line_id,
aml.account_id as account_id,
aa.name as account,
aa.code as account_code,
aa.internal_type as account_type,
aml.partner_id as partner_id,
p.name as partner,
aml.date as date,
rjqm.name as entry,
aml.name as label,
aml.debit as debit,
aml.credit as credit,
                aml.company_currency_id as company_currency_id,
aml.amount_currency as amount_currency,
aml.currency_id as currency_id,
currency.name as currency_name,
aml.tax_line_id as tax_id,
CASE
WHEN
aml.tax_line_id is not null
THEN
COALESCE(at.description, at.name)
WHEN
aml.tax_line_id is null
THEN
(SELECT
array_to_string(
array_agg(COALESCE(at.description, at.name)
), ', ')
FROM
account_move_line_account_tax_rel aml_at_rel
LEFT JOIN
account_tax at on (at.id = aml_at_rel.account_tax_id)
WHERE
aml_at_rel.account_move_line_id = aml.id)
ELSE
''
END as taxes_description,
aml.company_id as company_id
FROM
account_move_line aml
INNER JOIN
report_journal_ledger_move rjqm
on (rjqm.move_id = aml.move_id)
LEFT JOIN
account_account aa
on (aa.id = aml.account_id)
LEFT JOIN
res_partner p
on (p.id = aml.partner_id)
LEFT JOIN
account_tax at
on (at.id = aml.tax_line_id)
LEFT JOIN
res_currency currency
on (currency.id = aml.currency_id)
WHERE
rjqm.report_id = %s
"""
params = (
self.env.uid,
self.id,
)
self.env.cr.execute(sql, params)
@api.multi
def _inject_report_tax_values(self):
self.ensure_one()
sql_distinct_tax_id = """
SELECT
distinct(jrqjtl.tax_id)
FROM
report_journal_ledger_journal_tax_line jrqjtl
WHERE
jrqjtl.report_id = %s
"""
self.env.cr.execute(sql_distinct_tax_id, (self.id,))
rows = self.env.cr.fetchall()
tax_ids = set([row[0] for row in rows])
sql = """
INSERT INTO report_journal_ledger_report_tax_line (
create_uid,
create_date,
report_id,
tax_id,
tax_name,
tax_code,
base_debit,
base_credit,
tax_debit,
tax_credit
)
SELECT
%s as create_uid,
NOW() as create_date,
%s as report_id,
%s as tax_id,
at.name as tax_name,
at.description as tax_code,
(
SELECT sum(base_debit)
FROM report_journal_ledger_journal_tax_line jrqjtl2
WHERE jrqjtl2.report_id = %s
AND jrqjtl2.tax_id = %s
) as base_debit,
(
SELECT sum(base_credit)
FROM report_journal_ledger_journal_tax_line jrqjtl2
WHERE jrqjtl2.report_id = %s
AND jrqjtl2.tax_id = %s
) as base_credit,
(
SELECT sum(tax_debit)
FROM report_journal_ledger_journal_tax_line jrqjtl2
WHERE jrqjtl2.report_id = %s
AND jrqjtl2.tax_id = %s
) as tax_debit,
(
SELECT sum(tax_credit)
FROM report_journal_ledger_journal_tax_line jrqjtl2
WHERE jrqjtl2.report_id = %s
AND jrqjtl2.tax_id = %s
) as tax_credit
FROM
report_journal_ledger_journal_tax_line jrqjtl
LEFT JOIN
account_tax at
on (at.id = jrqjtl.tax_id)
WHERE
jrqjtl.report_id = %s
AND
jrqjtl.tax_id = %s
"""
for tax_id in tax_ids:
params = (
self.env.uid,
self.id,
tax_id,
self.id,
tax_id,
self.id,
tax_id,
self.id,
tax_id,
self.id,
tax_id,
self.id,
tax_id,
)
self.env.cr.execute(sql, params)
@api.multi
def _inject_journal_tax_values(self):
self.ensure_one()
sql = """
DELETE
FROM report_journal_ledger_journal_tax_line
WHERE report_id = %s
"""
params = (
self.id,
)
self.env.cr.execute(sql, params)
sql_distinct_tax_id = """
SELECT
distinct(jrqml.tax_id)
FROM
report_journal_ledger_move_line jrqml
WHERE
jrqml.report_journal_ledger_id = %s
"""
tax_ids_by_journal_id = {}
for report_journal in self.report_journal_ledger_ids:
if report_journal.id not in tax_ids_by_journal_id:
tax_ids_by_journal_id[report_journal.id] = []
self.env.cr.execute(sql_distinct_tax_id, (report_journal.id,))
rows = self.env.cr.fetchall()
tax_ids_by_journal_id[report_journal.id].extend([
row[0] for row in rows if row[0]
])
sql = """
INSERT INTO report_journal_ledger_journal_tax_line (
create_uid,
create_date,
report_id,
report_journal_ledger_id,
tax_id,
tax_name,
tax_code,
base_debit,
base_credit,
tax_debit,
tax_credit
)
SELECT
%s as create_uid,
NOW() as create_date,
%s as report_id,
%s as report_journal_ledger_id,
%s as tax_id,
at.name as tax_name,
at.description as tax_code,
(
SELECT sum(debit)
FROM report_journal_ledger_move_line jrqml2
WHERE jrqml2.report_journal_ledger_id = %s
AND (
SELECT
count(*)
FROM
account_move_line_account_tax_rel aml_at_rel
WHERE
aml_at_rel.account_move_line_id =
jrqml2.move_line_id
AND
aml_at_rel.account_tax_id = %s
) > 0
) as base_debit,
(
SELECT sum(credit)
FROM report_journal_ledger_move_line jrqml2
WHERE jrqml2.report_journal_ledger_id = %s
AND (
SELECT
count(*)
FROM
account_move_line_account_tax_rel aml_at_rel
WHERE
aml_at_rel.account_move_line_id =
jrqml2.move_line_id
AND
aml_at_rel.account_tax_id = %s
) > 0
) as base_credit,
(
SELECT sum(debit)
FROM report_journal_ledger_move_line jrqml2
WHERE jrqml2.report_journal_ledger_id = %s
AND jrqml2.tax_id = %s
) as tax_debit,
(
SELECT sum(credit)
FROM report_journal_ledger_move_line jrqml2
WHERE jrqml2.report_journal_ledger_id = %s
AND jrqml2.tax_id = %s
) as tax_credit
FROM
report_journal_ledger_journal rjqj
LEFT JOIN
account_tax at
on (at.id = %s)
WHERE
rjqj.id = %s
"""
for report_journal_ledger_id in tax_ids_by_journal_id:
tax_ids = tax_ids_by_journal_id[report_journal_ledger_id]
for tax_id in tax_ids:
params = (
self.env.uid,
self.id,
report_journal_ledger_id,
tax_id,
report_journal_ledger_id,
tax_id,
report_journal_ledger_id,
tax_id,
report_journal_ledger_id,
tax_id,
report_journal_ledger_id,
tax_id,
tax_id,
report_journal_ledger_id,
)
self.env.cr.execute(sql, params)
@api.multi
def _update_journal_report_total_values(self):
self.ensure_one()
sql = """
UPDATE
report_journal_ledger_journal rjqj
SET
debit = (
SELECT sum(rjqml.debit)
FROM report_journal_ledger_move_line rjqml
WHERE rjqml.report_journal_ledger_id = rjqj.id
),
credit = (
SELECT sum(rjqml.credit)
FROM report_journal_ledger_move_line rjqml
WHERE rjqml.report_journal_ledger_id = rjqj.id
)
WHERE
rjqj.report_id = %s
"""
self.env.cr.execute(sql, (self.id,))
@api.multi
def print_report(self, report_type):
self.ensure_one()
if report_type == 'xlsx':
report_name = 'a_f_r.report_journal_ledger_xlsx'
else:
report_name = 'account_financial_report.' \
'report_journal_ledger_qweb'
return self.env['ir.actions.report'].search(
[('report_name', '=', report_name),
('report_type', '=', report_type)], limit=1).report_action(self)
def _get_html(self):
result = {}
rcontext = {}
context = dict(self.env.context)
report = self.browse(context.get('active_id'))
if report:
rcontext['o'] = report
result['html'] = self.env.ref(
'account_financial_report.report_journal_ledger').render(
rcontext)
return result
@api.model
def get_html(self, given_context=None):
return self._get_html()
class ReportJournalLedgerJournal(models.TransientModel):
_name = 'report_journal_ledger_journal'
name = fields.Char(
required=True,
)
code = fields.Char()
report_id = fields.Many2one(
comodel_name='report_journal_ledger',
required=True,
ondelete='cascade'
)
journal_id = fields.Many2one(
comodel_name='account.journal',
required=True,
ondelete='cascade',
)
report_move_ids = fields.One2many(
comodel_name='report_journal_ledger_move',
inverse_name='report_journal_ledger_id',
)
report_tax_line_ids = fields.One2many(
comodel_name='report_journal_ledger_journal_tax_line',
inverse_name='report_journal_ledger_id',
)
debit = fields.Float(
digits=DIGITS,
)
credit = fields.Float(
digits=DIGITS,
)
company_id = fields.Many2one(
comodel_name='res.company',
required=True,
ondelete='cascade'
)
currency_id = fields.Many2one(
comodel_name='res.currency',
)
class ReportJournalLedgerMove(models.TransientModel):
_name = 'report_journal_ledger_move'
report_id = fields.Many2one(
comodel_name='report_journal_ledger',
required=True,
ondelete='cascade'
)
report_journal_ledger_id = fields.Many2one(
comodel_name='report_journal_ledger_journal',
required=True,
ondelete='cascade',
)
move_id = fields.Many2one(
comodel_name='account.move',
required=True,
ondelete='cascade',
)
report_move_line_ids = fields.One2many(
comodel_name='report_journal_ledger_move_line',
inverse_name='report_move_id',
)
name = fields.Char()
company_id = fields.Many2one(
comodel_name='res.company',
required=True,
ondelete='cascade'
)
class ReportJournalLedgerMoveLine(models.TransientModel):
_name = 'report_journal_ledger_move_line'
_order = 'partner_id desc, account_id desc'
report_id = fields.Many2one(
comodel_name='report_journal_ledger',
required=True,
ondelete='cascade'
)
report_journal_ledger_id = fields.Many2one(
comodel_name='report_journal_ledger_journal',
required=True,
ondelete='cascade',
)
report_move_id = fields.Many2one(
comodel_name='report_journal_ledger_move',
required=True,
ondelete='cascade',
)
move_line_id = fields.Many2one(
comodel_name='account.move.line',
required=True,
ondelete='cascade',
)
account_id = fields.Many2one(
comodel_name='account.account'
)
account = fields.Char()
account_code = fields.Char()
account_type = fields.Char()
partner = fields.Char()
partner_id = fields.Many2one(
comodel_name='res.partner',
)
date = fields.Date()
entry = fields.Char()
label = fields.Char()
debit = fields.Float(
digits=DIGITS,
)
credit = fields.Float(
digits=DIGITS,
)
company_currency_id = fields.Many2one(
comodel_name='res.currency',
)
amount_currency = fields.Monetary(
currency_field='currency_id',
)
currency_id = fields.Many2one(
comodel_name='res.currency',
)
currency_name = fields.Char()
taxes_description = fields.Char()
tax_id = fields.Many2one(
comodel_name='account.tax'
)
company_id = fields.Many2one(
comodel_name='res.company',
required=True,
ondelete='cascade'
)
class ReportJournalLedgerReportTaxLine(models.TransientModel):
_name = 'report_journal_ledger_report_tax_line'
_order = 'tax_code'
report_id = fields.Many2one(
comodel_name='report_journal_ledger',
required=True,
ondelete='cascade'
)
tax_id = fields.Many2one(
comodel_name='account.tax'
)
tax_name = fields.Char()
tax_code = fields.Char()
base_debit = fields.Float(
digits=DIGITS,
)
base_credit = fields.Float(
digits=DIGITS,
)
base_balance = fields.Float(
digits=DIGITS,
compute='_compute_base_balance',
)
tax_debit = fields.Float(
digits=DIGITS,
)
tax_credit = fields.Float(
digits=DIGITS,
)
tax_balance = fields.Float(
digits=DIGITS,
compute='_compute_tax_balance'
)
@api.multi
def _compute_base_balance(self):
for rec in self:
rec.base_balance = rec.base_debit - rec.base_credit
@api.multi
def _compute_tax_balance(self):
for rec in self:
rec.tax_balance = rec.tax_debit - rec.tax_credit
class ReportJournalLedgerJournalTaxLine(models.TransientModel):
_name = 'report_journal_ledger_journal_tax_line'
_inherit = 'report_journal_ledger_report_tax_line'
_order = 'tax_code'
report_journal_ledger_id = fields.Many2one(
comodel_name='report_journal_ledger_journal',
required=True,
ondelete='cascade',
)
|
BT-astauder/account-financial-reporting
|
account_financial_report/report/journal_ledger.py
|
Python
|
agpl-3.0
| 24,510
|
from django.apps import AppConfig
class ChangeanalysisConfig(AppConfig):
name = 'diffanalysis'
|
Cartocite/osmada
|
diffanalysis/apps.py
|
Python
|
agpl-3.0
| 101
|
#!/usr/bin/env python3
from tja_info import *
from sys import argv, stderr
if __name__ == '__main__':
info = None
parse_level = 3
try:
parse_level = TJAInfo.parse_course(argv[2])
except:
pass
for codec in ['utf-8', 'shift-jis', 'gbk']:
try:
            with open(argv[1], encoding=codec) as d:
info = TJAInfo(d.read())
break
except:
pass
if not info:
print('Could not parse', argv[1], file=stderr)
exit(1)
print(f'Parse level {parse_level} of {argv[1]}', file=stderr)
headers = {
'#PLAYER': '1',
'#RANK': '3',
'#DIFFICULTY': '4',
'#STAGEFILE': '',
'#GENRE': info.headers['SUBTITLE'],
'#TITLE': '[TJA] ' + info.headers['TITLE'],
'#ARTIST': 'TJA',
'#BPM': info.headers['BPM'],
'#PLAYLEVEL': info.headers['LEVELS'][3],
'#WAV02': 'out.wav',
'#WAVDD': 'dong.wav',
'#WAVCC': 'ka.wav',
}
print('*---------------------- HEADER FIELD')
for k, v in headers.items():
print(k, v)
print('\n*---------------------- MAIN DATA FIELD\n#00001:02\n')
    # One 4/4 measure at the chart's BPM, divided into 192 ticks per measure.
    section_seconds = 4 * (60 / float(info.headers['BPM']))
    measure_seconds = section_seconds / 192
    # TJA OFFSET (seconds) as a #STOP length in ticks, minus the two lead-in measures.
    stop_count = round(-float(info.headers['OFFSET']) / measure_seconds) - (2 * 192)
print(f'#STOP01 {stop_count}')
    print('#00009:01')
small_notes_counter = 0
for s_num, s in enumerate(info.beatmaps[parse_level]):
s_num = s_num + 2
notes = tuple(filter(lambda o: isinstance(o, NoteTypes), s))
rr_notes = ['00'] * len(notes)
rl_notes = ['00'] * len(notes)
br_notes = ['00'] * len(notes)
bl_notes = ['00'] * len(notes)
for t, n in enumerate(notes):
if n == NoteTypes.BIG_RED:
rr_notes[t], rl_notes[t] = 'DD', 'DD'
elif n == NoteTypes.BIG_BLUE:
br_notes[t], bl_notes[t] = 'CC', 'CC'
else:
sel_notes = None
if n == NoteTypes.RED:
sel_notes = rr_notes if small_notes_counter % 2 == 0 else rl_notes
elif n == NoteTypes.BLUE:
sel_notes = br_notes if small_notes_counter % 2 == 0 else bl_notes
else:
continue
sel_notes[t] = 'DD' if n == NoteTypes.RED else 'CC'
small_notes_counter += 1
m = {12: bl_notes, 13: rl_notes, 15: rr_notes, 18: br_notes}
# print(m)
for channel, ch_notes in m.items():
if not len(ch_notes) or all(map(lambda n: n == '00', ch_notes)):
continue
print('#{:03d}{}:{}'.format(s_num, channel, ''.join(ch_notes)))
# print(s_num, notes, file=stderr)
current_measure = 1
bpm_change_counter = 1
for s_num, s in enumerate(info.beatmaps[parse_level]):
s_num = s_num + 2
non_notes = tuple(filter(lambda o: not isinstance(o, NoteTypes), s))
measures = tuple(filter(lambda o: isinstance(o, Measure), non_notes))
if len(measures):
current_measure = measures[0].value.numerator / measures[0].value.denominator
if current_measure != 1:
print('#{:03d}02:{}'.format(s_num, current_measure))
bpm_changes = tuple(filter(lambda o: isinstance(o, BPMChange), non_notes))
bpm_channel_notes = []
for c in bpm_changes:
print('#BPM{:02d}:{}'.format(bpm_change_counter, c.new_bpm))
bpm_channel_notes.append('{:02d}'.format(bpm_change_counter))
bpm_change_counter += 1
if bpm_channel_notes:
print('#{:03d}08:{}'.format(s_num, ''.join(bpm_channel_notes)))
|
Saren-Arterius/textage2bms
|
tja2bms.py
|
Python
|
agpl-3.0
| 3,766
|
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Figure out server address for the maas_url setting."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = [
'guess_server_address',
]
from fcntl import ioctl
from os import environ
import re
import socket
import struct
from subprocess import check_output
from metadataserver import logger
# fcntl operation as defined in <ioctls.h>. This is GNU/Linux-specific!
SIOCGIFADDR = 0x8915
def get_command_output(*command_line):
"""Execute a command line, and return its output.
Raises an exception if return value is nonzero.
:param *command_line: Words for the command line. No shell expansions
are performed.
:type *command_line: Sequence of unicode.
:return: Output from the command.
:rtype: List of unicode, one per line.
"""
env = {
variable: value
for variable, value in environ.items()
if not variable.startswith('LC_')
}
env.update({
'LC_ALL': 'C',
'LANG': 'en_US.UTF-8',
})
return check_output(command_line, env=env).splitlines()
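# Illustrative call (output lines invented, not taken from any real host):
# get_command_output('/bin/ip', '-oneline', 'route', 'show')
# -> ['default via 10.0.0.1 dev eth0', '10.0.0.0/24 dev eth0  scope link']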
def find_default_interface(ip_route_output):
"""Find the network interface used for the system's default route.
If no default is found, makes a guess.
:param ip_route_output: Output lines from "ip route show" output.
:type ip_route_output: Sequence of unicode.
:return: unicode, or None.
"""
route_lines = list(ip_route_output)
for line in route_lines:
match = re.match('default\s+.*\sdev\s+(\w+)', line)
if match is not None:
return match.groups()[0]
# Still nothing? Try the first recognizable interface in the list.
for line in route_lines:
match = re.match('\s*(?:\S+\s+)*dev\s+(\w+)', line)
if match is not None:
return match.groups()[0]
return None
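# Illustrative behaviour (route lines invented):
# find_default_interface(['default via 10.0.0.1 dev eth0  proto static']) -> 'eth0'
# find_default_interface(['10.0.0.0/24 dev wlan0  scope link']) -> 'wlan0'  (fallback guess)
# find_default_interface([]) -> None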
def get_ip_address(interface):
"""Get the IP address for a given network interface."""
# Apparently the netifaces module would do this for us.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
interface_name = struct.pack(b'256s', interface[:15])
try:
info = ioctl(s.fileno(), SIOCGIFADDR, interface_name)
except IOError as e:
logger.warn(
"Could not determine address for apparent default interface %s "
"(%s)"
% (interface, e))
return None
return socket.inet_ntoa(info[20:24])
def guess_server_address():
"""Make a guess as to this server's IP address."""
ip_route_output = get_command_output(
'/bin/ip', '-oneline', 'route', 'show')
interface = find_default_interface(ip_route_output)
if interface is None:
return socket.gethostname()
else:
return get_ip_address(interface)
|
cloudbase/maas
|
src/metadataserver/address.py
|
Python
|
agpl-3.0
| 2,953
|
# Copyright © 2017 Tom Hacohen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, version 3.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
class JournalConfig(AppConfig):
name = 'journal'
|
etesync/journal-manager
|
journal/apps.py
|
Python
|
agpl-3.0
| 700
|
from paste.deploy.converters import asbool
from pylons import tmpl_context as c, config
from adhocracy.lib.auth.authorization import has
from adhocracy.lib.auth.authorization import NOT_LOGGED_IN
def index(check):
check.perm('user.view')
def show(check, u):
check.perm('user.view')
check.other('user_deleted', u.is_deleted())
def create(check):
check.other('user_logged_in', c.user is not None)
def edit(check, u):
if has('user.manage'):
return
show(check, u)
check.other('user_not_self', u != c.user)
check.other(NOT_LOGGED_IN, not c.user)
def manage(check, u):
check.perm('user.manage')
def message(check, u):
check.perm('user.message')
check.other('user_is_self', u == c.user)
check.other('user_without_email', u.email is None)
def supervise(check, u):
check.other('not_in_instance', not c.instance)
check.other('no_member_in_instance', not u.is_member(c.instance))
check.other('not_user.manage_or_instance.admin',
not (has('user.manage') or has('instance.admin')))
def show_dashboard(check, u):
show(check, u)
check.other('user_not_self', u != c.user)
show_watchlist = show_dashboard
def delete(check, u):
edit(check, u)
allowed = asbool(config.get('adhocracy.self_deletion_allowed', 'true'))
check.other('self_deletion_allowed', not allowed)
def vote(check):
check.other('vote_prohibited', has('vote.prohibit'))
check.other('not_in_instance', not c.instance)
check.other(NOT_LOGGED_IN, not c.user)
check.perm('vote.cast')
|
SysTheron/adhocracy
|
src/adhocracy/lib/auth/user.py
|
Python
|
agpl-3.0
| 1,569
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.db.models import Q
from taiga.importers.trello.importer import TrelloImporter
from taiga.users.models import User
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--token', dest="token", type=str,
help='Auth token')
parser.add_argument('--project-id', dest="project_id", type=str,
help='Project ID or full name (ex: taigaio/taiga-back)')
parser.add_argument('--template', dest='template', default="kanban",
help='template to use: scrum or kanban (default kanban)')
parser.add_argument('--ask-for-users', dest='ask_for_users', const=True,
action="store_const", default=False,
                            help='Interactively map Trello users to Taiga users')
parser.add_argument('--closed-data', dest='closed_data', const=True,
action="store_const", default=False,
help='Import closed data')
parser.add_argument('--keep-external-reference', dest='keep_external_reference', const=True,
action="store_const", default=False,
help='Store external reference of imported data')
def handle(self, *args, **options):
admin = User.objects.get(username="admin")
if options.get('token', None):
token = options.get('token')
else:
(oauth_token, oauth_token_secret, url) = TrelloImporter.get_auth_url()
print("Go to here and come with your token: {}".format(url))
oauth_verifier = input("Code: ")
access_data = TrelloImporter.get_access_token(oauth_token, oauth_token_secret, oauth_verifier)
token = access_data['oauth_token']
print("Access token: {}".format(token))
importer = TrelloImporter(admin, token)
if options.get('project_id', None):
project_id = options.get('project_id')
else:
print("Select the project to import:")
for project in importer.list_projects():
print("- {}: {}".format(project['id'], project['name']))
project_id = input("Project id: ")
users_bindings = {}
if options.get('ask_for_users', None):
print("Add the username or email for next trello users:")
for user in importer.list_users(project_id):
while True:
username_or_email = input("{}: ".format(user['fullName']))
if username_or_email == "":
break
try:
users_bindings[user['id']] = User.objects.get(Q(username=username_or_email) | Q(email=username_or_email))
break
except User.DoesNotExist:
print("ERROR: Invalid username or email")
options = {
"template": options.get('template'),
"import_closed_data": options.get("closed_data", False),
"users_bindings": users_bindings,
"keep_external_reference": options.get('keep_external_reference')
}
importer.import_project(project_id, options)
|
taigaio/taiga-back
|
taiga/importers/management/commands/import_from_trello.py
|
Python
|
agpl-3.0
| 4,032
|
# Migmig Constant variables
# Usually, constants are defined at module level ...
OK = '200'
DONE = '199'
UNKNOWN = '666'
UNKNOWN_HEADER = '667'
BAD_IDENTIFIER = '97'
BAD_URL = '98'
NOT_FOUND = '404'
RANGE_NOT_SUPPORTED = '198'
|
dotamin/migmig
|
server/constants.py
|
Python
|
agpl-3.0
| 232
|
import ConfigParser
import smtplib, email, email.encoders, email.mime.text, email.mime.base
import string
config = ConfigParser.ConfigParser()
config.read('/etc/chronojump.conf')
def user_comments_from_log(log):
user_comment_separator = "----------"
if log.startswith(user_comment_separator):
text = log.split(user_comment_separator)[1]
return "%s\n%s\n%s" % (user_comment_separator, text, user_comment_separator)
return ""
def metadata_to_dictionary(metadata):
"""
Given metadata like "1.5.2.0-email_del_usuari2-o-no@usuari.com"
returns {'version':'1.5.2.0', 'email':'email_del_usuari2-o-no@usuari.com'}
"""
m = {}
m['version'] = metadata.split("-", 1)[0]
m['email'] = metadata.split("-",1)[-1]
return m
def send(metadata_str, filename, attachment_data):
metadata = metadata_to_dictionary(metadata_str)
d = {'user_comments':user_comments_from_log(attachment_data)}
body = string.Template("""Hola,
Hi ha un nou error log al servidor.
$user_comments
El Servidor
""").substitute(d)
emailMsg = email.MIMEMultipart.MIMEMultipart()
emailMsg['Subject'] = "Crash log - %s - %s" % (metadata['version'],
metadata['email'])
emailMsg['From'] = config.get("notification_emails", "from")
emailMsg['To'] = config.get("notification_emails", "to")
emailMsg.attach(email.mime.Text.MIMEText(body))
# Now attach the file
fileMsg = email.mime.base.MIMEBase('application','octet/stream')
fileMsg.set_payload(attachment_data)
email.encoders.encode_base64(fileMsg)
fileMsg.add_header('Content-Disposition','attachment;filename="%s"' % (filename))
emailMsg.attach(fileMsg)
server = smtplib.SMTP('localhost')
server.sendmail(config.get("notification_emails", "from"),
config.get("notification_emails", "to"),
emailMsg.as_string())
if __name__ == "__main__":
data = """----------
This is a comment
from the user
----------
this is a log
from C# :-)
"""
send("1.5.2.0-email_del_usuari2@usuari.com", "this is the file.txt", data)
|
GNOME/chronojump-server
|
email_error_log.py
|
Python
|
agpl-3.0
| 2,145
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil
import email
try:
import simplejson as json
except ImportError:
import json
from lxml import etree
import logging
import pytz
import time
import xmlrpclib
from email.message import Message
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.mail.mail_message import decode
from openerp.osv import fields, osv, orm
from openerp.osv.orm import browse_record, browse_null
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
def decode_header(message, header, separator=' '):
return separator.join(map(decode, filter(None, message.get_all(header, []))))
class mail_thread(osv.AbstractModel):
''' mail_thread model is meant to be inherited by any model that needs to
act as a discussion topic on which messages can be attached. Public
methods are prefixed with ``message_`` in order to avoid name
collisions with methods of the models that will inherit from this class.
``mail.thread`` defines fields used to handle and display the
communication history. ``mail.thread`` also manages followers of
inheriting classes. All features and expected behavior are managed
        by mail.thread. Widgets have been designed for the 7.0 and following
versions of OpenERP.
Inheriting classes are not required to implement any method, as the
default implementation will work for any model. However it is common
to override at least the ``message_new`` and ``message_update``
methods (calling ``super``) to add model-specific behavior at
creation and update of a thread when processing incoming emails.
Options:
- _mail_flat_thread: if set to True, all messages without parent_id
are automatically attached to the first message posted on the
                resource. If set to False, the display of Chatter is done using
threads, and no parent_id is automatically set.
'''
_name = 'mail.thread'
_description = 'Email Thread'
_mail_flat_thread = True
_mail_post_access = 'write'
# Automatic logging system if mail installed
# _track = {
# 'field': {
    #       'module.subtype_xml': lambda self, cr, uid, obj, context=None: obj['state'] == 'done',
    #       'module.subtype_xml2': lambda self, cr, uid, obj, context=None: obj['state'] != 'done',
# },
# 'field2': {
# ...
# },
# }
# where
# :param string field: field name
# :param module.subtype_xml: xml_id of a mail.message.subtype (i.e. mail.mt_comment)
# :param obj: is a browse_record
# :param function lambda: returns whether the tracking should record using this subtype
_track = {}
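    # Illustrative sketch (not part of this module): an inheriting model could
    # populate _track as described in the comment above. The model, field and
    # subtype names below ('my.task', 'state', 'my_module.mt_task_done') are
    # hypothetical and only show the expected shape of the mapping.
    #
    #     class my_task(osv.Model):
    #         _name = 'my.task'
    #         _inherit = ['mail.thread']
    #         _track = {
    #             'state': {
    #                 'my_module.mt_task_done': lambda self, cr, uid, obj, context=None: obj['state'] == 'done',
    #             },
    #         }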
def get_empty_list_help(self, cr, uid, help, context=None):
""" Override of BaseModel.get_empty_list_help() to generate an help message
that adds alias information. """
model = context.get('empty_list_help_model')
res_id = context.get('empty_list_help_id')
ir_config_parameter = self.pool.get("ir.config_parameter")
catchall_domain = ir_config_parameter.get_param(cr, uid, "mail.catchall.domain", context=context)
document_name = context.get('empty_list_help_document_name', _('document'))
alias = None
if catchall_domain and model and res_id: # specific res_id -> find its alias (i.e. section_id specified)
object_id = self.pool.get(model).browse(cr, uid, res_id, context=context)
# check that the alias effectively creates new records
if object_id.alias_id and object_id.alias_id.alias_name and \
object_id.alias_id.alias_model_id and \
object_id.alias_id.alias_model_id.model == self._name and \
object_id.alias_id.alias_force_thread_id == 0:
alias = object_id.alias_id
elif catchall_domain and model: # no specific res_id given -> generic help message, take an example alias (i.e. alias of some section_id)
alias_obj = self.pool.get('mail.alias')
alias_ids = alias_obj.search(cr, uid, [("alias_parent_model_id.model", "=", model), ("alias_name", "!=", False), ('alias_force_thread_id', '=', False)], context=context, order='id ASC')
if alias_ids and len(alias_ids) == 1:
alias = alias_obj.browse(cr, uid, alias_ids[0], context=context)
if alias:
alias_email = alias.name_get()[0][1]
return _("""<p class='oe_view_nocontent_create'>
Click here to add new %(document)s or send an email to: <a href='mailto:%(email)s'>%(email)s</a>
</p>
%(static_help)s"""
) % {
'document': document_name,
'email': alias_email,
'static_help': help or ''
}
if document_name != 'document' and help and help.find("oe_view_nocontent_create") == -1:
return _("<p class='oe_view_nocontent_create'>Click here to add new %(document)s</p>%(static_help)s") % {
'document': document_name,
'static_help': help or '',
}
return help
def _get_message_data(self, cr, uid, ids, name, args, context=None):
""" Computes:
- message_unread: has uid unread message for the document
- message_summary: html snippet summarizing the Chatter for kanban views """
res = dict((id, dict(message_unread=False, message_unread_count=0, message_summary=' ')) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
        # search for unread messages, directly in SQL to improve performance
cr.execute(""" SELECT m.res_id FROM mail_message m
RIGHT JOIN mail_notification n
ON (n.message_id = m.id AND n.partner_id = %s AND (n.read = False or n.read IS NULL))
WHERE m.model = %s AND m.res_id in %s""",
(user_pid, self._name, tuple(ids),))
for result in cr.fetchall():
res[result[0]]['message_unread'] = True
res[result[0]]['message_unread_count'] += 1
for id in ids:
if res[id]['message_unread_count']:
title = res[id]['message_unread_count'] > 1 and _("You have %d unread messages") % res[id]['message_unread_count'] or _("You have one unread message")
res[id]['message_summary'] = "<span class='oe_kanban_mail_new' title='%s'><span class='oe_e'>9</span> %d %s</span>" % (title, res[id].pop('message_unread_count'), _("New"))
return res
def read_followers_data(self, cr, uid, follower_ids, context=None):
result = []
technical_group = self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'group_no_one', context=context)
for follower in self.pool.get('res.partner').browse(cr, uid, follower_ids, context=context):
is_editable = uid in map(lambda x: x.id, technical_group.users)
is_uid = uid in map(lambda x: x.id, follower.user_ids)
data = (follower.id,
follower.name,
{'is_editable': is_editable, 'is_uid': is_uid},
)
result.append(data)
return result
def _get_subscription_data(self, cr, uid, ids, name, args, user_pid=None, context=None):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
res = dict((id, dict(message_subtype_data='')) for id in ids)
if user_pid is None:
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
# find current model subtypes, add them to a dictionary
subtype_obj = self.pool.get('mail.message.subtype')
subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
subtype_dict = dict((subtype.name, dict(default=subtype.default, followed=False, id=subtype.id)) for subtype in subtype_obj.browse(cr, uid, subtype_ids, context=context))
for id in ids:
res[id]['message_subtype_data'] = subtype_dict.copy()
# find the document followers, update the data
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, uid, [
('partner_id', '=', user_pid),
('res_id', 'in', ids),
('res_model', '=', self._name),
], context=context)
for fol in fol_obj.browse(cr, uid, fol_ids, context=context):
thread_subtype_dict = res[fol.res_id]['message_subtype_data']
for subtype in fol.subtype_ids:
thread_subtype_dict[subtype.name]['followed'] = True
res[fol.res_id]['message_subtype_data'] = thread_subtype_dict
return res
def _search_message_unread(self, cr, uid, obj=None, name=None, domain=None, context=None):
return [('message_ids.to_read', '=', True)]
def _get_followers(self, cr, uid, ids, name, arg, context=None):
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)])
res = dict((id, dict(message_follower_ids=[], message_is_follower=False)) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids):
res[fol.res_id]['message_follower_ids'].append(fol.partner_id.id)
if fol.partner_id.id == user_pid:
res[fol.res_id]['message_is_follower'] = True
return res
def _set_followers(self, cr, uid, id, name, value, arg, context=None):
if not value:
return
partner_obj = self.pool.get('res.partner')
fol_obj = self.pool.get('mail.followers')
# read the old set of followers, and determine the new set of followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', '=', id)])
old = set(fol.partner_id.id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids))
new = set(old)
for command in value or []:
if isinstance(command, (int, long)):
new.add(command)
elif command[0] == 0:
new.add(partner_obj.create(cr, uid, command[2], context=context))
elif command[0] == 1:
partner_obj.write(cr, uid, [command[1]], command[2], context=context)
new.add(command[1])
elif command[0] == 2:
partner_obj.unlink(cr, uid, [command[1]], context=context)
new.discard(command[1])
elif command[0] == 3:
new.discard(command[1])
elif command[0] == 4:
new.add(command[1])
elif command[0] == 5:
new.clear()
elif command[0] == 6:
new = set(command[2])
# remove partners that are no longer followers
self.message_unsubscribe(cr, uid, [id], list(old-new), context=context)
# add new followers
self.message_subscribe(cr, uid, [id], list(new-old), context=context)
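    # Note on the command tuples handled above (standard OpenERP one2many/many2many
    # write commands). A minimal usage sketch, with hypothetical partner ids:
    #
    #     self.write(cr, uid, [res_id], {'message_follower_ids': [(4, 7)]})         # link partner 7
    #     self.write(cr, uid, [res_id], {'message_follower_ids': [(3, 7)]})         # unlink partner 7
    #     self.write(cr, uid, [res_id], {'message_follower_ids': [(6, 0, [7, 8])]}) # replace followers with partners 7 and 8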
def _search_followers(self, cr, uid, obj, name, args, context):
"""Search function for message_follower_ids
        Do not use with operator 'not in'. Use instead message_is_follower.
"""
fol_obj = self.pool.get('mail.followers')
res = []
for field, operator, value in args:
assert field == name
# TOFIX make it work with not in
assert operator != "not in", "Do not search message_follower_ids with 'not in'"
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('partner_id', operator, value)])
res_ids = [fol.res_id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids)]
res.append(('id', 'in', res_ids))
return res
def _search_is_follower(self, cr, uid, obj, name, args, context):
"""Search function for message_is_follower"""
res = []
for field, operator, value in args:
assert field == name
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
if (operator == '=' and value) or (operator == '!=' and not value): # is a follower
res_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
else: # is not a follower or unknown domain
mail_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
res_ids = self.search(cr, uid, [('id', 'not in', mail_ids)], context=context)
res.append(('id', 'in', res_ids))
return res
_columns = {
'message_is_follower': fields.function(_get_followers, type='boolean',
            fnct_search=_search_is_follower, string='Is a Follower', multi='_get_followers'),
'message_follower_ids': fields.function(_get_followers, fnct_inv=_set_followers,
fnct_search=_search_followers, type='many2many', priority=-10,
obj='res.partner', string='Followers', multi='_get_followers'),
'message_ids': fields.one2many('mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name)],
auto_join=True,
string='Messages',
help="Messages and communication history"),
'message_unread': fields.function(_get_message_data,
fnct_search=_search_message_unread, multi="_get_message_data",
type='boolean', string='Unread Messages',
help="If checked new messages require your attention."),
'message_summary': fields.function(_get_message_data, method=True,
type='text', string='Summary', multi="_get_message_data",
help="Holds the Chatter summary (number of messages, ...). "\
"This summary is directly in html format in order to "\
"be inserted in kanban views."),
}
def _get_user_chatter_options(self, cr, uid, context=None):
options = {
'display_log_button': False
}
group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_user')[1]
is_employee = group_user_id in [group.id for group in group_ids]
if is_employee:
options['display_log_button'] = True
return options
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(mail_thread, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='message_ids']"):
options = json.loads(node.get('options', '{}'))
options.update(self._get_user_chatter_options(cr, uid, context=context))
node.set('options', json.dumps(options))
res['arch'] = etree.tostring(doc)
return res
#------------------------------------------------------
# CRUD overrides for automatic subscription and logging
#------------------------------------------------------
def create(self, cr, uid, values, context=None):
""" Chatter override :
- subscribe uid
- subscribe followers of parent
- log a creation message
"""
if context is None:
context = {}
# subscribe uid unless asked not to
if not context.get('mail_create_nosubscribe'):
pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid).partner_id.id
message_follower_ids = values.get('message_follower_ids') or [] # webclient can send None or False
message_follower_ids.append([4, pid])
values['message_follower_ids'] = message_follower_ids
# add operation to ignore access rule checking for subscription
context_operation = dict(context, operation='create')
else:
context_operation = context
thread_id = super(mail_thread, self).create(cr, uid, values, context=context_operation)
        # automatic logging unless asked not to (mainly for various testing purposes)
if not context.get('mail_create_nolog'):
self.message_post(cr, uid, thread_id, body=_('%s created') % (self._description), context=context)
# auto_subscribe: take values and defaults into account
create_values = dict(values)
for key, val in context.iteritems():
if key.startswith('default_'):
create_values[key[8:]] = val
self.message_auto_subscribe(cr, uid, [thread_id], create_values.keys(), context=context, values=create_values)
# track values
track_ctx = dict(context)
if 'lang' not in track_ctx:
track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
if tracked_fields:
initial_values = {thread_id: dict((item, False) for item in tracked_fields)}
self.message_track(cr, uid, [thread_id], tracked_fields, initial_values, context=track_ctx)
return thread_id
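    # Usage note (illustrative only; the model name and values are hypothetical):
    # callers can opt out of the automatic subscription and the creation log
    # handled above through context keys, e.g.:
    #
    #     ctx = dict(context or {}, mail_create_nosubscribe=True, mail_create_nolog=True)
    #     self.pool['my.model'].create(cr, uid, {'name': 'Imported record'}, context=ctx)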
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
# Track initial values of tracked fields
track_ctx = dict(context)
if 'lang' not in track_ctx:
track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
if tracked_fields:
records = self.browse(cr, uid, ids, context=track_ctx)
initial_values = dict((this.id, dict((key, getattr(this, key)) for key in tracked_fields.keys())) for this in records)
# Perform write, update followers
result = super(mail_thread, self).write(cr, uid, ids, values, context=context)
self.message_auto_subscribe(cr, uid, ids, values.keys(), context=context, values=values)
# Perform the tracking
if tracked_fields:
self.message_track(cr, uid, ids, tracked_fields, initial_values, context=track_ctx)
return result
def unlink(self, cr, uid, ids, context=None):
""" Override unlink to delete messages and followers. This cannot be
cascaded, because link is done through (res_model, res_id). """
msg_obj = self.pool.get('mail.message')
fol_obj = self.pool.get('mail.followers')
# delete messages and notifications
msg_ids = msg_obj.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)], context=context)
msg_obj.unlink(cr, uid, msg_ids, context=context)
# delete
res = super(mail_thread, self).unlink(cr, uid, ids, context=context)
# delete followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)], context=context)
fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default['message_ids'] = []
default['message_follower_ids'] = []
return super(mail_thread, self).copy(cr, uid, id, default=default, context=context)
#------------------------------------------------------
# Automatically log tracked fields
#------------------------------------------------------
def _get_tracked_fields(self, cr, uid, updated_fields, context=None):
""" Return a structure of tracked fields for the current model.
:param list updated_fields: modified field names
            :return: a fields_get-style dict restricted to always tracked
                fields and modified on_change fields
"""
lst = []
for name, column_info in self._all_columns.items():
visibility = getattr(column_info.column, 'track_visibility', False)
if visibility == 'always' or (visibility == 'onchange' and name in updated_fields) or name in self._track:
lst.append(name)
if not lst:
return lst
return self.fields_get(cr, uid, lst, context=context)
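    # Illustrative sketch: a field becomes tracked by declaring track_visibility
    # on its column in an inheriting model (the field name below is hypothetical):
    #
    #     _columns = {
    #         'state': fields.selection([('draft', 'Draft'), ('done', 'Done')],
    #             string='Status', track_visibility='onchange'),
    #     }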
def message_track(self, cr, uid, ids, tracked_fields, initial_values, context=None):
def convert_for_display(value, col_info):
if not value and col_info['type'] == 'boolean':
return 'False'
if not value:
return ''
if col_info['type'] == 'many2one':
return value.name_get()[0][1]
if col_info['type'] == 'selection':
return dict(col_info['selection'])[value]
return value
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, change in tracked_values.items():
message += '<div> • <b>%s</b>: ' % change.get('col_info')
if change.get('old_value'):
message += '%s → ' % change.get('old_value')
message += '%s</div>' % change.get('new_value')
return message
if not tracked_fields:
return True
for browse_record in self.browse(cr, uid, ids, context=context):
initial = initial_values[browse_record.id]
changes = set()
tracked_values = {}
# generate tracked_values data structure: {'col_name': {col_info, new_value, old_value}}
for col_name, col_info in tracked_fields.items():
initial_value = initial[col_name]
record_value = getattr(browse_record, col_name)
if record_value == initial_value and getattr(self._all_columns[col_name].column, 'track_visibility', None) == 'always':
tracked_values[col_name] = dict(col_info=col_info['string'],
new_value=convert_for_display(record_value, col_info))
elif record_value != initial_value and (record_value or initial_value): # because browse null != False
if getattr(self._all_columns[col_name].column, 'track_visibility', None) in ['always', 'onchange']:
tracked_values[col_name] = dict(col_info=col_info['string'],
old_value=convert_for_display(initial_value, col_info),
new_value=convert_for_display(record_value, col_info))
if col_name in tracked_fields:
changes.add(col_name)
if not changes:
continue
# find subtypes and post messages or log if no subtype found
subtypes = []
for field, track_info in self._track.items():
if field not in changes:
continue
for subtype, method in track_info.items():
if method(self, cr, uid, browse_record, context):
subtypes.append(subtype)
posted = False
for subtype in subtypes:
subtype_rec = self.pool.get('ir.model.data').xmlid_to_object(cr, uid, subtype, context=context)
if not (subtype_rec and subtype_rec.exists()):
_logger.debug('subtype %s not found' % subtype)
continue
message = format_message(subtype_rec.description if subtype_rec.description else subtype_rec.name, tracked_values)
self.message_post(cr, uid, browse_record.id, body=message, subtype=subtype, context=context)
posted = True
if not posted:
message = format_message('', tracked_values)
self.message_post(cr, uid, browse_record.id, body=message, context=context)
return True
#------------------------------------------------------
# mail.message wrappers and tools
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
if self._needaction:
return [('message_unread', '=', True)]
return []
def _garbage_collect_attachments(self, cr, uid, context=None):
""" Garbage collect lost mail attachments. Those are attachments
- linked to res_model 'mail.compose.message', the composer wizard
- with res_id 0, because they were created outside of an existing
wizard (typically user input through Chatter or reports
created on-the-fly by the templates)
- unused since at least one day (create_date and write_date)
"""
limit_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
limit_date_str = datetime.datetime.strftime(limit_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ir_attachment_obj = self.pool.get('ir.attachment')
attach_ids = ir_attachment_obj.search(cr, uid, [
('res_model', '=', 'mail.compose.message'),
('res_id', '=', 0),
('create_date', '<', limit_date_str),
('write_date', '<', limit_date_str),
], context=context)
ir_attachment_obj.unlink(cr, uid, attach_ids, context=context)
return True
def check_mail_message_access(self, cr, uid, mids, operation, model_obj=None, context=None):
""" mail.message check permission rules for related document. This method is
meant to be inherited in order to implement addons-specific behavior.
A common behavior would be to allow creating messages when having read
access rule on the document, for portal document such as issues. """
if not model_obj:
model_obj = self
if hasattr(self, '_mail_post_access'):
create_allow = self._mail_post_access
else:
create_allow = 'write'
if operation in ['write', 'unlink']:
check_operation = 'write'
elif operation == 'create' and create_allow in ['create', 'read', 'write', 'unlink']:
check_operation = create_allow
elif operation == 'create':
check_operation = 'write'
else:
check_operation = operation
model_obj.check_access_rights(cr, uid, check_operation)
model_obj.check_access_rule(cr, uid, mids, check_operation, context=context)
def _get_formview_action(self, cr, uid, id, model=None, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
:param string model: specific model that overrides self._name
"""
return {
'type': 'ir.actions.act_window',
'res_model': model or self._name,
'view_type': 'form',
'view_mode': 'form',
'views': [(False, 'form')],
'target': 'current',
'res_id': id,
}
def _get_inbox_action_xml_id(self, cr, uid, context=None):
""" When redirecting towards the Inbox, choose which action xml_id has
to be fetched. This method is meant to be inherited, at least in portal
because portal users have a different Inbox action than classic users. """
return ('mail', 'action_mail_inbox_feeds')
def message_redirect_action(self, cr, uid, context=None):
""" For a given message, return an action that either
- opens the form view of the related document if model, res_id, and
read access to the document
- opens the Inbox with a default search on the conversation if model,
res_id
- opens the Inbox with context propagated
"""
if context is None:
context = {}
# default action is the Inbox action
self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
act_model, act_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, *self._get_inbox_action_xml_id(cr, uid, context=context))
action = self.pool.get(act_model).read(cr, uid, act_id, [])
params = context.get('params')
msg_id = model = res_id = None
if params:
msg_id = params.get('message_id')
model = params.get('model')
res_id = params.get('res_id')
if not msg_id and not (model and res_id):
return action
if msg_id and not (model and res_id):
msg = self.pool.get('mail.message').browse(cr, uid, msg_id, context=context)
if msg.exists():
model, res_id = msg.model, msg.res_id
# if model + res_id found: try to redirect to the document or fallback on the Inbox
if model and res_id:
model_obj = self.pool.get(model)
if model_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
try:
model_obj.check_access_rule(cr, uid, [res_id], 'read', context=context)
if not hasattr(model_obj, '_get_formview_action'):
action = self.pool.get('mail.thread')._get_formview_action(cr, uid, res_id, model=model, context=context)
else:
action = model_obj._get_formview_action(cr, uid, res_id, context=context)
except (osv.except_osv, orm.except_orm):
pass
action.update({
'context': {
'search_default_model': model,
'search_default_res_id': res_id,
}
})
return action
#------------------------------------------------------
# Email specific
#------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Returns the preferred reply-to email address that is basically
the alias of the document, if it exists. """
if not self._inherits.get('mail.alias'):
return [False for id in ids]
return ["%s@%s" % (record['alias_name'], record['alias_domain'])
if record.get('alias_domain') and record.get('alias_name')
else False
for record in self.read(cr, SUPERUSER_ID, ids, ['alias_name', 'alias_domain'], context=context)]
#------------------------------------------------------
# Mail gateway
#------------------------------------------------------
def message_capable_models(self, cr, uid, context=None):
""" Used by the plugin addon, based for plugin_outlook and others. """
ret_dict = {}
for model_name in self.pool.obj_list():
model = self.pool[model_name]
if hasattr(model, "message_process") and hasattr(model, "message_post"):
ret_dict[model_name] = model._description
return ret_dict
def _message_find_partners(self, cr, uid, message, header_fields=['From'], context=None):
""" Find partners related to some header fields of the message.
:param string message: an email.message instance """
s = ', '.join([decode(message.get(h)) for h in header_fields if message.get(h)])
return filter(lambda x: x, self._find_partner_from_emails(cr, uid, None, tools.email_split(s), context=context))
def message_route_verify(self, cr, uid, message, message_dict, route, update_author=True, assert_model=True, create_fallback=True, context=None):
""" Verify route validity. Check and rules:
1 - if thread_id -> check that document effectively exists; otherwise
fallback on a message_new by resetting thread_id
2 - check that message_update exists if thread_id is set; or at least
that message_new exist
            [ - find author_id if update_author is set]
3 - if there is an alias, check alias_contact:
'followers' and thread_id:
check on target document that the author is in the followers
'followers' and alias_parent_thread_id:
check on alias parent document that the author is in the
followers
                'partners': check that author_id is set
"""
assert isinstance(route, (list, tuple)), 'A route should be a list or a tuple'
assert len(route) == 5, 'A route should contain 5 elements: model, thread_id, custom_values, uid, alias record'
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
author_id = message_dict.get('author_id')
model, thread_id, alias = route[0], route[1], route[4]
model_pool = None
def _create_bounce_email():
mail_mail = self.pool.get('mail.mail')
mail_id = mail_mail.create(cr, uid, {
'body_html': '<div><p>Hello,</p>'
'<p>The following email sent to %s cannot be accepted because this is '
'a private email address. Only allowed people can contact us at this address.</p></div>'
'<blockquote>%s</blockquote>' % (message.get('to'), message_dict.get('body')),
'subject': 'Re: %s' % message.get('subject'),
'email_to': message.get('from'),
'auto_delete': True,
}, context=context)
mail_mail.send(cr, uid, [mail_id], context=context)
def _warn(message):
_logger.warning('Routing mail with Message-Id %s: route %s: %s',
message_id, route, message)
# Wrong model
if model and not model in self.pool:
if assert_model:
assert model in self.pool, 'Routing: unknown target model %s' % model
_warn('unknown target model %s' % model)
return ()
elif model:
model_pool = self.pool[model]
# Private message: should not contain any thread_id
if not model and thread_id:
if assert_model:
assert thread_id == 0, 'Routing: posting a message without model should be with a null res_id (private message), received %s.' % thread_id
_warn('posting a message without model should be with a null res_id (private message), received %s, resetting thread_id' % thread_id)
thread_id = 0
# Private message: should have a parent_id (only answers)
if not model and not message_dict.get('parent_id'):
if assert_model:
                assert message_dict.get('parent_id'), 'Routing: posting a message without model should be with a parent_id (private message).'
            _warn('posting a message without model should be with a parent_id (private message), skipping')
return ()
# Existing Document: check if exists; if not, fallback on create if allowed
if thread_id and not model_pool.exists(cr, uid, thread_id):
if create_fallback:
_warn('reply to missing document (%s,%s), fall back on new document creation' % (model, thread_id))
thread_id = None
elif assert_model:
assert model_pool.exists(cr, uid, thread_id), 'Routing: reply to missing document (%s,%s)' % (model, thread_id)
else:
_warn('reply to missing document (%s,%s), skipping' % (model, thread_id))
return ()
# Existing Document: check model accepts the mailgateway
if thread_id and model and not hasattr(model_pool, 'message_update'):
if create_fallback:
_warn('model %s does not accept document update, fall back on document creation' % model)
thread_id = None
elif assert_model:
assert hasattr(model_pool, 'message_update'), 'Routing: model %s does not accept document update, crashing' % model
else:
_warn('model %s does not accept document update, skipping' % model)
return ()
# New Document: check model accepts the mailgateway
if not thread_id and model and not hasattr(model_pool, 'message_new'):
if assert_model:
assert hasattr(model_pool, 'message_new'), 'Model %s does not accept document creation, crashing' % model
_warn('model %s does not accept document creation, skipping' % model)
return ()
# Update message author if asked
# We do it now because we need it for aliases (contact settings)
if not author_id and update_author:
author_ids = self._find_partner_from_emails(cr, uid, thread_id, [email_from], model=model, context=context)
if author_ids:
author_id = author_ids[0]
message_dict['author_id'] = author_id
# Alias: check alias_contact settings
if alias and alias.alias_contact == 'followers' and (thread_id or alias.alias_parent_thread_id):
if thread_id:
obj = self.pool[model].browse(cr, uid, thread_id, context=context)
else:
obj = self.pool[alias.alias_parent_model_id.model].browse(cr, uid, alias.alias_parent_thread_id, context=context)
if not author_id or not author_id in [fol.id for fol in obj.message_follower_ids]:
_warn('alias %s restricted to internal followers, skipping' % alias.alias_name)
_create_bounce_email()
return ()
elif alias and alias.alias_contact == 'partners' and not author_id:
_warn('alias %s does not accept unknown author, skipping' % alias.alias_name)
_create_bounce_email()
return ()
return (model, thread_id, route[2], route[3], route[4])
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
"""Attempt to figure out the correct target model, thread_id,
custom_values and user_id to use for an incoming message.
Multiple values may be returned, if a message had multiple
recipients matching existing mail.aliases, for example.
The following heuristics are used, in this order:
1. If the message replies to an existing thread_id, and
properly contains the thread model in the 'In-Reply-To'
header, use this model/thread_id pair, and ignore
custom_value (not needed as no creation will take place)
2. Look for a mail.alias entry matching the message
recipient, and use the corresponding model, thread_id,
custom_values and user_id.
3. Fallback to the ``model``, ``thread_id`` and ``custom_values``
provided.
4. If all the above fails, raise an exception.
:param string message: an email.message instance
:param dict message_dict: dictionary holding message variables
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:type dict custom_values: optional dictionary of default field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. Only used if the message
does not reply to an existing thread and does not match any mail alias.
:return: list of [model, thread_id, custom_values, user_id, alias]
"""
assert isinstance(message, Message), 'message must be an email.message.Message at this point'
mail_msg_obj = self.pool['mail.message']
fallback_model = model
# Get email.message.Message variables for future processing
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
references = decode_header(message, 'References')
in_reply_to = decode_header(message, 'In-Reply-To')
thread_references = references or in_reply_to
# 1. message is a reply to an existing message (exact match of message_id)
msg_references = thread_references.split()
mail_message_ids = mail_msg_obj.search(cr, uid, [('message_id', 'in', msg_references)], context=context)
if mail_message_ids:
original_msg = mail_msg_obj.browse(cr, SUPERUSER_ID, mail_message_ids[0], context=context)
model, thread_id = original_msg.model, original_msg.res_id
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct reply to msg: model: %s, thread_id: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
route = self.message_route_verify(
cr, uid, message, message_dict,
(model, thread_id, custom_values, uid, None),
update_author=True, assert_model=True, create_fallback=True, context=context)
return route and [route] or []
        # 2. message is a reply to an existing thread (6.1 compatibility)
ref_match = thread_references and tools.reference_re.search(thread_references)
if ref_match:
thread_id = int(ref_match.group(1))
model = ref_match.group(2) or fallback_model
if thread_id and model in self.pool:
model_obj = self.pool[model]
compat_mail_msg_ids = mail_msg_obj.search(
cr, uid, [
('message_id', '=', False),
('model', '=', model),
('res_id', '=', thread_id),
], context=context)
if compat_mail_msg_ids and model_obj.exists(cr, uid, thread_id) and hasattr(model_obj, 'message_update'):
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct thread reply (compat-mode) to model: %s, thread_id: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
route = self.message_route_verify(
cr, uid, message, message_dict,
(model, thread_id, custom_values, uid, None),
update_author=True, assert_model=True, create_fallback=True, context=context)
return route and [route] or []
        # 3. Reply to a private message
if in_reply_to:
mail_message_ids = mail_msg_obj.search(cr, uid, [
('message_id', '=', in_reply_to),
'!', ('message_id', 'ilike', 'reply_to')
], limit=1, context=context)
if mail_message_ids:
mail_message = mail_msg_obj.browse(cr, uid, mail_message_ids[0], context=context)
_logger.info('Routing mail from %s to %s with Message-Id %s: direct reply to a private message: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, mail_message.id, custom_values, uid)
route = self.message_route_verify(cr, uid, message, message_dict,
(mail_message.model, mail_message.res_id, custom_values, uid, None),
update_author=True, assert_model=True, create_fallback=True, context=context)
return route and [route] or []
        # 4. Look for a matching mail.alias entry
# Delivered-To is a safe bet in most modern MTAs, but we have to fallback on To + Cc values
# for all the odd MTAs out there, as there is no standard header for the envelope's `rcpt_to` value.
rcpt_tos = \
','.join([decode_header(message, 'Delivered-To'),
decode_header(message, 'To'),
decode_header(message, 'Cc'),
decode_header(message, 'Resent-To'),
decode_header(message, 'Resent-Cc')])
local_parts = [e.split('@')[0] for e in tools.email_split(rcpt_tos)]
if local_parts:
mail_alias = self.pool.get('mail.alias')
alias_ids = mail_alias.search(cr, uid, [('alias_name', 'in', local_parts)])
if alias_ids:
routes = []
for alias in mail_alias.browse(cr, uid, alias_ids, context=context):
user_id = alias.alias_user_id.id
if not user_id:
# TDE note: this could cause crashes, because no clue that the user
                    # that sent the email has the right to create or modify a new document
# Fallback on user_id = uid
# Note: recognized partners will be added as followers anyway
# user_id = self._message_find_user_id(cr, uid, message, context=context)
user_id = uid
_logger.info('No matching user_id for the alias %s', alias.alias_name)
route = (alias.alias_model_id.model, alias.alias_force_thread_id, eval(alias.alias_defaults), user_id, alias)
_logger.info('Routing mail from %s to %s with Message-Id %s: direct alias match: %r',
email_from, email_to, message_id, route)
route = self.message_route_verify(cr, uid, message, message_dict, route,
update_author=True, assert_model=True, create_fallback=True, context=context)
if route:
routes.append(route)
return routes
        # 5. Fallback to the provided parameters, if they work
if not thread_id:
# Legacy: fallback to matching [ID] in the Subject
match = tools.res_re.search(decode_header(message, 'Subject'))
thread_id = match and match.group(1)
# Convert into int (bug spotted in 7.0 because of str)
try:
thread_id = int(thread_id)
            except (ValueError, TypeError):
thread_id = False
_logger.info('Routing mail from %s to %s with Message-Id %s: fallback to model:%s, thread_id:%s, custom_values:%s, uid:%s',
email_from, email_to, message_id, fallback_model, thread_id, custom_values, uid)
route = self.message_route_verify(cr, uid, message, message_dict,
(fallback_model, thread_id, custom_values, uid, None),
update_author=True, assert_model=True, context=context)
if route:
return [route]
        # AssertionError if no routes found and if no bounce occurred
        assert False, \
            "No possible route found for incoming message from %s to %s (Message-Id %s). " \
            "Create an appropriate mail.alias or force the destination model." % (email_from, email_to, message_id)
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
# postpone setting message_dict.partner_ids after message_post, to avoid double notifications
partner_ids = message_dict.pop('partner_ids', [])
thread_id = False
for model, thread_id, custom_values, user_id, alias in routes:
if self._name == 'mail.thread':
context.update({'thread_model': model})
if model:
model_pool = self.pool[model]
assert thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new'), \
"Undeliverable mail with Message-Id %s, model %s does not accept incoming emails" % \
(message_dict['message_id'], model)
                # disable subscriptions during message_new/update to avoid having the system user running the
# email gateway become a follower of all inbound messages
nosub_ctx = dict(context, mail_create_nosubscribe=True, mail_create_nolog=True)
if thread_id and hasattr(model_pool, 'message_update'):
model_pool.message_update(cr, user_id, [thread_id], message_dict, context=nosub_ctx)
else:
thread_id = model_pool.message_new(cr, user_id, message_dict, custom_values, context=nosub_ctx)
else:
assert thread_id == 0, "Posting a message without model should be with a null res_id, to create a private message."
model_pool = self.pool.get('mail.thread')
if not hasattr(model_pool, 'message_post'):
context['thread_model'] = model
model_pool = self.pool['mail.thread']
new_msg_id = model_pool.message_post(cr, uid, [thread_id], context=context, subtype='mail.mt_comment', **message_dict)
if partner_ids:
# postponed after message_post, because this is an external message and we don't want to create
# duplicate emails due to notifications
self.pool.get('mail.message').write(cr, uid, [new_msg_id], {'partner_ids': partner_ids}, context=context)
return thread_id
def message_process(self, cr, uid, model, message, custom_values=None,
save_original=False, strip_attachments=False,
thread_id=None, context=None):
""" Process an incoming RFC2822 email message, relying on
``mail.message.parse()`` for the parsing operation,
and ``message_route()`` to figure out the target model.
Once the target model is known, its ``message_new`` method
is called with the new message (if the thread record did not exist)
or its ``message_update`` method (if it did).
There is a special case where the target model is False: a reply
to a private message. In this case, we skip the message_new /
message_update step, to just post a new message using mail_thread
message_post.
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:param message: source of the RFC2822 message
:type message: string or xmlrpclib.Binary
:type dict custom_values: optional dictionary of field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param bool save_original: whether to keep a copy of the original
email source attached to the message after it is imported.
:param bool strip_attachments: whether to strip all attachments
before processing the message, in order to save some space.
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. When provided, this
overrides the automatic detection based on the message
headers.
"""
if context is None:
context = {}
# extract message bytes - we are forced to pass the message as binary because
# we don't know its encoding until we parse its headers and hence can't
# convert it to utf-8 for transport between the mailgate script and here.
if isinstance(message, xmlrpclib.Binary):
message = str(message.data)
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
if isinstance(message, unicode):
message = message.encode('utf-8')
msg_txt = email.message_from_string(message)
# parse the message, verify we are not in a loop by checking message_id is not duplicated
msg = self.message_parse(cr, uid, msg_txt, save_original=save_original, context=context)
if strip_attachments:
msg.pop('attachments', None)
        if msg.get('message_id'):  # should always be True as message_parse generates one if missing
existing_msg_ids = self.pool.get('mail.message').search(cr, SUPERUSER_ID, [
('message_id', '=', msg.get('message_id')),
], context=context)
if existing_msg_ids:
_logger.info('Ignored mail from %s to %s with Message-Id %s: found duplicated Message-Id during processing',
msg.get('from'), msg.get('to'), msg.get('message_id'))
return False
# find possible routes for the message
routes = self.message_route(cr, uid, msg_txt, msg, model, thread_id, custom_values, context=context)
thread_id = self.message_route_process(cr, uid, msg_txt, msg, routes, context=context)
return thread_id
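    # Usage sketch (illustrative only; the path, fallback model and alias set-up
    # are hypothetical): the mail gateway typically feeds the raw RFC2822 source
    # straight into message_process, e.g.
    #
    #     raw_email = open('/tmp/incoming.eml').read()
    #     thread_id = self.pool['mail.thread'].message_process(
    #         cr, uid, 'crm.lead', raw_email, save_original=False, context=context)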
def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
"""Called by ``message_process`` when a new message is received
for a given thread model, if the message did not belong to
an existing thread.
The default behavior is to create a new record of the corresponding
model (based on some very basic info extracted from the message).
Additional behavior may be implemented by overriding this method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse`` for details.
:param dict custom_values: optional dictionary of additional
field values to pass to create()
when creating the new thread record.
Be careful, these values may override
any other values coming from the message.
:param dict context: if a ``thread_model`` value is present
in the context, its value will be used
to determine the model of the record
to create (instead of the current model).
:rtype: int
:return: the id of the newly created thread object
"""
if context is None:
context = {}
data = {}
if isinstance(custom_values, dict):
data = custom_values.copy()
model = context.get('thread_model') or self._name
model_pool = self.pool[model]
fields = model_pool.fields_get(cr, uid, context=context)
if 'name' in fields and not data.get('name'):
data['name'] = msg_dict.get('subject', '')
res_id = model_pool.create(cr, uid, data, context=context)
return res_id
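    # Illustrative override (hypothetical class and field names): inheriting
    # models commonly extend message_new to seed extra defaults from the parsed
    # email, calling super() as recommended in the class docstring.
    #
    #     def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
    #         defaults = {'email_from': msg_dict.get('from')}
    #         defaults.update(custom_values or {})
    #         return super(my_model, self).message_new(cr, uid, msg_dict,
    #                                                  custom_values=defaults, context=context)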
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
"""Called by ``message_process`` when a new message is received
for an existing thread. The default behavior is to update the record
with update_vals taken from the incoming email.
Additional behavior may be implemented by overriding this
method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse()`` for details.
:param dict update_vals: a dict containing values to update records
given their ids; if the dict is None or is
void, no write operation is performed.
"""
if update_vals:
self.write(cr, uid, ids, update_vals, context=context)
return True
def _message_extract_payload(self, message, save_original=False):
"""Extract body as HTML and attachments from the mail message"""
attachments = []
body = u''
if save_original:
attachments.append(('original_email.eml', message.as_string()))
if not message.is_multipart() or 'text/' in message.get('content-type', ''):
encoding = message.get_content_charset()
body = message.get_payload(decode=True)
body = tools.ustr(body, encoding, errors='replace')
if message.get_content_type() == 'text/plain':
# text/plain -> <pre/>
body = tools.append_content_to_html(u'', body, preserve=True)
else:
alternative = False
for part in message.walk():
if part.get_content_type() == 'multipart/alternative':
alternative = True
if part.get_content_maintype() == 'multipart':
continue # skip container
                # part.get_filename returns the decoded value when it can decode it, and the raw value otherwise.
                # The original get_filename is not able to decode iso-8859-1 (for instance);
                # therefore, iso-encoded attachment names cannot be decoded properly with get_filename.
                # The code here partially copies the original get_filename method, but handles more encodings.
filename=part.get_param('filename', None, 'content-disposition')
if not filename:
filename=part.get_param('name', None)
if filename:
if isinstance(filename, tuple):
# RFC2231
filename=email.utils.collapse_rfc2231_value(filename).strip()
else:
filename=decode(filename)
encoding = part.get_content_charset() # None if attachment
# 1) Explicit Attachments -> attachments
if filename or part.get('content-disposition', '').strip().startswith('attachment'):
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
continue
# 2) text/plain -> <pre/>
if part.get_content_type() == 'text/plain' and (not alternative or not body):
body = tools.append_content_to_html(body, tools.ustr(part.get_payload(decode=True),
encoding, errors='replace'), preserve=True)
# 3) text/html -> raw
elif part.get_content_type() == 'text/html':
html = tools.ustr(part.get_payload(decode=True), encoding, errors='replace')
if alternative:
body = html
else:
body = tools.append_content_to_html(body, html, plaintext=False)
# 4) Anything else -> attachment
else:
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
return body, attachments
def message_parse(self, cr, uid, message, save_original=False, context=None):
"""Parses a string or email.message.Message representing an
RFC-2822 email, and returns a generic dict holding the
message details.
:param message: the message to parse
:type message: email.message.Message | string | unicode
:param bool save_original: whether the returned dict
should include an ``original`` attachment containing
the source of the message
:rtype: dict
:return: A dict with the following structure, where each
field may not be present if missing in original
message::
{ 'message_id': msg_id,
'subject': subject,
'from': from,
'to': to,
'cc': cc,
'body': unified_body,
              'attachments': [('file1', 'bytes'),
                              ('file2', 'bytes')]
            }
"""
msg_dict = {
'type': 'email',
}
if not isinstance(message, Message):
if isinstance(message, unicode):
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
message = message.encode('utf-8')
message = email.message_from_string(message)
message_id = message['message-id']
if not message_id:
            # Very unusual situation, but we should be fault-tolerant here
message_id = "<%s@localhost>" % time.time()
_logger.debug('Parsing Message without message-id, generating a random one: %s', message_id)
msg_dict['message_id'] = message_id
if message.get('Subject'):
msg_dict['subject'] = decode(message.get('Subject'))
# Envelope fields not stored in mail.message but made available for message_new()
msg_dict['from'] = decode(message.get('from'))
msg_dict['to'] = decode(message.get('to'))
msg_dict['cc'] = decode(message.get('cc'))
msg_dict['email_from'] = decode(message.get('from'))
partner_ids = self._message_find_partners(cr, uid, message, ['To', 'Cc'], context=context)
msg_dict['partner_ids'] = [(4, partner_id) for partner_id in partner_ids]
if message.get('Date'):
try:
date_hdr = decode(message.get('Date'))
parsed_date = dateutil.parser.parse(date_hdr, fuzzy=True)
if parsed_date.utcoffset() is None:
# naive datetime, so we arbitrarily decide to make it
# UTC, there's no better choice. Should not happen,
# as RFC2822 requires timezone offset in Date headers.
stored_date = parsed_date.replace(tzinfo=pytz.utc)
else:
stored_date = parsed_date.astimezone(tz=pytz.utc)
except Exception:
_logger.warning('Failed to parse Date header %r in incoming mail '
'with message-id %r, assuming current date/time.',
message.get('Date'), message_id)
stored_date = datetime.datetime.now()
msg_dict['date'] = stored_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
if message.get('In-Reply-To'):
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', decode(message['In-Reply-To']))])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
if message.get('References') and 'parent_id' not in msg_dict:
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', 'in',
[x.strip() for x in decode(message['References']).split()])])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
msg_dict['body'], msg_dict['attachments'] = self._message_extract_payload(message, save_original=save_original)
return msg_dict
#------------------------------------------------------
# Note specific
#------------------------------------------------------
def log(self, cr, uid, id, message, secondary=False, context=None):
_logger.warning("log() is deprecated. As this module inherit from "\
"mail.thread, the message will be managed by this "\
"module instead of by the res.log mechanism. Please "\
"use mail_thread.message_post() instead of the "\
"now deprecated res.log.")
self.message_post(cr, uid, [id], message, context=context)
def _message_add_suggested_recipient(self, cr, uid, result, obj, partner=None, email=None, reason='', context=None):
""" Called by message_get_suggested_recipients, to add a suggested
recipient in the result dictionary. The form is :
partner_id, partner_name<partner_email> or partner_name, reason """
if email and not partner:
# get partner info from email
partner_info = self.message_partner_info_from_emails(cr, uid, obj.id, [email], context=context)[0]
if partner_info.get('partner_id'):
partner = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, [partner_info['partner_id']], context=context)[0]
if email and email in [val[1] for val in result[obj.id]]: # already existing email -> skip
return result
if partner and partner in obj.message_follower_ids: # recipient already in the followers -> skip
return result
if partner and partner in [val[0] for val in result[obj.id]]: # already existing partner ID -> skip
return result
if partner and partner.email: # complete profile: id, name <email>
result[obj.id].append((partner.id, '%s<%s>' % (partner.name, partner.email), reason))
elif partner: # incomplete profile: id, name
result[obj.id].append((partner.id, '%s' % (partner.name), reason))
else: # unknown partner, we are probably managing an email address
result[obj.id].append((False, email, reason))
return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
""" Returns suggested recipients for ids. Those are a list of
tuple (partner_id, partner_name, reason), to be managed by Chatter. """
        result = dict((res_id, []) for res_id in ids)  # one independent list per id
if self._all_columns.get('user_id'):
for obj in self.browse(cr, SUPERUSER_ID, ids, context=context): # SUPERUSER because of a read on res.users that would crash otherwise
if not obj.user_id or not obj.user_id.partner_id:
continue
self._message_add_suggested_recipient(cr, uid, result, obj, partner=obj.user_id.partner_id, reason=self._all_columns['user_id'].column.string, context=context)
return result
def _find_partner_from_emails(self, cr, uid, id, emails, model=None, context=None, check_followers=True):
""" Utility method to find partners from email addresses. The rules are :
1 - check in document (model | self, id) followers
2 - try to find a matching partner that is also an user
3 - try to find a matching partner
:param list emails: list of email addresses
:param string model: model to fetch related record; by default self
is used.
:param boolean check_followers: check in document followers
"""
partner_obj = self.pool['res.partner']
partner_ids = []
obj = None
if id and (model or self._name != 'mail.thread') and check_followers:
if model:
obj = self.pool[model].browse(cr, uid, id, context=context)
else:
obj = self.browse(cr, uid, id, context=context)
for contact in emails:
partner_id = False
email_address = tools.email_split(contact)
if not email_address:
partner_ids.append(partner_id)
continue
email_address = email_address[0]
# first try: check in document's followers
if obj:
for follower in obj.message_follower_ids:
if follower.email == email_address:
partner_id = follower.id
# second try: check in partners that are also users
if not partner_id:
ids = partner_obj.search(cr, SUPERUSER_ID, [
('email', 'ilike', email_address),
('user_ids', '!=', False)
], limit=1, context=context)
if ids:
partner_id = ids[0]
# third try: check in partners
if not partner_id:
ids = partner_obj.search(cr, SUPERUSER_ID, [
('email', 'ilike', email_address)
], limit=1, context=context)
if ids:
partner_id = ids[0]
partner_ids.append(partner_id)
return partner_ids
def message_partner_info_from_emails(self, cr, uid, id, emails, link_mail=False, context=None):
""" Convert a list of emails into a list partner_ids and a list
new_partner_ids. The return value is non conventional because
it is meant to be used by the mail widget.
:return dict: partner_ids and new_partner_ids """
mail_message_obj = self.pool.get('mail.message')
partner_ids = self._find_partner_from_emails(cr, uid, id, emails, context=context)
result = list()
for idx in range(len(emails)):
email_address = emails[idx]
partner_id = partner_ids[idx]
partner_info = {'full_name': email_address, 'partner_id': partner_id}
result.append(partner_info)
            # link mails whose email_from matches this address to the newly found partner id
if link_mail and partner_info['partner_id']:
message_ids = mail_message_obj.search(cr, SUPERUSER_ID, [
'|',
('email_from', '=', email_address),
('email_from', 'ilike', '<%s>' % email_address),
('author_id', '=', False)
], context=context)
if message_ids:
mail_message_obj.write(cr, SUPERUSER_ID, message_ids, {'author_id': partner_info['partner_id']}, context=context)
return result
def _message_preprocess_attachments(self, cr, uid, attachments, attachment_ids, attach_model, attach_res_id, context=None):
""" Preprocess attachments for mail_thread.message_post() or mail_mail.create().
:param list attachments: list of attachment tuples in the form ``(name,content)``,
where content is NOT base64 encoded
            :param list attachment_ids: a list of attachment ids, not in to-many command form
:param str attach_model: the model of the attachments parent record
:param integer attach_res_id: the id of the attachments parent record
"""
Attachment = self.pool['ir.attachment']
m2m_attachment_ids = []
if attachment_ids:
filtered_attachment_ids = Attachment.search(cr, SUPERUSER_ID, [
('res_model', '=', 'mail.compose.message'),
('create_uid', '=', uid),
('id', 'in', attachment_ids)], context=context)
if filtered_attachment_ids:
Attachment.write(cr, SUPERUSER_ID, filtered_attachment_ids, {'res_model': attach_model, 'res_id': attach_res_id}, context=context)
m2m_attachment_ids += [(4, id) for id in attachment_ids]
        # Handle the attachments parameter, which is a list of (name, content) tuples
for name, content in attachments:
if isinstance(content, unicode):
content = content.encode('utf-8')
data_attach = {
'name': name,
'datas': base64.b64encode(str(content)),
'datas_fname': name,
'description': name,
'res_model': attach_model,
'res_id': attach_res_id,
}
m2m_attachment_ids.append((0, 0, data_attach))
return m2m_attachment_ids
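    # Illustrative call (file name, content and target record are hypothetical):
    # attachments are plain (name, content) tuples with raw, non base64-encoded
    # content, while attachment_ids are plain ids of existing ir.attachment records.
    #
    #     self._message_preprocess_attachments(cr, uid,
    #         [('report.txt', 'raw file content')], [], 'res.partner', 42, context=context)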
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification',
subtype=None, parent_id=False, attachments=None, context=None,
content_subtype='html', **kwargs):
""" Post a new message in an existing thread, returning the new
mail.message ID.
:param int thread_id: thread ID to post into, or list with one ID;
if False/0, mail.message model will also be set as False
:param str body: body of the message, usually raw HTML that will
be sanitized
:param str type: see mail_message.type field
:param str content_subtype: if 'plaintext', convert the body into HTML
:param int parent_id: handle reply to a previous message by adding the
parent partners to the message in case of private discussion
:param tuple(str,str) attachments or list id: list of attachment tuples in the form
``(name,content)``, where content is NOT base64 encoded
Extra keyword arguments will be used as default column values for the
new mail.message record. Special cases:
- attachment_ids: attachments that are supposed not to be attached to any
document yet; attach them to the related document. Should only be set by Chatter.
:return int: ID of newly created mail.message
"""
if context is None:
context = {}
if attachments is None:
attachments = {}
mail_message = self.pool.get('mail.message')
ir_attachment = self.pool.get('ir.attachment')
assert (not thread_id) or \
isinstance(thread_id, (int, long)) or \
(isinstance(thread_id, (list, tuple)) and len(thread_id) == 1), \
"Invalid thread_id; should be 0, False, an ID or a list with one ID"
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
# if we're processing a message directly coming from the gateway, the destination model was
# set in the context.
model = False
if thread_id:
model = context.get('thread_model', self._name) if self._name == 'mail.thread' else self._name
if model != self._name and hasattr(self.pool[model], 'message_post'):
del context['thread_model']
return self.pool[model].message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
#0: Find the message's author, because we need it for private discussion
author_id = kwargs.get('author_id')
if author_id is None: # keep False values
author_id = self.pool.get('mail.message')._get_default_author(cr, uid, context=context)
# 1: Handle content subtype: if plaintext, convert the body into HTML
if content_subtype == 'plaintext':
body = tools.plaintext2html(body)
# 2: Private message: add recipients (recipients and author of the parent message), excluding the current author
# + legacy-code management (note: we only handle 4 and 6 commands)
partner_ids = set()
kwargs_partner_ids = kwargs.pop('partner_ids', [])
for partner_id in kwargs_partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
partner_ids.add(partner_id[1])
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
partner_ids |= set(partner_id[2])
elif isinstance(partner_id, (int, long)):
partner_ids.add(partner_id)
else:
pass # we do not manage anything else
if parent_id and not model:
parent_message = mail_message.browse(cr, uid, parent_id, context=context)
private_followers = set([partner.id for partner in parent_message.partner_ids])
if parent_message.author_id:
private_followers.add(parent_message.author_id.id)
private_followers -= set([author_id])
partner_ids |= private_followers
# 3. Attachments
# - HACK TDE FIXME: Chatter: attachments linked to the document (not done JS-side), load the message
attachment_ids = self._message_preprocess_attachments(cr, uid, attachments, kwargs.pop('attachment_ids', []), model, thread_id, context)
# 4: mail.message.subtype
subtype_id = False
if subtype:
if '.' not in subtype:
subtype = 'mail.%s' % subtype
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, *subtype.split('.'))
subtype_id = ref and ref[1] or False
# automatically subscribe recipients if asked to
if context.get('mail_post_autofollow') and thread_id and partner_ids:
partner_to_subscribe = partner_ids
if context.get('mail_post_autofollow_partner_ids'):
partner_to_subscribe = filter(lambda item: item in context.get('mail_post_autofollow_partner_ids'), partner_ids)
self.message_subscribe(cr, uid, [thread_id], list(partner_to_subscribe), context=context)
# _mail_flat_thread: automatically set free messages to the first posted message
if self._mail_flat_thread and not parent_id and thread_id:
message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model)], context=context, order="id ASC", limit=1)
parent_id = message_ids and message_ids[0] or False
# we want to set a parent: force to set the parent_id to the oldest ancestor, to avoid having more than 1 level of thread
elif parent_id:
message_ids = mail_message.search(cr, SUPERUSER_ID, [('id', '=', parent_id), ('parent_id', '!=', False)], context=context)
# avoid loops when finding ancestors
processed_list = []
if message_ids:
message = mail_message.browse(cr, SUPERUSER_ID, message_ids[0], context=context)
while (message.parent_id and message.parent_id.id not in processed_list):
processed_list.append(message.parent_id.id)
message = message.parent_id
parent_id = message.id
values = kwargs
values.update({
'author_id': author_id,
'model': model,
'res_id': thread_id or False,
'body': body,
'subject': subject or False,
'type': type,
'parent_id': parent_id,
'attachment_ids': attachment_ids,
'subtype_id': subtype_id,
'partner_ids': [(4, pid) for pid in partner_ids],
})
# Avoid warnings about non-existing fields
for x in ('from', 'to', 'cc'):
values.pop(x, None)
# Create and auto subscribe the author
msg_id = mail_message.create(cr, uid, values, context=context)
message = mail_message.browse(cr, uid, msg_id, context=context)
if message.author_id and thread_id and type != 'notification' and not context.get('mail_create_nosubscribe'):
self.message_subscribe(cr, uid, [thread_id], [message.author_id.id], context=context)
return msg_id
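# A minimal, hypothetical call to message_post; the body, subtype and partner
# values are illustrative only:
#
#   msg_id = self.message_post(
#       cr, uid, thread_id,
#       body='<p>Order confirmed</p>', subject='Confirmation',
#       type='comment', subtype='mail.mt_comment',
#       partner_ids=[(4, partner_id)])
#
# The author is auto-subscribed to the thread unless the message is a
# notification or 'mail_create_nosubscribe' is set in the context.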
#------------------------------------------------------
# Followers API
#------------------------------------------------------
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
""" Wrapper to get subtypes data. """
return self._get_subscription_data(cr, uid, ids, None, None, user_pid=user_pid, context=context)
def message_subscribe_users(self, cr, uid, ids, user_ids=None, subtype_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, subscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
return self.message_subscribe(cr, uid, ids, partner_ids, subtype_ids=subtype_ids, context=context)
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
""" Add partners to the records followers. """
if context is None:
context = {}
mail_followers_obj = self.pool.get('mail.followers')
subtype_obj = self.pool.get('mail.message.subtype')
user_pid = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
if set(partner_ids) == set([user_pid]):
if context.get('operation', '') != 'create':
try:
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
except (osv.except_osv, orm.except_orm):
return False
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
existing_pids_dict = {}
fol_ids = mail_followers_obj.search(cr, SUPERUSER_ID, ['&', '&', ('res_model', '=', self._name), ('res_id', 'in', ids), ('partner_id', 'in', partner_ids)])
for fol in mail_followers_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context):
existing_pids_dict.setdefault(fol.res_id, set()).add(fol.partner_id.id)
# subtype_ids specified: update already subscribed partners
if subtype_ids and fol_ids:
mail_followers_obj.write(cr, SUPERUSER_ID, fol_ids, {'subtype_ids': [(6, 0, subtype_ids)]}, context=context)
# subtype_ids not specified: do not update already subscribed partner, fetch default subtypes for new partners
if subtype_ids is None:
subtype_ids = subtype_obj.search(
cr, uid, [
('default', '=', True), '|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
for id in ids:
existing_pids = existing_pids_dict.get(id, set())
new_pids = set(partner_ids) - existing_pids
# subscribe new followers
for new_pid in new_pids:
mail_followers_obj.create(
cr, SUPERUSER_ID, {
'res_model': self._name,
'res_id': id,
'partner_id': new_pid,
'subtype_ids': [(6, 0, subtype_ids)],
}, context=context)
return True
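# A hedged usage sketch: subscribing only the current user's partner requires
# read access, while subscribing other partners requires write access, as
# checked above. The record and partner ids below are hypothetical:
#
#   self.message_subscribe(cr, uid, [record_id], [partner_id],
#                          subtype_ids=None, context=context)
#
# With subtype_ids=None the model's default subtypes are fetched and applied
# to newly added followers only; existing followers are left untouched.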
def message_unsubscribe_users(self, cr, uid, ids, user_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, unsubscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
return self.message_unsubscribe(cr, uid, ids, partner_ids, context=context)
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
""" Remove partners from the records followers. """
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
fol_obj = self.pool['mail.followers']
fol_ids = fol_obj.search(
cr, SUPERUSER_ID, [
('res_model', '=', self._name),
('res_id', 'in', ids),
('partner_id', 'in', partner_ids)
], context=context)
return fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=['user_id'], context=None):
""" Returns the list of relational fields linking to res.users that should
trigger an auto subscribe. The default list checks for the fields
- called 'user_id'
- linking to res.users
- with track_visibility set
In OpenERP V7, this is sufficient for all major addons such as opportunity,
project, issue, recruitment and sale.
Override this method if a custom behavior is needed about fields
that automatically subscribe users.
"""
user_field_lst = []
for name, column_info in self._all_columns.items():
if name in auto_follow_fields and name in updated_fields and getattr(column_info.column, 'track_visibility', False) and column_info.column._obj == 'res.users':
user_field_lst.append(name)
return user_field_lst
def message_auto_subscribe(self, cr, uid, ids, updated_fields, context=None, values=None):
""" Handle auto subscription. Two methods for auto subscription exist:
- tracked res.users relational fields, such as user_id fields. Those fields
must be relation fields toward a res.users record, and must have the
track_visibility attribute set.
- using subtypes parent relationship: check if the current model being
modified has a header record (such as a project for tasks) whose followers
can be added as followers of the current records. Example of structure
with project and task:
- st_project_1.parent_id = st_task_1
- st_project_1.res_model = 'project.project'
- st_project_1.relation_field = 'project_id'
- st_task_1.model = 'project.task'
:param list updated_fields: list of updated fields to track
:param dict values: updated values; if None, the first record will be browsed
to get the values. Added after releasing 7.0, therefore
not merged with the updated_fields argument.
"""
subtype_obj = self.pool.get('mail.message.subtype')
follower_obj = self.pool.get('mail.followers')
new_followers = dict()
# fetch auto_follow_fields: res.users relation fields whose changes are tracked for subscription
user_field_lst = self._message_get_auto_subscribe_fields(cr, uid, updated_fields, context=context)
# fetch header subtypes
header_subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', False), ('parent_id.res_model', '=', self._name)], context=context)
subtypes = subtype_obj.browse(cr, uid, header_subtype_ids, context=context)
# if no change in tracked field or no change in tracked relational field: quit
relation_fields = set([subtype.relation_field for subtype in subtypes if subtype.relation_field is not False])
if not any(relation in updated_fields for relation in relation_fields) and not user_field_lst:
return True
# legacy behavior: if values is not given, compute the values by browsing
# @TDENOTE: remove me in 8.0
if values is None:
record = self.browse(cr, uid, ids[0], context=context)
values = {}  # values is None in this branch; initialize it before filling it below
for updated_field in updated_fields:
field_value = getattr(record, updated_field)
if isinstance(field_value, browse_record):
field_value = field_value.id
elif isinstance(field_value, browse_null):
field_value = False
values[updated_field] = field_value
# find followers of headers, update structure for new followers
headers = set()
for subtype in subtypes:
if subtype.relation_field and values.get(subtype.relation_field):
headers.add((subtype.res_model, values.get(subtype.relation_field)))
if headers:
header_domain = ['|'] * (len(headers) - 1)
for header in headers:
header_domain += ['&', ('res_model', '=', header[0]), ('res_id', '=', header[1])]
header_follower_ids = follower_obj.search(
cr, SUPERUSER_ID,
header_domain,
context=context
)
for header_follower in follower_obj.browse(cr, SUPERUSER_ID, header_follower_ids, context=context):
for subtype in header_follower.subtype_ids:
if subtype.parent_id and subtype.parent_id.res_model == self._name:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.parent_id.id)
elif subtype.res_model is False:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.id)
# add followers coming from res.users relational fields that are tracked
user_ids = [values[name] for name in user_field_lst if values.get(name)]
user_pids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, SUPERUSER_ID, user_ids, context=context)]
for partner_id in user_pids:
new_followers.setdefault(partner_id, None)
for pid, subtypes in new_followers.items():
subtypes = list(subtypes) if subtypes is not None else None
self.message_subscribe(cr, uid, ids, [pid], subtypes, context=context)
# find the first email message and notify the newly auto-subscribed users about it, so that they get a notification
if user_pids:
for record_id in ids:
message_obj = self.pool.get('mail.message')
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id),
('type', '=', 'email')], limit=1, context=context)
if not msg_ids:
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id)], limit=1, context=context)
if msg_ids:
self.pool.get('mail.notification')._notify(cr, uid, msg_ids[0], partners_to_notify=user_pids, context=context)
return True
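# A sketch of the subtype parent relationship described in the docstring, with
# hypothetical ids: a subtype on the header model (res_model='project.project',
# relation_field='project_id') whose parent_id points to a subtype on
# 'project.task' makes project followers auto-follow newly linked tasks:
#
#   self.pool['project.task'].message_auto_subscribe(
#       cr, uid, [task_id], ['project_id'],
#       values={'project_id': project_id}, context=context)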
#------------------------------------------------------
# Thread state
#------------------------------------------------------
def message_mark_as_unread(self, cr, uid, ids, context=None):
""" Set as unread. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
read=false
WHERE
message_id IN (SELECT id from mail_message where res_id=any(%s) and model=%s limit 1) and
partner_id = %s
''', (ids, self._name, partner_id))
return True
def message_mark_as_read(self, cr, uid, ids, context=None):
""" Set as read. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
read=true
WHERE
message_id IN (SELECT id FROM mail_message WHERE res_id=ANY(%s) AND model=%s) AND
partner_id = %s
''', (ids, self._name, partner_id))
return True
#------------------------------------------------------
# Thread suggestion
#------------------------------------------------------
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Return a list of suggested threads, sorted by the numbers of followers"""
if context is None:
context = {}
# TDE HACK: originally by MAT from portal/mail_mail.py, but not working until the inheritance graph bug is solved in trunk
# TDE FIXME: relocate in portal when it won't be necessary to reload the hr.employee model in an additional bridge module
if self.pool['res.groups']._all_columns.get('is_portal'):
user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
if any(group.is_portal for group in user.groups_id):
return []
threads = []
if removed_suggested_threads is None:
removed_suggested_threads = []
thread_ids = self.search(cr, uid, [('id', 'not in', removed_suggested_threads), ('message_is_follower', '=', False)], context=context)
for thread in self.browse(cr, uid, thread_ids, context=context):
data = {
'id': thread.id,
'popularity': len(thread.message_follower_ids),
'name': thread.name,
'image_small': thread.image_small
}
threads.append(data)
return sorted(threads, key=lambda x: (x['popularity'], x['id']), reverse=True)[:3]
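# Shape of the value returned above, with invented example data: at most three
# entries, most followed thread first.
#
#   [{'id': 12, 'popularity': 8, 'name': 'Sales', 'image_small': '...'},
#    {'id': 7, 'popularity': 5, 'name': 'HR', 'image_small': '...'},
#    {'id': 3, 'popularity': 2, 'name': 'R&D', 'image_small': '...'}]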
|
jmesteve/saas3
|
openerp/addons/mail/mail_thread.py
|
Python
|
agpl-3.0
| 96,273
|
"""
Helper classes and methods for running modulestore tests without Django.
"""
import io
import os
from contextlib import contextmanager
from contextlib2 import ExitStack
from importlib import import_module
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase
from uuid import uuid4
import six
from path import Path as path
from six.moves import range, zip
from xmodule.contentstore.mongo import MongoContentStore
from xmodule.modulestore.draft_and_published import ModuleStoreDraftAndPublished
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.modulestore.mongo.base import ModuleStoreEnum
from xmodule.modulestore.mongo.draft import DraftModuleStore
from xmodule.modulestore.split_mongo.split_draft import DraftVersioningModuleStore
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore.tests.mongo_connection import MONGO_HOST, MONGO_PORT_NUM
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore.xml_importer import LocationMixin
from xmodule.tests import DATA_DIR
from xmodule.x_module import XModuleMixin
def load_function(path):
"""
Load a function by name.
path is a string of the form "path.to.module.function"
returns the imported python object `function` from `path.to.module`
"""
module_path, _, name = path.rpartition('.')
return getattr(import_module(module_path), name)
# pylint: disable=unused-argument
def create_modulestore_instance(
engine,
contentstore,
doc_store_config,
options,
i18n_service=None,
fs_service=None,
user_service=None,
signal_handler=None,
):
"""
This will return a new instance of a modulestore given an engine and options
"""
class_ = load_function(engine)
if issubclass(class_, ModuleStoreDraftAndPublished):
options['branch_setting_func'] = lambda: ModuleStoreEnum.Branch.draft_preferred
return class_(
doc_store_config=doc_store_config,
contentstore=contentstore,
signal_handler=signal_handler,
**options
)
def mock_tab_from_json(tab_dict):
"""
Mocks out the CourseTab.from_json to just return the tab_dict itself so that we don't have to deal
with plugin errors.
"""
return tab_dict
def add_temp_files_from_dict(file_dict, dir):
"""
Takes in a dict formatted as { file_name: content } and adds the files to the given directory
"""
for file_name in file_dict:
with io.open("{}/{}".format(dir, file_name), "w") as opened_file:
content = file_dict[file_name]
if content:
opened_file.write(six.text_type(content))
def remove_temp_files_from_list(file_list, dir):
"""
Takes in a list of file names and removes them from dir if they exist
"""
for file_name in file_list:
file_path = "{}/{}".format(dir, file_name)
if os.path.exists(file_path):
os.remove(file_path)
class MixedSplitTestCase(TestCase):
"""
Stripped-down version of ModuleStoreTestCase that can be used without Django
(i.e. for testing in common/lib/ ). Sets up MixedModuleStore and Split.
"""
RENDER_TEMPLATE = lambda t_n, d, ctx=None, nsp='main': u'{}: {}, {}'.format(t_n, repr(d), repr(ctx))
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': DATA_DIR,
'render_template': RENDER_TEMPLATE,
'xblock_mixins': (EditInfoMixin, InheritanceMixin, LocationMixin, XModuleMixin),
}
DOC_STORE_CONFIG = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'test_mongo_libs_{0}'.format(os.getpid()),
'collection': 'modulestore',
'asset_collection': 'assetstore',
}
MIXED_OPTIONS = {
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
]
}
def setUp(self):
"""
Set up requirements for testing: a user ID and a modulestore
"""
super(MixedSplitTestCase, self).setUp()
self.user_id = ModuleStoreEnum.UserID.test
self.store = MixedModuleStore(
None,
create_modulestore_instance=create_modulestore_instance,
mappings={},
**self.MIXED_OPTIONS
)
self.addCleanup(self.store.close_all_connections)
self.addCleanup(self.store._drop_database) # pylint: disable=protected-access
def make_block(self, category, parent_block, **kwargs):
"""
Create a block of type `category` as a child of `parent_block`, in any
course or library. You can pass any field values as kwargs.
"""
extra = {"publish_item": False, "user_id": self.user_id}
extra.update(kwargs)
return ItemFactory.create(
category=category,
parent=parent_block,
parent_location=parent_block.location,
modulestore=self.store,
**extra
)
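# A minimal sketch of using make_block from a test, assuming `course` is an
# existing course block (e.g. created by a course factory) and that the child
# categories/fields shown exist:
#
#   chapter = self.make_block("chapter", course)
#   html = self.make_block("html", chapter, data="<p>hello</p>")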
class ProceduralCourseTestMixin(object):
"""
Contains methods for testing courses generated procedurally
"""
def populate_course(self, branching=2, emit_signals=False):
"""
Add k chapters, k^2 sections, k^3 verticals, k^4 problems to self.course (where k = branching)
"""
user_id = self.user.id
self.populated_usage_keys = {} # pylint: disable=attribute-defined-outside-init
def descend(parent, stack): # pylint: disable=missing-docstring
if not stack:
return
xblock_type = stack[0]
for _ in range(branching):
child = ItemFactory.create(
category=xblock_type,
parent_location=parent.location,
user_id=user_id
)
self.populated_usage_keys.setdefault(xblock_type, []).append(
child.location
)
descend(child, stack[1:])
with self.store.bulk_operations(self.course.id, emit_signals=emit_signals):
descend(self.course, ['chapter', 'sequential', 'vertical', 'problem'])
class MemoryCache(object):
"""
This fits the metadata_inheritance_cache_subsystem interface used by
the modulestore, and stores the data in a dictionary in memory.
"""
def __init__(self):
self.data = {}
def get(self, key, default=None):
"""
Get a key from the cache.
Args:
key: The key to look up.
default: The value to return if the key hasn't been set previously.
"""
return self.data.get(key, default)
def set(self, key, value):
"""
Set a key in the cache.
Args:
key: The key to update.
value: The value to change the key to.
"""
self.data[key] = value
class MongoContentstoreBuilder(object):
"""
A builder class for a MongoContentStore.
"""
@contextmanager
def build(self):
"""
A contextmanager that returns a MongoContentStore, and deletes its contents
when the context closes.
"""
contentstore = MongoContentStore(
db='contentstore{}'.format(THIS_UUID),
collection='content',
**COMMON_DOCSTORE_CONFIG
)
contentstore.ensure_indexes()
try:
yield contentstore
finally:
# Delete the created database
contentstore._drop_database() # pylint: disable=protected-access
def __repr__(self):
return 'MongoContentstoreBuilder()'
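# A minimal usage sketch, assuming a MongoDB instance is reachable at
# MONGO_HOST:MONGO_PORT_NUM; the backing database is dropped when the
# context exits:
#
#   with MongoContentstoreBuilder().build() as contentstore:
#       ...  # exercise the contentstore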
class StoreBuilderBase(object):
"""
Base class for all modulestore builders.
"""
@contextmanager
def build(self, **kwargs):
"""
Build the modulestore, optionally building the contentstore as well.
"""
contentstore = kwargs.pop('contentstore', None)
if not contentstore:
with self.build_without_contentstore(**kwargs) as (contentstore, modulestore):
yield contentstore, modulestore
else:
with self.build_with_contentstore(contentstore, **kwargs) as modulestore:
yield modulestore
@contextmanager
def build_without_contentstore(self, **kwargs):
"""
Build both the contentstore and the modulestore.
"""
with MongoContentstoreBuilder().build() as contentstore:
with self.build_with_contentstore(contentstore, **kwargs) as modulestore:
yield contentstore, modulestore
class MongoModulestoreBuilder(StoreBuilderBase):
"""
A builder class for a DraftModuleStore.
"""
@contextmanager
def build_with_contentstore(self, contentstore, **kwargs):
"""
A contextmanager that returns an isolated mongo modulestore, and then deletes
all of its data at the end of the context.
Args:
contentstore: The contentstore that this modulestore should use to store
all of its assets.
"""
doc_store_config = dict(
db='modulestore{}'.format(THIS_UUID),
collection='xmodule',
asset_collection='asset_metadata',
**COMMON_DOCSTORE_CONFIG
)
# Set up a temp directory for storing filesystem content created during import
fs_root = mkdtemp()
modulestore = DraftModuleStore(
contentstore,
doc_store_config,
fs_root,
render_template=repr,
branch_setting_func=lambda: ModuleStoreEnum.Branch.draft_preferred,
metadata_inheritance_cache_subsystem=MemoryCache(),
xblock_mixins=XBLOCK_MIXINS,
)
modulestore.ensure_indexes()
try:
yield modulestore
finally:
# Delete the created database
modulestore._drop_database() # pylint: disable=protected-access
# Delete the created directory on the filesystem
rmtree(fs_root, ignore_errors=True)
def __repr__(self):
return 'MongoModulestoreBuilder()'
class VersioningModulestoreBuilder(StoreBuilderBase):
"""
A builder class for a VersioningModuleStore.
"""
@contextmanager
def build_with_contentstore(self, contentstore, **kwargs):
"""
A contextmanager that returns an isolated versioning modulestore, and then deletes
all of its data at the end of the context.
Args:
contentstore: The contentstore that this modulestore should use to store
all of its assets.
"""
doc_store_config = dict(
db='modulestore{}'.format(THIS_UUID),
collection='split_module',
**COMMON_DOCSTORE_CONFIG
)
# Set up a temp directory for storing filesystem content created during import
fs_root = mkdtemp()
modulestore = DraftVersioningModuleStore(
contentstore,
doc_store_config,
fs_root,
render_template=repr,
xblock_mixins=XBLOCK_MIXINS,
**kwargs
)
modulestore.ensure_indexes()
try:
yield modulestore
finally:
# Delete the created database
modulestore._drop_database() # pylint: disable=protected-access
# Delete the created directory on the filesystem
rmtree(fs_root, ignore_errors=True)
def __repr__(self):
return 'SplitModulestoreBuilder()'
class XmlModulestoreBuilder(StoreBuilderBase):
"""
A builder class for a XMLModuleStore.
"""
# pylint: disable=unused-argument
@contextmanager
def build_with_contentstore(self, contentstore=None, course_ids=None, **kwargs):
"""
A contextmanager that returns an isolated xml modulestore
Args:
contentstore: The contentstore that this modulestore should use to store
all of its assets.
"""
modulestore = XMLModuleStore(
DATA_DIR,
course_ids=course_ids,
default_class='xmodule.hidden_module.HiddenDescriptor',
xblock_mixins=XBLOCK_MIXINS,
)
yield modulestore
class MixedModulestoreBuilder(StoreBuilderBase):
"""
A builder class for a MixedModuleStore.
"""
def __init__(self, store_builders, mappings=None):
"""
Args:
store_builders: A list of modulestore builder objects. These will be instantiated, in order,
as the backing stores for the MixedModuleStore.
mappings: Any course mappings to pass to the MixedModuleStore on instantiation.
"""
self.store_builders = store_builders
self.mappings = mappings or {}
self.mixed_modulestore = None
@contextmanager
def build_with_contentstore(self, contentstore, **kwargs):
"""
A contextmanager that returns a mixed modulestore built on top of modulestores
generated by other builder classes.
Args:
contentstore: The contentstore that this modulestore should use to store
all of its assets.
"""
names, generators = list(zip(*self.store_builders))
with ExitStack() as stack:
modulestores = [stack.enter_context(gen.build_with_contentstore(contentstore, **kwargs)) for gen in generators]
# Make the modulestore creation function just return the already-created modulestores
store_iterator = iter(modulestores)
next_modulestore = lambda *args, **kwargs: next(store_iterator)
# Generate a fake list of stores to give the already generated stores appropriate names
stores = [{'NAME': name, 'ENGINE': 'This space deliberately left blank'} for name in names]
self.mixed_modulestore = MixedModuleStore(
contentstore,
self.mappings,
stores,
create_modulestore_instance=next_modulestore,
xblock_mixins=XBLOCK_MIXINS,
)
yield self.mixed_modulestore
def __repr__(self):
return 'MixedModulestoreBuilder({!r}, {!r})'.format(self.store_builders, self.mappings)
def asset_collection(self):
"""
Returns the collection storing the asset metadata.
"""
all_stores = self.mixed_modulestore.modulestores
if len(all_stores) > 1:
return None
store = all_stores[0]
if hasattr(store, 'asset_collection'):
# Mongo modulestore beneath mixed.
# Returns the entire collection with *all* courses' asset metadata.
return store.asset_collection
else:
# Split modulestore beneath mixed.
# Split stores all asset metadata in the structure collection.
return store.db_connection.structures
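# A hedged sketch of composing builders, mirroring the setups defined below:
#
#   builder = MixedModulestoreBuilder([('split', VersioningModulestoreBuilder())])
#   with builder.build() as (contentstore, store):
#       ...  # `store` is a MixedModuleStore backed by a split modulestore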
THIS_UUID = uuid4().hex
COMMON_DOCSTORE_CONFIG = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
}
DATA_DIR = path(__file__).dirname().parent.parent / "tests" / "data" / "xml-course-root"
TEST_DATA_DIR = 'common/test/data/'
XBLOCK_MIXINS = (InheritanceMixin, XModuleMixin)
MIXED_MODULESTORE_BOTH_SETUP = MixedModulestoreBuilder([
('draft', MongoModulestoreBuilder()),
('split', VersioningModulestoreBuilder())
])
DRAFT_MODULESTORE_SETUP = MixedModulestoreBuilder([('draft', MongoModulestoreBuilder())])
SPLIT_MODULESTORE_SETUP = MixedModulestoreBuilder([('split', VersioningModulestoreBuilder())])
MIXED_MODULESTORE_SETUPS = (
DRAFT_MODULESTORE_SETUP,
SPLIT_MODULESTORE_SETUP,
)
MIXED_MS_SETUPS_SHORT = (
'mixed_mongo',
'mixed_split',
)
DIRECT_MODULESTORE_SETUPS = (
MongoModulestoreBuilder(),
# VersioningModulestoreBuilder(), # FUTUREDO: LMS-11227
)
DIRECT_MS_SETUPS_SHORT = (
'mongo',
#'split',
)
MODULESTORE_SETUPS = DIRECT_MODULESTORE_SETUPS + MIXED_MODULESTORE_SETUPS
MODULESTORE_SHORTNAMES = DIRECT_MS_SETUPS_SHORT + MIXED_MS_SETUPS_SHORT
SHORT_NAME_MAP = dict(list(zip(MODULESTORE_SETUPS, MODULESTORE_SHORTNAMES)))
CONTENTSTORE_SETUPS = (MongoContentstoreBuilder(),)
DOT_FILES_DICT = {
".DS_Store": None,
".example.txt": "BLUE",
}
TILDA_FILES_DICT = {
"example.txt~": "RED"
}
class PureModulestoreTestCase(TestCase):
"""
A TestCase designed to make testing Modulestore implementations without using Django
easier.
"""
MODULESTORE = None
def setUp(self):
super(PureModulestoreTestCase, self).setUp()
builder = self.MODULESTORE.build()
self.assets, self.store = builder.__enter__()
self.addCleanup(builder.__exit__, None, None, None)
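# A sketch of a concrete subclass; the class name and assertion are
# hypothetical:
#
#   class SplitSmokeTest(PureModulestoreTestCase):
#       MODULESTORE = SPLIT_MODULESTORE_SETUP
#
#       def test_store_is_available(self):
#           self.assertIsNotNone(self.store)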
|
cpennington/edx-platform
|
common/lib/xmodule/xmodule/modulestore/tests/utils.py
|
Python
|
agpl-3.0
| 17,084
|
import shutil
from os.path import isfile
from os import remove
from coalib.results.result_actions.ShowPatchAction import ShowPatchAction
from coalib.results.result_actions.ResultAction import ResultAction
class ApplyPatchAction(ResultAction):
SUCCESS_MESSAGE = "Patch applied successfully."
is_applicable = staticmethod(ShowPatchAction.is_applicable)
def apply(self,
result,
original_file_dict,
file_diff_dict,
no_orig: bool=False):
"""
Apply patch
:param no_orig: Whether or not to create .orig backup files
"""
for filename in result.diffs:
pre_patch_filename = filename
if filename in file_diff_dict:
diff = file_diff_dict[filename]
pre_patch_filename = (diff.rename
if diff.rename is not False
else filename)
file_diff_dict[filename] += result.diffs[filename]
else:
file_diff_dict[filename] = result.diffs[filename]
# Backup original file, only if there was no previous patch
# from this run though!
if not no_orig and isfile(pre_patch_filename):
shutil.copy2(pre_patch_filename,
pre_patch_filename + ".orig")
diff = file_diff_dict[filename]
if diff.delete or diff.rename:
if isfile(pre_patch_filename):
remove(pre_patch_filename)
if not diff.delete:
new_filename = (diff.rename
if diff.rename is not False
else filename)
with open(new_filename, mode='w', encoding='utf-8') as file:
file.writelines(diff.modified)
return file_diff_dict
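# A hedged usage sketch; `result` and `original_file_dict` are assumed to come
# from a coala run, which normally drives this action itself:
#
#   file_diff_dict = {}
#   ApplyPatchAction().apply(result, original_file_dict, file_diff_dict)
#   # -> the diffs in result.diffs are written to disk and, unless no_orig is
#   #    True, a "<filename>.orig" backup is created first.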
|
sils1297/coala
|
coalib/results/result_actions/ApplyPatchAction.py
|
Python
|
agpl-3.0
| 1,931
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2015 Luc Saffre
# This file is part of Lino Welfare.
#
# Lino Welfare is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Lino Welfare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Lino Welfare. If not, see
# <http://www.gnu.org/licenses/>.
"""
Database fields for `lino_welfare.modlib.debts`.
"""
from __future__ import unicode_literals
from django.db import models
from lino.api import _
class PeriodsField(models.DecimalField):
"""
Used for `Entry.periods` and `Account.periods`
(the latter holds simply the default value for the former).
It means: for how many months the entered amount counts.
Default value is 1. For yearly amounts set it to 12.
"""
def __init__(self, *args, **kwargs):
defaults = dict(
blank=True,
default=1,
help_text=_("""\
For how many months the entered amount counts.
For example 1 means a monthly amount, 12 a yearly amount."""),
#~ max_length=3,
max_digits=3,
decimal_places=0,
)
defaults.update(kwargs)
super(PeriodsField, self).__init__(*args, **defaults)
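# A minimal sketch of declaring the field on a model (model and verbose name
# are hypothetical):
#
#   class Entry(models.Model):
#       periods = PeriodsField(_("Periods"))  # 1 = monthly amount, 12 = yearly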
#~ class PeriodsField(models.IntegerField):
#~ """
#~ Used for `Entry.periods` and `Account.periods`
#~ (which holds simply the default value for the former).
#~ It means: for how many months the entered amount counts.
#~ Default value is 1. For yearly amounts set it to 12.
#~ """
#~ def __init__(self, *args, **kwargs):
#~ defaults = dict(
#~ max_length=3,
# max_digits=3,
#~ blank=True,
#~ null=True
#~ )
#~ defaults.update(kwargs)
#~ super(PeriodsField, self).__init__(*args, **defaults)
|
khchine5/lino-welfare
|
lino_welfare/modlib/debts/fields.py
|
Python
|
agpl-3.0
| 2,249
|
from django.conf.urls.defaults import *
from django.conf import settings
from django.views.generic.simple import direct_to_template, redirect_to
UUID_REGEX = '[\w]{8}(-[\w]{4}){3}-[\w]{12}'
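# Illustrative only: the pattern matches canonical UUIDs such as
# "123e4567-e89b-12d3-a456-426655440000" and is interpolated below as a named
# capture group, e.g. r'transfer/(?P<uuid>' + UUID_REGEX + ')/$'.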
urlpatterns = patterns('main.views',
# Index
(r'^$', 'home'),
# Forbidden
(r'forbidden/$', 'forbidden'),
# Transfer
(r'transfer/$', 'transfer_grid'),
(r'transfer/(?P<uuid>' + UUID_REGEX + ')/$', 'transfer_detail'),
(r'transfer/(?P<uuid>' + UUID_REGEX + ')/delete/$', 'transfer_delete'),
(r'transfer/(?P<uuid>' + UUID_REGEX + ')/microservices/$', 'transfer_microservices'),
(r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/$', 'transfer_rights_list'),
(r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/add/$', 'transfer_rights_edit'),
(r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/(?P<id>\d+)/$', 'transfer_rights_edit'),
(r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/delete/(?P<id>\d+)/$', 'transfer_rights_delete'),
(r'transfer/(?P<uuid>' + UUID_REGEX + ')/rights/grants/(?P<id>\d+)/$', 'transfer_rights_grants_edit'),
(r'transfer/status/$', 'transfer_status'),
(r'transfer/status/(?P<uuid>' + UUID_REGEX + ')/$', 'transfer_status'),
(r'transfer/select/(?P<source_directory_id>\d+)/$', 'transfer_select'),
(r'transfer/browser/$', 'transfer_browser'),
# Ingest
(r'ingest/$', 'ingest_grid'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/$', 'ingest_detail'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/delete/$', 'ingest_delete'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/metadata/$', 'ingest_metadata_list'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/metadata/add/$', 'ingest_metadata_edit'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/metadata/(?P<id>\d+)/$', 'ingest_metadata_edit'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/metadata/delete/(?P<id>\d+)/$', 'ingest_metadata_delete'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/microservices/$', 'ingest_microservices'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/$', 'ingest_rights_list'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/add/$', 'ingest_rights_edit'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/(?P<id>\d+)/$', 'ingest_rights_edit'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/delete/(?P<id>\d+)/$', 'ingest_rights_delete'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/rights/grants/(?P<id>\d+)/$', 'ingest_rights_grants_edit'),
(r'ingest/(?P<uuid>' + UUID_REGEX + ')/upload/$', 'ingest_upload'),
(r'ingest/status/$', 'ingest_status'),
(r'ingest/status/(?P<uuid>' + UUID_REGEX + ')/$', 'ingest_status'),
(r'ingest/normalization-report/(?P<uuid>' + UUID_REGEX + ')/$', 'ingest_normalization_report'),
(r'ingest/preview/aip/(?P<jobuuid>' + UUID_REGEX + ')/$', 'ingest_browse_aip'),
(r'ingest/preview/normalization/(?P<jobuuid>' + UUID_REGEX + ')/$', 'ingest_browse_normalization'),
# Jobs and tasks (is part of ingest)
(r'jobs/(?P<uuid>' + UUID_REGEX + ')/explore/$', 'jobs_explore'),
(r'jobs/(?P<uuid>' + UUID_REGEX + ')/list-objects/$', 'jobs_list_objects'),
(r'tasks/(?P<uuid>' + UUID_REGEX + ')/$', 'tasks'),
(r'task/(?P<uuid>' + UUID_REGEX + ')/$', 'task'),
# Access
(r'access/$', 'access_list'),
(r'access/(?P<id>\d+)/delete/$', 'access_delete'),
# Lookup
(r'lookup/rightsholder/(?P<id>\d+)/$', 'rights_holders_lookup'),
# Autocomplete
(r'autocomplete/rightsholders$', 'rights_holders_autocomplete'),
# Administration
(r'administration/$', 'administration'),
#(r'administration/edit/(?P<id>\d+)/$', 'administration_edit'),
(r'administration/dip/$', 'administration_dip'),
(r'administration/dip/edit/(?P<id>\d+)/$', 'administration_dip_edit'),
(r'administration/dips/atom/$', 'administration_atom_dips'),
(r'administration/dips/contentdm/$', 'administration_contentdm_dips'),
(r'administration/sources/$', 'administration_sources'),
(r'administration/sources/delete/json/(?P<id>\d+)/$', 'administration_sources_delete_json'),
(r'administration/processing/$', 'administration_processing'),
(r'administration/sources/json/$', 'administration_sources_json'),
# Disabled until further development can be done
#(r'administration/search/$', 'administration_search'),
#(r'administration/search/flush/aips/$', 'administration_search_flush_aips'),
# JSON feeds
(r'status/$', 'status'),
(r'formdata/(?P<type>\w+)/(?P<parent_id>\d+)/(?P<delete_id>\d+)/$', 'formdata_delete'),
(r'formdata/(?P<type>\w+)/(?P<parent_id>\d+)/$', 'formdata'),
)
# Filesystem related JSON views
urlpatterns += patterns('main.filesystem',
(r'filesystem/download/$', 'download'),
(r'filesystem/contents/$', 'contents'),
(r'filesystem/children/$', 'directory_children'),
(r'filesystem/delete/$', 'delete'),
(r'filesystem/copy_to_originals/$', 'copy_to_originals'),
(r'filesystem/copy_to_arrange/$', 'copy_to_arrange'),
(r'filesystem/copy_transfer_component/$', 'copy_transfer_component'),
(r'filesystem/get_temp_directory/$', 'get_temp_directory'),
(r'filesystem/transfer/$', 'copy_to_start_transfer'),
(r'filesystem/copy_from_arrange/$', 'copy_from_arrange_to_completed')
)
|
artefactual/archivematica-history
|
src/dashboard/src/main/urls.py
|
Python
|
agpl-3.0
| 5,306
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'root'
import unittest
from mock import Mock
from Protocol import Protocol
import json
import os
from settings import settings
class ProtocolTest (unittest.TestCase):
def setUp(self):
self.protocol = Protocol(True)
def tearDown(self):
os.remove("test.u1db")
os.remove("test1.u1db")
"""
method: protocol
when: called
with: typeInsertAndList
should: insertCorrect
"""
def test_protocol_called_typeInsertAndList_insertCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"insert","lista":[{"cloud":"Stacksync", "user_eyeos":"eyeID_EyeosUser_2","status": "NEW", "is_root": false, "version": 1, "filename": "clients", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/documents/clients", "client_modified": "2013-03-08 10:36:41.997", "id": 9873615, "user": "eyeos","is_folder":true}]}'
else:
params = '{"type":"insert","lista":[{"user_eyeos":"eyeID_EyeosUser_2","status": "NEW", "is_root": false, "version": 1, "filename": "clients", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/documents/clients", "client_modified": "2013-03-08 10:36:41.997", "id": 9873615, "user": "eyeos","is_folder":true}]}'
aux = json.loads(params)
self.protocol.insert = Mock()
self.protocol.insert.return_value = True
result = self.protocol.protocol(params)
self.protocol.insert.assert_called_once_with(aux['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeSelectAndList
should: returnArray
"""
def test_protocol_called_typeSelectAndList_returnArray(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"select","lista":[{"id":"124568", "user_eyeos":"eyeID_EyeosUser_2", "cloud":"Stacksync", "path":"/documents/clients"}]}'
else:
params = '{"type":"select","lista":[{"id":"124568", "user_eyeos":"eyeID_EyeosUser_2", "path":"/documents/clients"}]}'
aux = json.loads(params)
self.protocol.select = Mock()
self.protocol.select.return_value = []
result = self.protocol.protocol(params)
self.protocol.select.assert_called_once_with(aux['lista'][0])
self.assertEquals('[]',result)
"""
method: protocol
when: called
with: typeUpdateAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateAndList_updateCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"update","lista":[{"parent_old":"null"},{"cloud": "Stacksync", "user_eyeos": "eyeID_EyeosUser_2", "status": "NEW", "is_root": false, "version": 1, "filename": "clients", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/documents/clients", "client_modified": "2013-03-08 10:36:41.997", "id": "9873615", "user": "eyeos","is_folder":true}]}'
else:
params = '{"type":"update","lista":[{"parent_old":"null"},{"user_eyeos":"eyeID_EyeosUser_2","status": "NEW", "is_root": false, "version": 1, "filename": "clients", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/documents/clients", "client_modified": "2013-03-08 10:36:41.997", "id": "9873615", "user": "eyeos","is_folder":true}]}'
aux = json.loads(params)
self.protocol.update = Mock()
self.protocol.update.return_value = True
result = self.protocol.protocol(params)
self.protocol.update.assert_called_once_with(aux['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeDeleteAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteAndList_deleteCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"delete","lista":[{"id":1234, "user_eyeos":"eyeID_EyeosUser_2", "cloud": "Stacksync", "parent_id":"3456"},{"id":"8907", "user_eyeos":"eyeID_EyeosUser_2", "cloud": "Stacksync", "parent_id":"3456"}]}'
else:
params = '{"type":"delete","lista":[{"id":1234,"user_eyeos":"eyeID_EyeosUser_2","parent_id":"3456"},{"id":"8907","user_eyeos":"eyeID_EyeosUser_2","parent_id":"3456"}]}'
aux = json.loads(params)
self.protocol.delete = Mock()
self.protocol.delete.return_value = True
result = self.protocol.protocol(params)
self.protocol.delete.assert_called_once_with(aux['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeGetParentAndPath
should: returnArray
"""
def test_protocol_called_typeGetParentAndList_returnArray(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"parent", "lista":[{"cloud": "Stacksync", "path":"/Documents/", "filename":"prueba", "user_eyeos":"eyeID_EyeosUser_2"}]}'
else:
params = '{"type":"parent", "lista":[{"path":"/Documents/", "filename":"prueba", "user_eyeos":"eyeID_EyeosUser_2"}]}'
aux = json.loads(params)
self.protocol.getParent = Mock()
self.protocol.getParent.return_value = []
result = self.protocol.protocol(params)
self.protocol.getParent.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('[]',result)
"""
method: protocol
when: called
with: typeDeleteFolderAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteFolderAndList_deleteCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"deleteFolder","lista":[{"id":"1234","user_eyeos":"eyeID_EyeosUser_2", "cloud":"Stacksync", "path":"/documents/clients"}]}'
else:
params = '{"type":"deleteFolder","lista":[{"id":"1234","user_eyeos":"eyeID_EyeosUser_2","path":"/documents/clients"}]}'
aux = json.loads(params)
self.protocol.deleteFolder = Mock()
self.protocol.deleteFolder.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteFolder.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeDeleteMetadataUserAndListUser
should: deleteCorrect
"""
def test_protocol_called_typeDeleteMetadataUserAndListUser_deleteCorrect(self):
params = '{"type":"deleteMetadataUser","lista":[{"user_eyeos":"eyeID_EyeosUser_2"}]}'
self.protocol.deleteMetadataUser = Mock()
self.protocol.deleteMetadataUser.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteMetadataUser.assert_called_once_with(json.loads(params)['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeDeleteMetadataUserAndListUserAndCloud
should: deleteCorrect
"""
def test_protocol_called_typeDeleteMetadataUserAndListUserAndCloud_deleteCorrect(self):
params = '{"type":"deleteMetadataUser","lista":[{"user_eyeos":"eyeID_EyeosUser_2", "cloud":"Stacksync"}]}'
self.protocol.deleteMetadataUser = Mock()
self.protocol.deleteMetadataUser.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteMetadataUser.assert_called_once_with(json.loads(params)['lista'])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeSelectMetatadataUserAndList
should: returnArray
"""
def test_protocol_called_typeSelectMetadataUserAndList_returnArray(self):
params = '{"type":"selectMetadataUser","lista":[{"user_eyeos":"eyeID_EyeosUser_2"}]}'
self.protocol.selectMetadataUser = Mock()
self.protocol.selectMetadataUser.return_value = []
result = self.protocol.protocol(params)
self.protocol.selectMetadataUser.assert_called_once_with("eyeID_EyeosUser_2")
self.assertEquals('[]',result)
"""
method: protocol
when: called
with: typeRenameMetadataAndUserAndList
"""
def test_protocol_called_typeRenameMetadataAndUserAndList_renameCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type": "rename", "lista": [{"user_eyeos": "eyeID_EyeosUser_2", "cloud": "Stacksync", "status": "NEW", "version": 1, "filename": "prueba.txt", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/", "client_modified": "2013-03-08 10:36:41.997", "id": "9873615", "user": "eyeos","is_folder":false}]}'
else:
params = '{"type": "rename", "lista": [{"user_eyeos": "eyeID_EyeosUser_2", "status": "NEW", "version": 1, "filename": "prueba.txt", "parent_id": "null", "server_modified": "2013-03-08 10:36:41.997", "path": "/", "client_modified": "2013-03-08 10:36:41.997", "id": "9873615", "user": "eyeos","is_folder":false}]}'
aux = json.loads(params)
self.protocol.renameMetadata = Mock()
self.protocol.renameMetadata.return_value = True
result = self.protocol.protocol(params)
self.protocol.renameMetadata.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('true',result)
"""
##################################################################################################################################################
TEST DOWNLOAD FILES
##################################################################################################################################################
"""
"""
method: protocol
when: called
with: typeInsertDownloadVersionAndList
should: insertCorrect
"""
def test_protocol_called_typeInsertDownloadVersionAndList_insertCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type": "insertDownloadVersion", "lista": [{"id": "9873615", "cloud": "Stacksync", "user_eyeos": "eyeID_EyeosUser_2", "version": "2", "recover": false}]}'
else:
params = '{"type": "insertDownloadVersion", "lista": [{"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2", "version": "2", "recover": false}]}'
aux = json.loads(params)
self.protocol.insertDownloadVersion = Mock()
self.protocol.insertDownloadVersion.return_value = True
result = self.protocol.protocol(params)
self.protocol.insertDownloadVersion.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeUpdateDownloadVersionAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateDownloadVersionAndList_updateCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type": "updateDownloadVersion", "lista": [{"id": "9873615", "cloud": "Stacksync", "user_eyeos": "eyeID_EyeosUser_2", "version": "3", "recover": false}]}'
else:
params = '{"type": "updateDownloadVersion", "lista": [{"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2", "version": "3", "recover": false}]}'
aux = json.loads(params)
self.protocol.updateDownloadVersion = Mock()
self.protocol.updateDownloadVersion.return_value = True
result = self.protocol.protocol(params)
self.protocol.updateDownloadVersion.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeDeleteDownloadVersionAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteDownloadVersionAndList_deleteCorrect(self):
params = '{"type":"deleteDownloadVersion","lista":[{"id":"9873615","user_eyeos":"eyeID_EyeosUser_2"}]}'
self.protocol.deleteDownloadVersion = Mock()
self.protocol.deleteDownloadVersion.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteDownloadVersion.assert_called_once_with("9873615","eyeID_EyeosUser_2")
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeGetDownloadVersionAndList
should: returnMetadata
"""
def test_protocol_called_typeGetDownloadVersionAndList_returnMetadata(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"getDownloadVersion","lista":[{"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2", "cloud": "Stacksync"}]}'
expected = {"id": "9873615", "cloud": "Stacksync", "user_eyeos": "eyeID_EyeosUser_2", "version": "3", "recover": False}
else:
params = '{"type":"getDownloadVersion","lista":[{"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2"}]}'
expected = {"id": "9873615", "user_eyeos": "eyeID_EyeosUser_2", "version": "3", "recover": False}
aux = json.loads(params)
self.protocol.getDownloadVersion = Mock()
self.protocol.getDownloadVersion.return_value = expected
result = self.protocol.protocol(params)
self.protocol.getDownloadVersion.assert_called_once_with(aux[ 'lista' ][0])
self.assertEquals(json.dumps(expected), result)
"""
method: protocol
when: called
with: typeRecursiveDeleteVersionAndList
should: deleteCorrect
"""
def test_protocol_called_typeRecursiveDeleteVersionAndList_deleteCorrect(self):
if settings[ 'NEW_CODE' ] == "true":
params = '{"type":"recursiveDeleteVersion","lista":[{"cloud":"Stacksync","id":"9873615","user_eyeos":"eyeID_EyeosUser_2"}]}'
else:
params = '{"type":"recursiveDeleteVersion","lista":[{"id":"9873615","user_eyeos":"eyeID_EyeosUser_2"}]}'
aux = json.loads(params)
self.protocol.recursiveDeleteVersion = Mock()
self.protocol.recursiveDeleteVersion.return_value = True
result = self.protocol.protocol(params)
self.protocol.recursiveDeleteVersion.assert_called_once_with(aux['lista'][0])
self.assertEquals('true',result)
"""
##################################################################################################################################################
TEST CALENDAR
##################################################################################################################################################
"""
"""
method: protocol
when: called
with: typeDeleteEventAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteEventAndList_deleteCorrect(self):
params = '{"type":"deleteEvent" , "lista":[{"type":"event","user_eyeos": "eyeos","calendar": "personal", "status":"DELETED" ,"isallday":"0", "timestart": "201419160000", "timeend":"201419170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Visita Médico", "location": "Barcelona", "description": "Llevar justificante"},{"type":"event","user_eyeos": "eyeos","calendarid": "eyeID_Calendar_2b", "isallday": "1", "timestart": "201420160000", "timeend":"201420170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Excursión", "location": "Girona", "description": "Mochila"}]}'
aux = json.loads(params)
self.protocol.deleteEvent = Mock()
self.protocol.deleteEvent.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteEvent.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeUpdateEventAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateEventAndList_updateCorrect(self):
params = '{"type":"updateEvent" , "lista":[{"type":"event","user_eyeos": "eyeos","calendar": "personal", "status":"CHANGED", "isallday":"0", "timestart": "201419160000", "timeend":"201419170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Visita Médico", "location": "Barcelona", "description": "Llevar justificante"},{"type":"event","user_eyeos": "eyeos","calendarid": "eyeID_Calendar_2b", "isallday": "1", "timestart": "201420160000", "timeend":"201420170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Excursión", "location": "Girona", "description": "Mochila"}]}'
aux = json.loads(params)
self.protocol.updateEvent = Mock()
self.protocol.updateEvent.return_value = True
result = self.protocol.protocol(params)
self.protocol.updateEvent.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeSelectEventAndList
should: return Array
"""
def test_protocol_called_typeSelectEventAndList_returnArray(self):
params = '{"type":"selectEvent","lista":[{"type":"event","user_eyeos":"eyeos","calendar":"personal"}]}'
aux = json.loads(params)
self.protocol.selectEvent = Mock()
self.protocol.selectEvent.return_value = []
result = self.protocol.protocol(params)
self.protocol.selectEvent.assert_called_once_with("event","eyeos","personal")
self.assertEquals("[]",result)
"""
method: protocol
when: called
with: typeInsertEventAndList
should: insertCorrect
"""
def test_protocol_called_typeInsertEventAndList_insertCorrect(self):
params = '{"type":"insertEvent" , "lista":[{"type":"event","user_eyeos": "eyeos","calendar": "personal", "status":"NEW", "isallday":"0", "timestart": "201419160000", "timeend":"201419170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Visita Médico", "location": "Barcelona", "description": "Llevar justificante"},{"type":"event","user_eyeos": "eyeos","calendarid": "eyeID_Calendar_2b", "isallday": "1", "timestart": "201420160000", "timeend":"201420170000", "repetition": "None", "finaltype": "1", "finalvalue": "0", "subject": "Excursión", "location": "Girona", "description": "Mochila"}]}'
aux = json.loads(params)
self.protocol.insertEvent = Mock()
self.protocol.insertEvent.return_value = True
result = self.protocol.protocol(params)
self.protocol.insertEvent.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeInsertCalendarAndList
should: insertCorrect
"""
def test_protocol_called_typeInsertCalendarAndList_insertCorrect(self):
params = '{"type":"insertCalendar" , "lista":[{"type":"calendar","user_eyeos": "eyeos","name": "personal", "status":"NEW","description":"personal calendar","timezone":0}]}'
aux = json.loads(params)
self.protocol.insertCalendar = Mock()
self.protocol.insertCalendar.return_value = True
result = self.protocol.protocol(params)
self.protocol.insertCalendar.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeDeleteCalendarAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteCalendarAndList_deleteCorrect(self):
params = '{"type":"deleteCalendar" , "lista":[{"type":"calendar","user_eyeos": "eyeos","name": "personal"}]}'
aux = json.loads(params)
self.protocol.deleteCalendar = Mock()
self.protocol.deleteCalendar.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteCalendar.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeSelectCalendarAndList
should: returnArray
"""
def test_protocol_called_typeSelectCalendarAndList_returnArray(self):
params = '{"type":"selectCalendar" , "lista":[{"type":"calendar","user_eyeos": "eyeos"}]}'
aux = json.loads(params)
self.protocol.selectCalendar = Mock()
self.protocol.selectCalendar.return_value = []
result = self.protocol.protocol(params)
self.protocol.selectCalendar.assert_called_once_with(aux['lista'][0])
self.assertEquals("[]",result)
"""
method: protocol
when: called
with: typeUpdateCalendarAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateCalendarAndList_updateCorrect(self):
params = '{"type":"updateCalendar" , "lista":[{"type":"calendar","user_eyeos": "eyeos","name":"personal","description":"personal calendar","timezone":0,"status":"CHANGED"}]}'
aux = json.loads(params)
self.protocol.updateCalendar = Mock()
self.protocol.updateCalendar.return_value = True
result = self.protocol.protocol(params)
self.protocol.updateCalendar.assert_called_once_with(aux['lista'])
self.assertEquals("true",result)
"""
method: protocol
when: called
with: typeDeleteCalendarUserAndList
should: deleteCorrect
"""
def test_protocol_called_typeDeleteCalendarUserAndList_deleteCorrect(self):
params = '{"type":"deleteCalendarUser","lista":[{"user_eyeos":"eyeos"}]}'
self.protocol.deleteCalendarUser = Mock()
self.protocol.deleteCalendarUser.return_value = True
result = self.protocol.protocol(params)
self.protocol.deleteCalendarUser.assert_called_once_with("eyeos")
self.assertEquals('true',result)
"""
method: protocol
when: called
with: selectCalendarsAndEventsAndList
should: returnArray
"""
def test_protocol_called_selectCalendarsAndEventsAndList_returnArray(self):
params = '{"type":"selectCalendarsAndEvents","lista":[{"user_eyeos":"eyeos"}]}'
self.protocol.selectCalendarsAndEvents = Mock()
self.protocol.selectCalendarsAndEvents.return_value = []
result = self.protocol.protocol(params)
self.protocol.selectCalendarsAndEvents.assert_called_once_with("eyeos")
self.assertEquals('[]',result)
"""
##################################################################################################################################################
TEST LOCK FILE
##################################################################################################################################################
"""
"""
method: protocol
when: called
with: typeGetMetadataFileAndList
should: returnArray
"""
def test_protocol_called_typeGetMetadataFileAndList_returnArray(self):
params = '{"type":"getMetadataFile","lista":[{"id":"124568","cloud":"Stacksync"}]}'
self.protocol.getMetadataFile = Mock()
self.protocol.getMetadataFile.return_value = []
result = self.protocol.protocol(params)
self.protocol.getMetadataFile.assert_called_once_with("124568","Stacksync")
self.assertEquals('[]',result)
"""
method: protocol
when: called
with: typeLockFileAndList
should: correctBlock
"""
def test_protocol_called_typeLockFileAndList_returnCorrectBlock(self):
params = '{"type":"lockFile","lista":[{"id":"124568","cloud":"Stacksync","username":"eyeos","IpServer":"192.168.56.101","datetime":"2015-05-12 10:50:00","status":"open","timeLimit":10}]}'
aux = json.loads(params)
self.protocol.lockFile = Mock()
self.protocol.lockFile.return_value = True
result = self.protocol.protocol(params)
self.protocol.lockFile.assert_called_once_with(aux['lista'][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeUpdateDateTimeAndList
should: updateCorrect
"""
def test_protocol_called_typeUpdateDateTimeAndList_returnCorrectBlock(self):
params = '{"type":"updateDateTime","lista":[{"id":"124568","cloud":"Stacksync","username":"eyeos","IpServer":"192.168.56.101","datetime":"2015-05-12 10:50:00","status":"open"}]}'
aux = json.loads(params)
self.protocol.updateDateTime = Mock()
self.protocol.updateDateTime.return_value = True
result = self.protocol.protocol(params)
self.protocol.updateDateTime.assert_called_once_with(aux['lista'][0])
self.assertEquals('true',result)
"""
method: protocol
when: called
with: typeUnLockFileAndList
should: returnCorrectUnBlock
"""
def test_protocol_called_typeUnLockFileAndList_returnCorrectUnBlock(self):
params = '{"type":"unLockFile","lista":[{"id":"124568","cloud":"Stacksync","username":"eyeos","IpServer":"192.168.56.101","datetime":"2015-05-12 10:50:00","status":"close"}]}'
aux = json.loads(params)
self.protocol.unLockFile = Mock()
self.protocol.unLockFile.return_value = True
result = self.protocol.protocol(params)
self.protocol.unLockFile.assert_called_once_with(aux['lista'][0])
self.assertEquals('true',result)
|
cloudspaces/eyeos-u1db
|
eyeos/extern/u1db/ProtocolTest.py
|
Python
|
agpl-3.0
| 25,065
|
# Copyright (C) 2020 Terrabit
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
from . import test_ro_city
|
OCA/l10n-romania
|
l10n_ro_city/tests/__init__.py
|
Python
|
agpl-3.0
| 126
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# res_partner
# Copyright (c) 2013 Codeback Software S.L. (http://codeback.es)
# @author: Miguel García <miguel@codeback.es>
# @author: Javier Fuentes <javier@codeback.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from datetime import datetime, timedelta
from openerp.tools.translate import _
class product_product(osv.osv):
"""añadimos los nuevos campos"""
_name = "product.product"
_inherit = "product.product"
_columns = {
'web_visible': fields.boolean(string='Web Visible')
}
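# Illustrative usage (hypothetical, not part of this module): the new flag can
# be used to filter which products are exposed on a website, e.g. with an
# old-style osv search domain such as
#   ids = pool.get('product.product').search(cr, uid, [('web_visible', '=', True)])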
|
codeback/openerp-cbk_product_web_visible
|
product.py
|
Python
|
agpl-3.0
| 1,398
|
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
@VOLT.Command(
bundles = VOLT.ServerBundle('create',
needs_catalog=True,
supports_live=False,
default_host=True,
safemode_available=False,
supports_daemon=True),
options = (
# Hidden option to restore the hashinator in addition to the tables.
VOLT.BooleanOption('-r', '--replica', 'replica', 'start replica cluster', default = False),
),
description = 'Start a new, empty database.'
)
def create(runner):
runner.go()
|
eoneil1942/voltdb-4.7fix
|
lib/python/voltcli/voltdb.d/create.py
|
Python
|
agpl-3.0
| 1,894
|
from haystack.indexes import SearchIndex, CharField
from haystack import site
from serie.models import Serie
class SerieIndex(SearchIndex):
text = CharField(document=True, use_template=True)
name = CharField(model_attr='name')
name_es = CharField(model_attr='name_es')
name_en = CharField(model_attr='name_en')
site.register(Serie, SerieIndex)
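# Note (hypothetical example, not part of the original file): with
# use_template=True, Haystack builds the document field from a template that,
# by convention, lives at search/indexes/serie/serie_text.txt and might contain
# something like:
#
#   {{ object.name }}
#   {{ object.name_es }}
#   {{ object.name_en }}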
|
alabs/petateca
|
petateca/apps/serie/search_indexes.py
|
Python
|
agpl-3.0
| 363
|
# -*- coding: utf-8 -*-
# Django settings for basic pinax project.
import os.path
import posixpath
import pinax
PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# tells Pinax to use the default theme
PINAX_THEME = "default"
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
# ("Your Name", "your_email@domain.com"),
]
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = "en"
USE_I18N = True
ugettext = lambda s: s
LANGUAGES = (
('en', u'English'),
('it', u'Italiano'),
)
CMS_LANGUAGES = LANGUAGES
# Make English the default language
DEFAULT_LANGUAGE = 1
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'media')
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/site_media/media/'
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'static')
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = '/site_media/static/'
# Additional directories which hold static files
STATICFILES_DIRS = (
('basic071', os.path.join(PROJECT_ROOT, 'media')),
('pinax', os.path.join(PINAX_ROOT, 'media', PINAX_THEME)),
)
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# 1.2
#MEDIA_ROOT = os.path.join(PROJECT_ROOT, "site_media", "media")
#MEDIA_URL = "/site_media/media/"
OLWIDGET_MEDIA_URL = "/site_media/static/olwidget/"
#STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media", "static")
#STATIC_URL = "/site_media/static/"
#STATICFILES_DIRS = [
# os.path.join(PROJECT_ROOT, "media"),
# os.path.join(PINAX_ROOT, "media", PINAX_THEME),
#]
#ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Make this unique, and don't share it with anybody.
SECRET_KEY = "wdsk$eseb7-11y_kb%r$j)%azk-0&l*v#q0$j0d2e%aqcna+l$"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.load_template_source",
"django.template.loaders.app_directories.load_template_source",
]
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
#"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django_openid.consumer.SessionConsumer",
#"django.contrib.messages.middleware.MessageMiddleware",
"middleware.LocaleMiddleware",
"django.middleware.doc.XViewMiddleware",
"pagination.middleware.PaginationMiddleware",
"pinax.middleware.security.HideSensistiveFieldsMiddleware",
'middleware.MultilingualURLMiddleware',
'middleware.DefaultLanguageMiddleware',
#"debug_toolbar.middleware.DebugToolbarMiddleware",
'django.middleware.cache.FetchFromCacheMiddleware',
"flatpages.middleware.FlatpageFallbackMiddleware",
]
ROOT_URLCONF = "urls"
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, "templates"),
os.path.join(PINAX_ROOT, "templates", PINAX_THEME),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
#"django.contrib.messages.context_processors.messages",
"pinax.core.context_processors.pinax_settings",
"notification.context_processors.notification",
"announcements.context_processors.site_wide_announcements",
"account.context_processors.openid",
"account.context_processors.account",
"multilingual.context_processors.multilingual",
]
INSTALLED_APPS = [
# included
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
#"django.contrib.messages",
"django.contrib.humanize",
"django.contrib.gis",
"django.contrib.sitemaps",
"flatpages",
"pinax.templatetags",
# external
"notification", # must be first
"django_openid",
"emailconfirmation",
"mailer",
"announcements",
"pagination",
"timezones",
"ajax_validation",
"uni_form",
"staticfiles",
#"debug_toolbar",
#added to basic_project
"django_extensions",
"tagging",
# internal (for now)
"basic_profiles",
"account",
"signup_codes",
"about",
# non-pinax
"rosetta",
# ours
"olwidget",
"attractions",
"django_extensions",
"multilingual",
]
#MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
ABSOLUTE_URL_OVERRIDES = {
"auth.user": lambda o: "/profiles/profile/%s/" % o.username,
}
MARKUP_FILTER_FALLBACK = "none"
MARKUP_CHOICES = [
("restructuredtext", u"reStructuredText"),
("textile", u"Textile"),
("markdown", u"Markdown"),
("creole", u"Creole"),
]
WIKI_MARKUP_CHOICES = MARKUP_CHOICES
AUTH_PROFILE_MODULE = "basic_profiles.Profile"
NOTIFICATION_LANGUAGE_MODULE = "account.Account"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
if ACCOUNT_EMAIL_AUTHENTICATION:
AUTHENTICATION_BACKENDS = [
"account.auth_backends.EmailModelBackend",
]
else:
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
]
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
CONTACT_EMAIL = ""
SITE_NAME = ""
LOGIN_URL = "/account/login/"
LOGIN_REDIRECT_URLNAME = "what_next"
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
LANGUAGE_HREF_IGNORES = ['sitemap']
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
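# A hypothetical local_settings.py for a development checkout would hold only
# the values that differ from this file, for example:
#
#   DEBUG = True
#   TEMPLATE_DEBUG = DEBUG
#   DATABASES = {
#       "default": {
#           "ENGINE": "django.contrib.gis.db.backends.postgis",
#           "NAME": "waywayd_dev",
#       }
#   }
#   EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"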
FORCE_LOWERCASE_TAGS = True
#CACHE_BACKEND = "memcached://127.0.0.1:11211/"
#CACHE_MIDDLEWARE_SECONDS = 10000
#CACHE_MIDDLEWARE_KEY_PREFIX = 'cittadelcapo'
#CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
|
markherringer/waywayd
|
settings.py
|
Python
|
agpl-3.0
| 7,189
|
from django_restapi.model_resource import Collection
from django_restapi.responder import XMLResponder
from django_restapi.resource import Resource
from django_restapi.authentication import *
from django.contrib.auth.models import User
from django.shortcuts import render_to_response,get_object_or_404
from uwcs_website.games.models import Game
#class UserEntry(Resource):
# def read(self, request, user_id):
# context = {'friendship':get_object_or_404(}
# return render_to_response('xml/user.xml', context)
xml_user = Collection(
queryset = User.objects.all(),
permitted_methods = ('GET',),
expose_fields = ('first_name','last_name','is_staff'),
responder = XMLResponder(),
authentication = HttpBasicAuthentication()
)
xml_games = Collection(
queryset = Game.objects.all(),
permitted_methods = ('GET',),
responder = XMLResponder(),
)
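# Illustrative wiring (hypothetical urls.py snippet, not part of this module):
# django_restapi collections are plain callable resources, so they can be
# mapped directly in the URLconf using the old patterns()/url() helpers, e.g.
#
#   urlpatterns += patterns('',
#       url(r'^rest/users/(.*?)/?$', xml_user),
#       url(r'^rest/games/(.*?)/?$', xml_games),
#   )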
|
UWCS/uwcs-website
|
uwcs_website/rest.py
|
Python
|
agpl-3.0
| 889
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{"name": "Tax analysis",
"version": "8.0.1.0.0",
"depends": ["base", "account"],
"author": "Camptocamp SA,Odoo Community Association (OCA)",
"category": 'Accounting & Finance',
"website": "http://www.camptocamp.com",
"license": "AGPL-3",
"data": ["account_tax_analysis_view.xml"],
'installable': False,
"active": False,
}
|
OpenPymeMx/account-financial-tools
|
account_tax_analysis/__openerp__.py
|
Python
|
agpl-3.0
| 1,265
|
from django import forms
from utils import setting_handler
from core.homepage_elements.about import plugin_settings
class AboutForm(forms.Form):
title = forms.CharField(
help_text='The title of the about block, e.g. "About this Journal"',
)
description = forms.CharField(
widget=forms.Textarea,
help_text='A general description of the journal.',
)
def save(self, journal, commit=True):
title = self.cleaned_data.get('title')
description = self.cleaned_data.get('description')
if commit:
setting_handler.save_plugin_setting(
plugin_settings.get_self(),
'about_title',
title,
journal,
)
setting_handler.save_setting(
'general',
'journal_description',
journal,
description,
)
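# Illustrative usage (hypothetical view code, not part of this module):
#
#   form = AboutForm(request.POST)
#   if form.is_valid():
#       form.save(request.journal)  # persists the title and description settings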
|
BirkbeckCTP/janeway
|
src/core/homepage_elements/about/forms.py
|
Python
|
agpl-3.0
| 921
|
# Generated by Django 2.2.20 on 2021-07-08 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cornerstone', '0006_auto_20191001_0742'),
]
operations = [
migrations.AlterField(
model_name='cornerstoneenterprisecustomerconfiguration',
name='catalogs_to_transmit',
field=models.TextField(blank=True, help_text='A comma-separated list of catalog UUIDs to transmit. If blank, all customer catalogs will be transmitted. If there are overlapping courses in the customer catalogs, the overlapping course metadata will be selected from the newest catalog.', null=True),
),
migrations.AlterField(
model_name='historicalcornerstoneenterprisecustomerconfiguration',
name='catalogs_to_transmit',
field=models.TextField(blank=True, help_text='A comma-separated list of catalog UUIDs to transmit. If blank, all customer catalogs will be transmitted. If there are overlapping courses in the customer catalogs, the overlapping course metadata will be selected from the newest catalog.', null=True),
),
]
|
edx/edx-enterprise
|
integrated_channels/cornerstone/migrations/0007_auto_20210708_1446.py
|
Python
|
agpl-3.0
| 1,174
|
'''
Created on 04.12.2017
@author: sebastian
'''
from flask import render_template
from flask import redirect
from requests.exceptions import ConnectionError
from butler_offline.viewcore.state import persisted_state
from butler_offline.viewcore import request_handler
from butler_offline.viewcore import viewcore
from butler_offline.viewcore.base_html import set_error_message
import random
DATABASE_VERSION = 0
SESSION_RANDOM = str(random.random())
REDIRECTOR = lambda x: redirect(x, code=301)
RENDER_FULL_FUNC = render_template
BASE_THEME_PATH = 'theme/'
REDIRECT_KEY = 'redirect_to'
def handle_request(request, request_action, html_base_page):
if request.method == 'POST' and 'ID' in request.values:
print('transactional request found')
if request.values['ID'] != current_key():
print('transaction rejected (requested:' + current_key() + ", got:" + request.values['ID'] + ')')
context = viewcore.generate_base_context('Fehler')
rendered_content = request_handler.RENDER_FULL_FUNC(theme('core/error_race.html'), **{})
context['content'] = rendered_content
return request_handler.RENDER_FULL_FUNC(theme('index.html'), **context)
print('transaction allowed')
request_handler.DATABASE_VERSION = request_handler.DATABASE_VERSION + 1
print('new db version: ' + str(request_handler.DATABASE_VERSION))
context = viewcore.generate_base_context('Fehler')
try:
context = request_action(request)
persisted_state.save_tainted()
except ConnectionError as err:
set_error_message(context, 'Verbindung zum Server konnte nicht aufgebaut werden.')
context['%Errortext'] = ''
except Exception as e:
set_error_message(context, 'Ein Fehler ist aufgetreten: \n ' + str(e))
print(e)
context['%Errortext'] = ''
if request.method == 'POST' and 'redirect' in request.values:
return request_handler.REDIRECTOR('/' + str(request.values['redirect']) + '/')
if '%Errortext' in context:
rendered_content = context['%Errortext']
elif REDIRECT_KEY in context:
return REDIRECTOR(context[REDIRECT_KEY])
else:
if 'special_page' in context:
html_base_page = context['special_page']
rendered_content = request_handler.RENDER_FULL_FUNC(theme(html_base_page), **context)
context['content'] = rendered_content
response = request_handler.RENDER_FULL_FUNC(theme('index.html'), **context)
return response
def create_redirect_context(url):
return {
REDIRECT_KEY: url
}
def theme(page):
return request_handler.BASE_THEME_PATH + page
def current_key():
return request_handler.SESSION_RANDOM + ' ' + persisted_state.database_instance().name + '_VERSION_' + str(request_handler.DATABASE_VERSION)
def stub_me():
request_handler.RENDER_FULL_FUNC = full_render_stub
request_handler.REDIRECTOR = lambda x: x
def stub_me_theme():
request_handler.RENDER_FULL_FUNC = full_render_stub_theme
request_handler.REDIRECTOR = lambda x: x
def full_render_stub(theme, **context):
return context
def full_render_stub_theme(theme, **context):
if 'content' not in context:
return theme
return context
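# Illustrative test usage (hypothetical, not part of this module): stub_me()
# replaces RENDER_FULL_FUNC with full_render_stub, which simply returns the
# context dict, and turns REDIRECTOR into an identity function. A test can
# therefore call handle_request(...) and assert on the returned context
# instead of on rendered HTML.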
|
RosesTheN00b/BudgetButlerWeb
|
butler_offline/viewcore/request_handler.py
|
Python
|
agpl-3.0
| 3,268
|
# Based on https://bitbucket.org/jokull/django-email-login/
from django.contrib.auth import authenticate
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
attrs_dict = {'class': 'required'}
class RegistrationForm(forms.Form):
email = forms.EmailField(
widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)),
label=_("Email"))
password1 = forms.CharField(
widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password"))
password2 = forms.CharField(
widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password (again)"))
def clean(self):
"""
Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if ('password1' in self.cleaned_data
and 'password2' in self.cleaned_data):
if (self.cleaned_data['password1'] !=
self.cleaned_data['password2']):
raise forms.ValidationError(_(
"The two password fields didn't match."))
return self.cleaned_data
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_(
"This email address is already in use. "
"Please supply a different email address."))
return self.cleaned_data['email']
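# Illustrative sketch (hypothetical, not part of the original module): because
# the password comparison happens in clean(), a mismatch surfaces as a
# non-field error rather than being attached to either password field, e.g.
#
#   form = RegistrationForm({'email': 'user@example.com',
#                            'password1': 'secret', 'password2': 'other'})
#   form.is_valid()            # False
#   form.non_field_errors()    # ["The two password fields didn't match."]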
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
email/password logins.
"""
email = forms.EmailField(label=_("Email"), max_length=75)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
if email and password:
self.user_cache = authenticate(email=email, password=password)
if self.user_cache is None:
raise forms.ValidationError(_(
"Please enter a correct username and password. "
"Note that both fields are case-sensitive."))
elif not self.user_cache.is_active:
raise forms.ValidationError(_("This account is inactive."))
return self.cleaned_data
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class PasswordResetForm(auth_forms.PasswordResetForm):
def __init__(self, *args, **kwargs):
auth_forms.PasswordResetForm.__init__(self, *args, **kwargs)
self.fields['email'].label = 'Email'
|
datagutten/comics
|
comics/accounts/forms.py
|
Python
|
agpl-3.0
| 3,543
|
# Ariane ProcOS plugin
# Gears
#
# Copyright (C) 2015 echinopsii
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import socket
import threading
import time
import timeit
import traceback
from sys import platform as _platform
import subprocess
from ariane_clip3.exceptions import ArianeMessagingTimeoutError
from ariane_clip3.mapping import ContainerService, Container, NodeService, Node, Endpoint, EndpointService, Transport, \
Link, LinkService, Gate, GateService
from ariane_clip3.directory import LocationService, Location, RoutingAreaService, RoutingArea, OSInstanceService,\
OSInstance, SubnetService, Subnet, IPAddressService, IPAddress, EnvironmentService, Environment, TeamService, Team,\
OSTypeService, OSType, Company, CompanyService, NICService, NIC
from ariane_clip3.injector import InjectorGearSkeleton
from ariane_procos.components import SystemComponent
from ariane_procos.config import RoutingAreaConfig, SubnetConfig
from ariane_procos.system import NetworkInterfaceCard, MapSocket
from ariane_clip3.domino import DominoActivator
__author__ = 'mffrench'
LOGGER = logging.getLogger(__name__)
class DirectoryGear(InjectorGearSkeleton):
def __init__(self):
LOGGER.debug("DirectoryGear.__init__")
super(DirectoryGear, self).__init__(
gear_id='ariane.community.plugin.procos.gears.cache.directory_gear@' + str(SystemGear.hostname),
gear_name='procos_directory_gear@' + str(SystemGear.hostname),
gear_description='Ariane ProcOS directory gear for ' + str(SystemGear.hostname),
gear_admin_queue='ariane.community.plugin.procos.gears.cache.directory_gear@' + str(SystemGear.hostname),
running=False
)
self.update_count = 0
self.is_network_sync_possible = True
self.current_possible_network = []
def on_start(self):
LOGGER.debug("DirectoryGear.on_start")
self.running = True
self.cache(running=self.running)
def on_stop(self):
LOGGER.debug("DirectoryGear.on_stop")
if self.running:
self.running = False
self.cache(running=self.running)
def on_failure(self, exception_type, exception_value, traceback_):
LOGGER.debug("DirectoryGear.on_failure")
LOGGER.error("DirectoryGear.on_failure - " + exception_type.__str__() + "/" + exception_value.__str__())
LOGGER.error("DirectoryGear.on_failure - " + traceback_.format_exc())
if self.running:
self.running = False
self.cache(running=self.running)
def gear_start(self):
LOGGER.debug("DirectoryGear.gear_start")
self.on_start()
LOGGER.info('procos_directory_gear@' + str(SystemGear.hostname) + ' has been started.')
def gear_stop(self):
LOGGER.debug("DirectoryGear.gear_stop")
if self.running:
self.running = False
self.cache(running=self.running)
LOGGER.info('procos_directory_gear@' + str(SystemGear.hostname) + ' has been stopped.')
def compute_current_possible_network(self, operating_system):
LOGGER.debug("DirectoryGear.compute_current_possible_network")
# Find current Location, routing area and subnets according to runtime IP on NICs and possible locations:
current_possible_location_config = []
current_possible_routing_area_config = []
current_possible_subnet_config = []
current_possible_remote_vpn_location_config = []
current_possible_remote_vpn_routing_area_config = []
current_possible_remote_vpn_subnet_config = []
local_routing_area = SystemGear.config.local_routing_area
if local_routing_area is not None:
local_routing_area.name = SystemGear.hostname+".local"
local_routing_area.description = SystemGear.hostname+".local routing area"
local_routing_area.multicast = RoutingArea.RA_MULTICAST_NOLIMIT
local_routing_area.ra_type = RoutingArea.RA_TYPE_VIRT
else:
local_routing_area = RoutingAreaConfig(
name=SystemGear.hostname+".local",
description=SystemGear.hostname+".local routing area",
multicast=RoutingArea.RA_MULTICAST_NOLIMIT,
ra_type=RoutingArea.RA_TYPE_VIRT
)
local_virt_subnet_config = []
for nic in operating_system.nics:
nic_is_located = False
LOGGER.debug("DirectoryGear.compute_current_possible_network - current nic: " + str(nic))
try:
if nic.ipv4_address is not None:
if not nic.ipv4_address.startswith('127'):
for location_config in SystemGear.config.potential_locations:
LOGGER.debug("DirectoryGear.compute_current_possible_network - current loc config: " +
str(location_config))
for routing_area_config in location_config.routing_areas:
LOGGER.debug("DirectoryGear.compute_current_possible_network - current RA config: " +
str(routing_area_config))
for subnet_config in routing_area_config.subnets:
LOGGER.debug("DirectoryGear.compute_current_possible_network - "
"current SUB config: " + str(subnet_config))
if NetworkInterfaceCard.ip_is_in_subnet(nic.ipv4_address,
subnet_config.subnet_ip,
subnet_config.subnet_mask):
if routing_area_config.type == RoutingArea.RA_TYPE_VPN:
current_possible_remote_vpn_location_config.append(location_config)
current_possible_remote_vpn_routing_area_config.append(routing_area_config)
current_possible_remote_vpn_subnet_config.append(subnet_config)
else:
if location_config not in current_possible_location_config:
current_possible_location_config.append(location_config)
current_possible_routing_area_config.append(routing_area_config)
current_possible_subnet_config.append(subnet_config)
nic_is_located = True
current_fqdn = MapSocket.get_cached_hostbyaddr(nic.ipv4_address)
if current_fqdn is not None:
SystemGear.fqdn = current_fqdn
break
if nic_is_located:
break
if nic_is_located:
break
if not nic_is_located:
for subnet_config in local_routing_area.subnets:
LOGGER.debug("DirectoryGear.compute_current_possible_network - "
"current local RA subnet config: " + str(subnet_config))
if NetworkInterfaceCard.ip_is_in_subnet(nic.ipv4_address,
subnet_config.subnet_ip,
subnet_config.subnet_mask):
local_virt_subnet_config.append(subnet_config)
nic_is_located = True
break
if not nic_is_located:
if nic.mac_address is not None:
LOGGER.warn('DirectoryGear.compute_current_possible_network - '
'nic ' + nic.mac_address + '/' + nic.ipv4_address +
' has not been located on the possible networks')
else:
LOGGER.warn('DirectoryGear.compute_current_possible_network - '
'nic ' + nic.ipv4_address +
' has not been located on the possible networks')
except Exception as e:
print(e.__str__())
if current_possible_location_config.__len__() > 1:
LOGGER.warn('DirectoryGear.compute_current_possible_network - '
'multiple current possible location found - will ignore directories sync')
elif current_possible_location_config.__len__() == 0:
LOGGER.warn('DirectoryGear.compute_current_possible_network - '
'no current possible location found - will ignore directories sync')
if current_possible_location_config.__len__() != 1:
self.is_network_sync_possible = False
if current_possible_routing_area_config.__len__() == 0:
self.is_network_sync_possible = False
if current_possible_subnet_config.__len__() == 0:
self.is_network_sync_possible = False
if SystemGear.fqdn is None:
SystemGear.fqdn = SystemGear.hostname
LOGGER.debug("DirectoryGear.compute_current_possible_network - FQDN : " + str(SystemGear.fqdn))
self.current_possible_network = [
current_possible_location_config,
current_possible_routing_area_config,
current_possible_subnet_config,
current_possible_remote_vpn_location_config,
current_possible_remote_vpn_routing_area_config,
current_possible_remote_vpn_subnet_config,
local_routing_area,
local_virt_subnet_config
]
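# Note: the positional layout of current_possible_network above (locations,
# routing areas, subnets, their remote-VPN counterparts, then the local
# routing area and the local virtual subnets) is relied on by sync_network(),
# which unpacks the same eight slots by index.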
def sync_operating_system(self, operating_system):
LOGGER.debug("DirectoryGear.sync_operating_system")
# Sync Operating System
if operating_system.osi_id is not None:
LOGGER.debug("DirectoryGear.sync_operating_system - search by id")
SystemGear.osi = OSInstanceService.find_os_instance(osi_id=operating_system.osi_id)
if SystemGear.osi.name != SystemGear.hostname:
SystemGear.osi = None
operating_system.osi_id = None
if SystemGear.osi is None:
LOGGER.debug("DirectoryGear.sync_operating_system - search by hostname")
SystemGear.osi = OSInstanceService.find_os_instance(osi_name=SystemGear.hostname)
if SystemGear.osi is None:
SystemGear.osi = OSInstance(
name=SystemGear.hostname,
description=SystemGear.config.system_context.description,
admin_gate_uri=SystemGear.config.system_context.admin_gate_protocol+SystemGear.fqdn)
LOGGER.debug("DirectoryGear.sync_operating_system - save new osi")
SystemGear.osi.save()
operating_system.osi_id = SystemGear.osi.id
# SYNC EMBEDDING OSI
if SystemGear.config.system_context.embedding_osi_hostname is not None and \
SystemGear.config.system_context.embedding_osi_hostname:
LOGGER.debug("DirectoryGear.sync_operating_system - search embedding host by hostname")
embedding_osi = OSInstanceService.find_os_instance(
osi_name=SystemGear.config.system_context.embedding_osi_hostname
)
if embedding_osi is not None and SystemGear.osi.embedding_osi_id != embedding_osi.id:
SystemGear.osi.embedding_osi_id = embedding_osi.id
@staticmethod
def sync_operating_system_type(operating_system):
LOGGER.debug("DirectoryGear.sync_operating_system_type")
if SystemGear.osi is None:
LOGGER.error('DirectoryGear.sync_operating_system_type - operating system instance is not synced')
return
# Sync OS Type
if operating_system.ost_id is not None and operating_system.ost_id != 0:
SystemGear.ost = OSTypeService.find_ostype(ost_id=operating_system.ost_id)
if SystemGear.ost is not None and SystemGear.osi.ost_id != SystemGear.ost.id:
SystemGear.ost = None
SystemGear.ost_company = None
SystemGear.osi.ost_id = 0
SystemGear.osi.save()
if SystemGear.ost is None:
SystemGear.ost_company = CompanyService.find_company(
cmp_name=SystemGear.config.system_context.os_type.company.name
)
if SystemGear.ost_company is None:
SystemGear.ost_company = Company(
name=SystemGear.config.system_context.os_type.company.name,
description=SystemGear.config.system_context.os_type.company.description
)
SystemGear.ost_company.save()
SystemGear.ost = OSTypeService.find_ostype(ost_name=SystemGear.config.system_context.os_type.name,
ost_arch=SystemGear.config.system_context.os_type.architecture)
if SystemGear.ost is None:
SystemGear.ost = OSType(
name=SystemGear.config.system_context.os_type.name,
architecture=SystemGear.config.system_context.os_type.architecture,
os_type_company_id=SystemGear.ost_company.id
)
SystemGear.ost.save()
if SystemGear.osi.ost_id != SystemGear.ost.id:
SystemGear.osi.ost_id = SystemGear.ost.id
SystemGear.osi.save()
operating_system.ost_id = SystemGear.ost.id
@staticmethod
def sync_environment(operating_system):
LOGGER.debug("DirectoryGear.sync_environment")
if SystemGear.osi is None:
LOGGER.error('DirectoryGear.sync_environment - operating system instance is not synced')
return
# Sync environment
if SystemGear.config.organisation_context is not None and \
SystemGear.config.organisation_context.environment is not None:
if operating_system.environment_id is not None:
SystemGear.environment = EnvironmentService.find_environment(operating_system.environment_id)
if SystemGear.environment is not None and \
SystemGear.environment.name != SystemGear.config.organisation_context.environment.name:
SystemGear.environment.del_os_instance(SystemGear.osi)
SystemGear.environment = None
operating_system.environment_id = None
if SystemGear.environment is None:
SystemGear.environment = EnvironmentService.find_environment(
env_name=SystemGear.config.organisation_context.environment.name
)
if SystemGear.environment is None:
SystemGear.environment = Environment(
name=SystemGear.config.organisation_context.environment.name,
description=SystemGear.config.organisation_context.environment.description,
color_code=SystemGear.config.organisation_context.environment.color_code
)
SystemGear.environment.save()
operating_system.environment_id = SystemGear.environment.id
SystemGear.osi.add_environment(SystemGear.environment)
else:
if operating_system.environment_id is not None:
environment = EnvironmentService.find_environment(operating_system.environment_id)
environment.del_os_instance(SystemGear.osi)
operating_system.environment_id = None
@staticmethod
def sync_team(operating_system):
LOGGER.debug("DirectoryGear.sync_team")
if SystemGear.osi is None:
LOGGER.error('DirectoryGear.sync_team - operating system instance is not synced')
return
# Sync team
if SystemGear.config.organisation_context is not None and \
SystemGear.config.organisation_context.team is not None:
if operating_system.team_id is not None:
SystemGear.team = TeamService.find_team(team_id=operating_system.team_id)
if SystemGear.team is not None and \
SystemGear.team.name != SystemGear.config.organisation_context.team.name:
SystemGear.team.del_os_instance(SystemGear.osi)
SystemGear.team = None
operating_system.team_id = None
if SystemGear.team is None:
SystemGear.team = TeamService.find_team(team_name=SystemGear.config.organisation_context.team.name)
if SystemGear.team is None:
SystemGear.team = Team(name=SystemGear.config.organisation_context.team.name,
color_code=SystemGear.config.organisation_context.team.color_code,
description=SystemGear.config.organisation_context.team.description)
SystemGear.team.save()
operating_system.team_id = SystemGear.team.id
SystemGear.osi.add_team(SystemGear.team)
else:
if operating_system.team_id is not None:
team = TeamService.find_team(team_id=operating_system.team_id)
team.del_os_instance(SystemGear.osi)
operating_system.team_id = None
def sync_network(self, operating_system):
LOGGER.debug("DirectoryGear.sync_network")
if SystemGear.osi is None:
LOGGER.error('DirectoryGear.sync_network - operating system instance is not synced')
return
# Sync network stuffs
current_possible_location_config = self.current_possible_network[0]
current_possible_routing_area_config = self.current_possible_network[1]
current_possible_subnet_config = self.current_possible_network[2]
current_possible_remote_vpn_location_config = self.current_possible_network[3]
current_possible_remote_vpn_routing_area_config = self.current_possible_network[4]
current_possible_remote_vpn_subnet_config = self.current_possible_network[5]
local_routing_area = self.current_possible_network[6]
local_virt_subnet_config = self.current_possible_network[7]
current_location = current_possible_location_config[0]
# Sync location
LOGGER.debug("DirectoryGear.sync_network - Sync location")
if operating_system.location_id is not None:
SystemGear.location = LocationService.find_location(operating_system.location_id)
if SystemGear.location is not None and SystemGear.location.name != current_location.name:
# This OS has moved
LOGGER.debug("DirectoryGear.sync_network - operating system has a new location !")
SystemGear.location = None
operating_system.location_id = None
for subnet_id in SystemGear.osi.subnet_ids:
subnet_to_unbind = SubnetService.find_subnet(sb_id=subnet_id)
if subnet_to_unbind is not None:
SystemGear.osi.del_subnet(subnet_to_unbind)
operating_system.routing_area_ids.remove(subnet_to_unbind.routing_area_id)
operating_system.subnet_ids.remove(subnet_id)
embedding_osi = OSInstanceService.find_os_instance(osi_id=SystemGear.osi.embedding_osi_id)
embedding_osi.del_embedded_osi(SystemGear.osi)
for ip_id in SystemGear.osi.ip_address_ids:
ip_to_unbind = IPAddressService.find_ip_address(ipa_id=ip_id)
if ip_to_unbind is not None:
ip_to_unbind.remove()
SystemGear.osi.sync()
if SystemGear.location is None:
SystemGear.location = LocationService.find_location(loc_name=current_location.name)
if SystemGear.location is None:
SystemGear.location = Location(name=current_location.name,
description=current_location.description,
dc_type=current_location.type,
address=current_location.address,
zip_code=current_location.zipcode,
town=current_location.town,
country=current_location.country,
gps_latitude=current_location.gps_lat,
gps_longitude=current_location.gps_lng)
SystemGear.location.save()
operating_system.location_id = SystemGear.location.id
# Sync routing areas and subnets
LOGGER.debug("DirectoryGear.sync_network - Sync ra and subnets")
for cached_routing_area_id in operating_system.routing_area_ids:
cached_routing_area = RoutingAreaService.find_routing_area(ra_id=cached_routing_area_id)
if cached_routing_area is not None:
mimic_cached_routing_area_config = RoutingAreaConfig(name=cached_routing_area.name)
if mimic_cached_routing_area_config in current_possible_routing_area_config or \
mimic_cached_routing_area_config in current_possible_remote_vpn_routing_area_config or \
mimic_cached_routing_area_config != local_routing_area:
for subnet_id in cached_routing_area.subnet_ids:
subnet = SubnetService.find_subnet(sb_id=subnet_id)
if subnet is not None:
mimic_cached_subnet_config = SubnetConfig(name=subnet.name)
if mimic_cached_subnet_config in current_possible_subnet_config or \
mimic_cached_subnet_config in current_possible_remote_vpn_subnet_config:
if subnet.id not in operating_system.subnet_ids:
operating_system.subnet_ids.append(subnet.id)
if subnet.id not in SystemGear.osi.subnet_ids:
SystemGear.osi.add_subnet(subnet)
if subnet not in SystemGear.subnets:
SystemGear.subnets.append(subnet)
if mimic_cached_subnet_config in current_possible_subnet_config:
current_possible_subnet_config.remove(mimic_cached_subnet_config)
if mimic_cached_subnet_config in current_possible_remote_vpn_subnet_config:
current_possible_remote_vpn_subnet_config.remove(mimic_cached_subnet_config)
else:
if subnet.id in operating_system.subnet_ids:
operating_system.subnet_ids.remove(subnet.id)
if subnet.id in SystemGear.osi.subnet_ids:
SystemGear.osi.del_subnet(subnet)
if subnet in SystemGear.subnets:
SystemGear.subnets.remove(subnet)
if cached_routing_area not in SystemGear.routing_areas:
SystemGear.routing_areas.append(cached_routing_area)
if mimic_cached_routing_area_config in current_possible_routing_area_config:
current_possible_routing_area_config.remove(mimic_cached_routing_area_config)
if mimic_cached_routing_area_config in current_possible_remote_vpn_routing_area_config:
current_possible_remote_vpn_routing_area_config.remove(mimic_cached_routing_area_config)
elif mimic_cached_routing_area_config != local_routing_area:
for subnet_id in cached_routing_area.subnet_ids:
subnet = SubnetService.find_subnet(sb_id=subnet_id)
if subnet is not None:
mimic_cached_subnet_config = SubnetConfig(name=subnet.name)
if mimic_cached_subnet_config in current_possible_subnet_config:
current_possible_subnet_config.remove(mimic_cached_subnet_config)
if subnet.id in operating_system.subnet_ids:
operating_system.subnet_ids.remove(subnet.id)
if subnet.id in SystemGear.osi.subnet_ids:
SystemGear.osi.del_subnet(subnet)
if subnet in SystemGear.subnets:
SystemGear.subnets.remove(subnet)
if cached_routing_area in SystemGear.routing_areas:
SystemGear.routing_areas.remove(cached_routing_area)
else:
operating_system.routing_area_ids.remove(cached_routing_area_id)
for remote_vpn_loc_config in current_possible_remote_vpn_location_config:
vpn_loc = LocationService.find_location(loc_name=remote_vpn_loc_config.name)
if vpn_loc is None:
vpn_loc = Location(
name=remote_vpn_loc_config.name,
description=remote_vpn_loc_config.description,
address=remote_vpn_loc_config.address,
zip_code=remote_vpn_loc_config.zipcode,
town=remote_vpn_loc_config.town,
country=remote_vpn_loc_config.country,
gps_latitude=remote_vpn_loc_config.gps_lat,
gps_longitude=remote_vpn_loc_config.gps_lng
)
vpn_loc.save()
for remote_routing_area_config in remote_vpn_loc_config.routing_areas:
if remote_routing_area_config in current_possible_remote_vpn_routing_area_config:
vpn_ra = RoutingAreaService.find_routing_area(ra_name=remote_routing_area_config.name)
if vpn_ra is None:
vpn_ra = RoutingArea(name=remote_routing_area_config.name,
multicast=remote_routing_area_config.multicast,
ra_type=remote_routing_area_config.type,
description=remote_routing_area_config.description)
vpn_ra.save()
vpn_ra.add_location(SystemGear.location)
vpn_ra.add_location(vpn_loc)
SystemGear.routing_areas.append(vpn_ra)
operating_system.routing_area_ids.append(vpn_ra.id)
for remote_subnet_config in remote_routing_area_config.subnets:
if remote_subnet_config in current_possible_remote_vpn_subnet_config:
vpn_subnet = SubnetService.find_subnet(sb_name=remote_subnet_config.name)
if vpn_subnet is None:
vpn_subnet = Subnet(name=remote_subnet_config.name,
description=remote_subnet_config.description,
routing_area_id=vpn_ra.id,
ip=remote_subnet_config.subnet_ip,
mask=remote_subnet_config.subnet_mask)
vpn_subnet.save()
vpn_subnet.add_location(SystemGear.location)
vpn_subnet.add_location(vpn_loc)
operating_system.subnet_ids.append(vpn_subnet.id)
SystemGear.subnets.append(vpn_subnet)
if vpn_subnet.id not in SystemGear.osi.subnet_ids:
SystemGear.osi.add_subnet(vpn_subnet)
for routing_area_config in current_possible_routing_area_config:
routing_area = RoutingAreaService.find_routing_area(ra_name=routing_area_config.name)
if routing_area is None:
routing_area = RoutingArea(name=routing_area_config.name,
multicast=routing_area_config.multicast,
ra_type=routing_area_config.type,
description=routing_area_config.description)
routing_area.save()
routing_area.add_location(SystemGear.location)
operating_system.routing_area_ids.append(routing_area.id)
SystemGear.routing_areas.append(routing_area)
for subnet_config in routing_area_config.subnets:
if subnet_config in current_possible_subnet_config:
subnet = SubnetService.find_subnet(sb_name=subnet_config.name)
if subnet is None:
subnet = Subnet(name=subnet_config.name,
description=subnet_config.description,
routing_area_id=routing_area.id,
ip=subnet_config.subnet_ip, mask=subnet_config.subnet_mask)
subnet.save()
subnet.add_location(SystemGear.location)
operating_system.subnet_ids.append(subnet.id)
SystemGear.subnets.append(subnet)
if subnet.id not in SystemGear.osi.subnet_ids:
SystemGear.osi.add_subnet(subnet)
# CLEAN LOCAL SUBNETS FIRST
LOGGER.debug("DirectoryGear.sync_network - Clean local subnets first")
for local_subnet_config in local_virt_subnet_config:
subnet = SubnetService.find_subnet(sb_name=local_subnet_config.name)
if subnet is not None:
if subnet.id in operating_system.subnet_ids:
operating_system.subnet_ids.remove(subnet.id)
if subnet in SystemGear.subnets:
SystemGear.subnets.remove(subnet)
subnet.remove()
# THEN CLEAN LOCAL RA
LOGGER.debug("DirectoryGear.sync_network - Then lean local ra")
loc_ra = RoutingAreaService.find_routing_area(ra_name=local_routing_area.name)
if loc_ra is not None:
if loc_ra.id in operating_system.routing_area_ids:
operating_system.routing_area_ids.remove(loc_ra.id)
if loc_ra in SystemGear.routing_areas:
SystemGear.routing_areas.remove(loc_ra)
loc_ra.remove()
# FINALLY REINIT LOCAL RA AND SUBNETS
LOGGER.debug("DirectoryGear.sync_network - Reinit local ra and subnets")
loc_ra = RoutingArea(name=local_routing_area.name,
multicast=local_routing_area.multicast,
ra_type=local_routing_area.type,
description=local_routing_area.description)
loc_ra.save()
loc_ra.add_location(SystemGear.location)
LOGGER.debug("DirectoryGear.sync_network - local ra reinit done")
operating_system.routing_area_ids.append(loc_ra.id)
loopback_subnet_conf = SubnetConfig(
name=SystemGear.hostname+".loopback",
description=SystemGear.hostname + " loopback subnet",
subnet_ip="127.0.0.0",
subnet_mask="255.0.0.0"
)
if loopback_subnet_conf not in local_virt_subnet_config:
local_virt_subnet_config.append(loopback_subnet_conf)
for local_subnet_config in local_virt_subnet_config:
subnet = Subnet(name=local_subnet_config.name,
description=local_subnet_config.description,
routing_area_id=loc_ra.id,
ip=local_subnet_config.subnet_ip, mask=local_subnet_config.subnet_mask)
subnet.save()
subnet.add_location(SystemGear.location)
SystemGear.osi.add_subnet(subnet)
operating_system.subnet_ids.append(subnet.id)
SystemGear.subnets.append(subnet)
LOGGER.debug("DirectoryGear.sync_network - local sn " + str(subnet) + " reinit done")
LOGGER.debug("DirectoryGear.sync_network - check former nics to be removed...")
nics_2_rm = []
for nic_id in SystemGear.osi.nic_ids:
still_here = False
nic = NICService.find_nic(nic_id=nic_id)
if nic is not None:
for sniffed_nic in operating_system.nics:
if (sniffed_nic.mac_address is None or not sniffed_nic.mac_address) or sniffed_nic.name == "lo":
nicmcaddr = sniffed_nic.ipv4_fqdn
else:
nicmcaddr = sniffed_nic.mac_address
if nic.mac_address == nicmcaddr:
still_here = True
if not still_here:
nics_2_rm.append(nic)
LOGGER.debug("DirectoryGear.sync_network - remove former nic for osi...")
for nic_2_rm in nics_2_rm:
LOGGER.debug("DirectoryGear.sync_network - getting ip attached to nic " + str(nic_2_rm))
if nic_2_rm.ipv4_fqdn:
ip_address = IPAddressService.find_ip_address(ipa_fqdn=nic_2_rm.ipv4_fqdn)
if ip_address is not None:
SystemGear.osi.del_ip_address(ip_address)
ip_address.remove()
SystemGear.osi.del_nic(nic_2_rm)
nic_2_rm.remove()
LOGGER.debug("DirectoryGear.sync_network - Sync nic")
for nic in operating_system.nics:
is_in_subnet = False
ip_address = None
LOGGER.debug("DirectoryGear.sync_network - nic: " + str(nic))
if nic.ipv4_address is not None:
if not nic.ipv4_address.startswith('127'):
for subnet in SystemGear.subnets:
LOGGER.debug("DirectoryGear.sync_network - non localhost subnet: " + str(subnet))
if NetworkInterfaceCard.ip_is_in_subnet(nic.ipv4_address, subnet.ip, subnet.mask):
ip_address = IPAddressService.find_ip_address(ipa_ip_address=nic.ipv4_address,
ipa_subnet_id=subnet.id)
if ip_address is None:
ip_address = IPAddress(ip_address=nic.ipv4_address, fqdn=nic.ipv4_fqdn,
ipa_subnet_id=subnet.id, ipa_osi_id=SystemGear.osi.id)
LOGGER.debug("DirectoryGear.sync_network - save new ip: " + str(ip_address))
ip_address.save()
subnet.sync()
else:
if ip_address.ipa_os_instance_id != SystemGear.osi.id:
ip_address.ipa_os_instance_id = SystemGear.osi.id
LOGGER.debug("DirectoryGear.sync_network - upgrade ip: " + str(ip_address))
ip_address.save()
subnet.is_default = nic.is_default
is_in_subnet = True
break
else:
loopback_subnet = SubnetService.find_subnet(sb_name=SystemGear.hostname+".loopback")
ip_address = IPAddressService.find_ip_address(ipa_ip_address=nic.ipv4_address,
ipa_subnet_id=loopback_subnet.id)
if ip_address is not None and (ip_address.fqdn != nic.ipv4_fqdn or
ip_address.ip_address != nic.ipv4_address or ip_address.ipa_subnet_id != loopback_subnet.id or
ip_address.ipa_osi_id != SystemGear.osi.id):
ip_address.remove()
ip_address = IPAddress(ip_address=nic.ipv4_address, fqdn=nic.ipv4_fqdn,
ipa_subnet_id=loopback_subnet.id, ipa_osi_id=SystemGear.osi.id)
LOGGER.debug("DirectoryGear.sync_network - upgrade ip: " + str(ip_address))
ip_address.save()
LOGGER.debug("DirectoryGear.sync_network - sync loopback subnet...")
loopback_subnet.sync()
elif ip_address is None:
ip_address = IPAddress(ip_address=nic.ipv4_address, fqdn=nic.ipv4_fqdn,
ipa_subnet_id=loopback_subnet.id, ipa_osi_id=SystemGear.osi.id)
LOGGER.debug("DirectoryGear.sync_network - save new ip: " + str(ip_address))
ip_address.save()
LOGGER.debug("DirectoryGear.sync_network - sync loopback subnet...")
loopback_subnet.sync()
is_in_subnet = True
if is_in_subnet:
if (nic.mac_address is None or not nic.mac_address) or nic.name == "lo":
nicmcaddr = nic.ipv4_fqdn
else:
nicmcaddr = nic.mac_address
if nicmcaddr is not None and nicmcaddr:
LOGGER.debug("DirectoryGear.sync_network - searching nic from mcaddr " + str(nicmcaddr))
nic2save = NICService.find_nic(nic_mac_address=nicmcaddr)
if nic2save is None:
nic2save = NIC(
name=SystemGear.hostname+"."+nic.name,
mac_address=nicmcaddr,
duplex=nic.duplex,
speed=nic.speed,
mtu=nic.mtu,
nic_osi_id=operating_system.osi_id,
nic_ipa_id=ip_address.id if ip_address is not None else None
)
LOGGER.debug("DirectoryGear.sync_network - saving new nic " + str(nicmcaddr))
nic2save.save()
else:
to_upgrade = False
if ip_address is not None and nic2save.nic_ipa_id != ip_address.id or\
ip_address is None and nic2save.nic_ipa_id != -1:
nic2save.nic_ipa_id = ip_address.id if ip_address is not None else None
to_upgrade = True
if nic2save.nic_osi_id != operating_system.osi_id:
nic2save.nic_osi_id = operating_system.osi_id
to_upgrade = True
if to_upgrade:
LOGGER.debug("DirectoryGear.sync_network - ip_address: " + str(ip_address))
LOGGER.debug("DirectoryGear.sync_network - nic2save: " + str(nic2save))
nic2save.nic_ipa_id = ip_address.id if ip_address is not None else None
nic2save.nic_osi_id = operating_system.osi_id
LOGGER.debug("DirectoryGear.sync_network - upgrading new nic " + str(nicmcaddr))
nic2save.save()
else:
LOGGER.error("DirectoryGear.sync_network - Error while saving nic : " + str(nic))
SystemGear.osi = OSInstanceService.find_os_instance(osi_id=operating_system.osi_id)
def init_ariane_directories(self, component):
LOGGER.debug("DirectoryGear.init_ariane_directories")
operating_system = component.operating_system.get()
try:
start_time = timeit.default_timer()
self.compute_current_possible_network(operating_system)
self.sync_operating_system(operating_system)
self.sync_operating_system_type(operating_system)
self.sync_environment(operating_system)
self.sync_team(operating_system)
if self.is_network_sync_possible:
self.sync_network(operating_system)
sync_proc_time = timeit.default_timer()-start_time
LOGGER.info('DirectoryGear.init_ariane_directories - time : ' + str(sync_proc_time))
except Exception as e:
LOGGER.error("DirectoryGear.init_ariane_directories - " + e.__str__())
LOGGER.debug("DirectoryGear.init_ariane_directories - " + traceback.format_exc())
def update_ariane_directories(self, operating_system):
LOGGER.debug("DirectoryGear.update_ariane_directories")
# check last / new sniff diff on nics
if self.is_network_sync_possible:
try:
if operating_system.last_nics != operating_system.nics:
self.compute_current_possible_network(operating_system)
if self.is_network_sync_possible:
self.sync_network(operating_system)
else:
LOGGER.debug('DirectoryGear.update_ariane_directories - no changes with last sniff')
except Exception as e:
LOGGER.error("DirectoryGear.update_ariane_directories - " + e.__str__())
LOGGER.debug("DirectoryGear.update_ariane_directories - " + traceback.format_exc())
else:
LOGGER.warn('DirectoryGear.update_ariane_directories - DIRECTORIES SYNC ARE IGNORED')
def synchronize_with_ariane_directories(self, component):
LOGGER.debug("DirectoryGear.synchronize_with_ariane_directories")
if self.running:
start_time = timeit.default_timer()
operating_system = component.operating_system.get()
self.update_ariane_directories(operating_system)
self.update_count += 1
sync_proc_time = timeit.default_timer()-start_time
LOGGER.info('DirectoryGear.synchronize_with_ariane_directories - time : ' + str(sync_proc_time))
else:
LOGGER.warn("DirectoryGear.synchronize_with_ariane_directories - "
"Synchronization requested but procos_directory_gear@" + str(SystemGear.hostname) +
" is not running.")
class MappingGear(InjectorGearSkeleton):
def __init__(self):
LOGGER.debug("MappingGear.__init__")
super(MappingGear, self).__init__(
gear_id='ariane.community.plugin.procos.gears.cache.mapping_gear@' + str(SystemGear.hostname),
gear_name='procos_mapping_gear@' + str(SystemGear.hostname),
gear_description='Ariane ProcOS injector gear for ' + str(SystemGear.hostname),
gear_admin_queue='ariane.community.plugin.procos.gears.cache.mapping_gear@' + str(SystemGear.hostname),
running=False
)
self.update_count = 0
self.osi_container = None
self.init_done = False
self.target_osi_cache = {}
self.cache_clean_counter = 0
self.cache_clean_counter_max = 60
def on_start(self):
LOGGER.debug("MappingGear.on_start")
self.running = True
self.cache(running=self.running)
def on_stop(self):
LOGGER.debug("MappingGear.on_stop")
if self.running:
self.running = False
self.cache(running=self.running)
def on_failure(self, exception_type, exception_value, traceback_):
LOGGER.debug("MappingGear.on_failure")
LOGGER.error("MappingGear.on_failure - " + exception_type.__str__() + "/" + exception_value.__str__())
LOGGER.error("MappingGear.on_failure - " + traceback_.format_exc())
if self.running:
self.running = False
self.cache(running=self.running)
def gear_start(self):
LOGGER.debug("MappingGear.gear_start")
self.on_start()
LOGGER.info('procos_mapping_gear@' + str(SystemGear.hostname) + ' has been started.')
def gear_stop(self):
LOGGER.debug("MappingGear.gear_stop")
if self.running:
self.running = False
self.cache(running=self.running)
LOGGER.info('procos_mapping_gear@' + str(SystemGear.hostname) + ' has been stopped.')
@staticmethod
def diff_container_network_location(container, location):
if container.properties is not None and Container.PL_MAPPING_PROPERTIES in container.properties:
return (
container.properties[Container.PL_MAPPING_PROPERTIES][Container.PL_NAME_MAPPING_FIELD] != location.name or
container.properties[Container.PL_MAPPING_PROPERTIES][Container.PL_ADDR_MAPPING_FIELD] != location.address or
container.properties[Container.PL_MAPPING_PROPERTIES][Container.PL_TOWN_MAPPING_FIELD] != location.town or
container.properties[Container.PL_MAPPING_PROPERTIES][Container.PL_CNTY_MAPPING_FIELD] != location.country or
container.properties[Container.PL_MAPPING_PROPERTIES][Container.PL_GPSA_MAPPING_FIELD] != location.gpsLatitude or
container.properties[Container.PL_MAPPING_PROPERTIES][Container.PL_GPSN_MAPPING_FIELD] != location.gpsLongitude
)
else:
return True
@staticmethod
def sync_container_network(container, location, routing_areas, subnets):
LOGGER.debug("MappingGear.sync_container_network")
if location is not None and MappingGear.diff_container_network_location(container, location):
LOGGER.debug("MappingGear.sync_container_network - add location property")
location_properties = {
Container.PL_NAME_MAPPING_FIELD: location.name,
Container.PL_ADDR_MAPPING_FIELD: location.address,
Container.PL_TOWN_MAPPING_FIELD: location.town,
Container.PL_CNTY_MAPPING_FIELD: location.country,
Container.PL_GPSA_MAPPING_FIELD: location.gpsLatitude,
Container.PL_GPSN_MAPPING_FIELD: location.gpsLongitude
}
container.add_property((Container.PL_MAPPING_PROPERTIES, location_properties))
if routing_areas is not None:
network_properties = []
for routing_area in routing_areas:
routing_area_subnets = []
for subnet in subnets:
if subnet.id in routing_area.subnet_ids:
routing_area_subnets.append(
{
Container.SUBNET_NAME_MAPPING_FIELD: subnet.name,
Container.SUBNET_IPAD_MAPPING_FIELD: subnet.ip,
Container.SUBNET_MASK_MAPPING_FIELD: subnet.mask,
Container.SUBNET_ISDEFAULT_MAPPING_FIELD: subnet.is_default
}
)
if routing_area_subnets.__len__() > 0:
network_properties.append(
{
Container.RAREA_NAME_MAPPING_FIELD: routing_area.name,
Container.RAREA_MLTC_MAPPING_FIELD: routing_area.multicast,
Container.RAREA_TYPE_MAPPING_FIELD: routing_area.type,
Container.RAREA_SUBNETS: routing_area_subnets
})
else:
network_properties.append(
{
Container.RAREA_NAME_MAPPING_FIELD: routing_area.name,
Container.RAREA_MLTC_MAPPING_FIELD: routing_area.multicast,
Container.RAREA_TYPE_MAPPING_FIELD: routing_area.type
})
if network_properties.__len__() > 0:
LOGGER.debug("MappingGear.sync_container_network - add network property")
container.add_property((Container.NETWORK_MAPPING_PROPERTIES, network_properties))
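# On Linux, read the kernel IP forwarding flag to mark whether this OS instance acts as a router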
if _platform == "linux" or _platform == "linux2":
bytes_ = subprocess.check_output(['cat', '/proc/sys/net/ipv4/ip_forward'])
if '1' in str(bytes_):
container.add_property((Container.OSI_IS_ROUTER_FIELD, True))
else:
container.add_property((Container.OSI_IS_ROUTER_FIELD, False))
@staticmethod
def diff_container_team(container, team):
if container.properties is not None and Container.TEAM_SUPPORT_MAPPING_PROPERTIES in container.properties:
try:
ret = container.properties[Container.TEAM_SUPPORT_MAPPING_PROPERTIES][Container.TEAM_NAME_MAPPING_FIELD] != team.name or \
container.properties[Container.TEAM_SUPPORT_MAPPING_PROPERTIES][Container.TEAM_COLR_MAPPING_FIELD] != team.color_code
return ret
except Exception as e:
try:
ret = container.properties[Container.TEAM_SUPPORT_MAPPING_PROPERTIES][0][Container.TEAM_NAME_MAPPING_FIELD][1] != team.name or \
container.properties[Container.TEAM_SUPPORT_MAPPING_PROPERTIES][0][Container.TEAM_COLR_MAPPING_FIELD][1] != team.color_code
return ret
except Exception as e:
return True
else:
return True
def sync_container_properties(self, operating_system):
LOGGER.debug("MappingGear.sync_container_properties - begin")
if not self.init_done or operating_system.last_nics != operating_system.nics:
self.sync_container_network(self.osi_container, SystemGear.location, SystemGear.routing_areas,
SystemGear.subnets)
if SystemGear.team is not None and MappingGear.diff_container_team(self.osi_container, SystemGear.team):
team_properties = {
Container.TEAM_NAME_MAPPING_FIELD: SystemGear.team.name,
Container.TEAM_COLR_MAPPING_FIELD: SystemGear.team.color_code
}
LOGGER.debug("MappingGear.sync_container_network - add team property")
self.osi_container.add_property((Container.TEAM_SUPPORT_MAPPING_PROPERTIES, team_properties))
self.osi_container.add_property((
Container.OWNER_MAPPING_PROPERTY,
'procos_system_gear@'+str(SystemGear.hostname)
))
LOGGER.debug("MappingGear.sync_container_properties - done")
def sync_container(self, operating_system):
LOGGER.debug("MappingGear.sync_container - begin")
if self.osi_container is None and operating_system.container_id is not None:
self.osi_container = ContainerService.find_container(cid=operating_system.container_id)
if self.osi_container is None:
LOGGER.error('MappingGear.sync_container - consistency error between ProcOS cache and mapping DB (' +
str(operating_system.container_id) + ')')
operating_system.container_id = None
if self.osi_container is None:
LOGGER.debug("MappingGear.sync_container - FQDN : " + str(SystemGear.fqdn))
if SystemGear.fqdn is None:
SystemGear.fqdn = SystemGear.hostname
existing_container = ContainerService.find_container(
primary_admin_gate_url=SystemGear.config.system_context.admin_gate_protocol+SystemGear.fqdn
)
if existing_container is not None:
deleted = False
while not deleted:
if existing_container is not None and existing_container.remove() is not None:
time.sleep(5)
existing_container = ContainerService.find_container(
primary_admin_gate_url=SystemGear.config.system_context.admin_gate_protocol+SystemGear.fqdn
)
else:
deleted = True
self.osi_container = Container(
name=SystemGear.hostname,
gate_uri=SystemGear.config.system_context.admin_gate_protocol+SystemGear.fqdn,
primary_admin_gate_name=SystemGear.config.system_context.admin_gate_protocol + ' daemon',
company=SystemGear.config.system_context.os_type.company.name,
product=SystemGear.config.system_context.os_type.name + ' - ' +
SystemGear.config.system_context.os_type.architecture,
c_type='Operating System'
)
self.osi_container.save()
operating_system.container_id = self.osi_container.id
LOGGER.debug('operating_system.container_id : (' + str(SystemGear.hostname) + ',' +
str(operating_system.container_id) + ')')
self.sync_container_properties(operating_system)
LOGGER.debug("MappingGear.sync_container - done")
@staticmethod
def sync_remote_container_network(target_os_instance, target_container):
LOGGER.debug("MappingGear.sync_remote_container_network - begin")
target_possible_locations = []
target_routing_areas = []
target_subnets = []
if target_container.properties is not None and Container.PL_MAPPING_PROPERTIES in target_container.properties and \
Container.NETWORK_MAPPING_PROPERTIES in target_container.properties:
LOGGER.debug("MappingGear.sync_remote_container_network - network already defined for remote container.")
return
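# Walk the remote OS instance subnets to collect their routing areas and candidate locations from the directory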
for subnet_id in target_os_instance.subnet_ids:
target_subnet = SubnetService.find_subnet(
sb_id=subnet_id
)
if target_subnet is not None and target_subnet not in target_subnets:
target_subnets.append(target_subnet)
target_routing_area = RoutingAreaService.find_routing_area(
ra_id=target_subnet.routing_area_id
)
if target_routing_area is not None and target_routing_area not in target_routing_areas:
target_routing_areas.append(target_routing_area)
for location_id in target_routing_area.loc_ids:
target_possible_location = LocationService.find_location(
loc_id=location_id
)
if target_possible_location is not None and \
target_possible_location not in target_possible_locations:
target_possible_locations.append(target_possible_location)
if target_possible_locations.__len__() == 1:
target_location = target_possible_locations[0]
MappingGear.sync_container_network(target_container, target_location, target_routing_areas, target_subnets)
else:
LOGGER.warn("MappingGear.sync_remote_container_network - "
"remote container loc not found for " + target_container.name)
LOGGER.debug("MappingGear.sync_remote_container_network - done")
@staticmethod
def sync_remote_container_team(target_os_instance, target_container):
LOGGER.debug("MappingGear.sync_remote_container_team - begin")
if target_container.properties is not None and \
Container.TEAM_SUPPORT_MAPPING_PROPERTIES in target_container.properties:
LOGGER.debug("MappingGear.sync_remote_container_network - team already defined for remote container.")
return
teams_props = []
for team_id in target_os_instance.team_ids:
team = TeamService.find_team(team_id)
team_properties = {
Container.TEAM_NAME_MAPPING_FIELD: team.name,
Container.TEAM_COLR_MAPPING_FIELD: team.color_code
}
teams_props.append(team_properties)
target_container.add_property((Container.TEAM_SUPPORT_MAPPING_PROPERTIES, teams_props))
LOGGER.debug("MappingGear.sync_remote_container_team - done")
@staticmethod
def search_map_socket(map_sockets, endpoint_id):
LOGGER.debug("MappingGear.find_map_socket")
ret = None
for map_socket in map_sockets:
if map_socket.source_endpoint_id == endpoint_id or map_socket.destination_endpoint_id == endpoint_id:
ret = map_socket
break
return ret
@staticmethod
def search_local_endpoint_by_url(proto, port, suffix):
endpoint_to_search = None
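# The same local service may have been registered under a loopback or IPv6-mapped variant of the URL: try the known forms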
other_url_possibilities = [
proto + "::1:" + str(port) + suffix,
proto + "::ffff:127.0.0.1:" + str(port) + suffix,
proto + "::127.0.0.1:" + str(port) + suffix,
proto + "127.0.0.1:" + str(port) + suffix,
]
for other_url_possibility in other_url_possibilities:
endpoint_to_search = EndpointService.find_endpoint(
url=other_url_possibility
)
if endpoint_to_search is not None:
break
return endpoint_to_search
def sync_map_socket(self, operating_system):
LOGGER.debug("MappingGear.sync_map_socket - begin")
if self.osi_container is None:
LOGGER.error('MappingGear.sync_map_socket - operating system container is not synced')
return
start_time = timeit.default_timer()
for proc in operating_system.processs:
if SystemGear.config.processes_filter is not None:
is_found = False
for process_name_filter in SystemGear.config.processes_filter:
if process_name_filter in proc.name:
is_found = True
break
if not is_found:
continue
if proc.mapping_id is not None and proc.new_map_sockets is not None:
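# Build a human readable node name '[pid] <name>'; java/python interpreters and bare 'exe' processes fall back on the first command line argument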
if proc.name != "exe":
if "java" in proc.name or "python" in proc.name:
if "java" in proc.name and "java" not in proc.cmdline[0]:
name = '[' + str(proc.pid) + '] ' + str(proc.cmdline[0])
elif "python" in proc.name and "python" not in proc.cmdline[0]:
name = '[' + str(proc.pid) + '] ' + str(proc.cmdline[0])
else:
name = '[' + str(proc.pid) + '] ' + str(proc.name)
else:
name = '[' + str(proc.pid) + '] ' + str(proc.name)
else:
name = '[' + str(proc.pid) + '] ' + str(proc.name) + ' - ' + str(proc.cmdline[0])
LOGGER.debug("MappingGear.sync_map_socket - " + str(proc.new_map_sockets.__len__()) +
' new socket found for process ' + name)
for map_socket in proc.new_map_sockets:
if map_socket.source_ip is not None and map_socket.source_port is not None:
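# Map the socket type to a URL scheme: SOCK_STREAM -> tcp://, SOCK_DGRAM -> udp://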
proto = None
if map_socket.type == "SOCK_STREAM":
proto = "tcp://"
elif map_socket.type == "SOCK_DGRAM":
proto = "udp://"
else:
LOGGER.warn("MappingGear.sync_map_socket - socket type " + map_socket.type +
" currently not supported !")
if proto is not None:
if proc.is_node:
source_parent_node_id = proc.mapping_id
else:
source_parent_node_id = 0
LOGGER.warn("MappingGear.sync_map_socket - process as container not yet implemented !")
if source_parent_node_id != 0:
destination_is_local = operating_system.is_local_destination(map_socket)
suffix = str(map_socket.file_descriptors) + '[' + str(proc.pid) + ']'
source_url = proto + map_socket.source_ip + ":" + str(map_socket.source_port) + suffix
source_endpoint = EndpointService.find_endpoint(
url=source_url
)
if source_endpoint is None and destination_is_local:
source_endpoint = MappingGear.search_local_endpoint_by_url(
proto,
map_socket.source_port,
suffix
)
if source_endpoint is None:
source_endpoint = Endpoint(url=source_url, parent_node_id=proc.mapping_id,
ignore_sync=True)
source_endpoint.add_property(('type', map_socket.type))
source_endpoint.add_property(('family', map_socket.family))
source_endpoint.add_property(('status', map_socket.status))
source_endpoint.add_property(('file descriptors', map_socket.file_descriptors))
source_endpoint.save()
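# LISTEN socket of a gate process still to be refined: drop its temporary 'tbc://' endpoints and set the real gate URL
# (the gate is promoted to primary admin gate when it listens on the configured admin port)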
if map_socket.status == "LISTEN" and \
hasattr(proc, 'to_be_refined') and proc.to_be_refined:
gate_to_refine = GateService.find_gate(nid=proc.mapping_id)
if gate_to_refine is not None:
for eid in gate_to_refine.endpoints_id:
gep = EndpointService.find_endpoint(eid=eid)
if gep is not None and gep.url.startswith("tbc://"):
gep.remove()
gate_to_refine.sync()
if map_socket.source_port == SystemGear.config.system_context.admin_gate_port:
previous_prim_gate = GateService.find_gate(
self.osi_container.primary_admin_gate_id
)
gate_to_refine.url = SystemGear.config.system_context.admin_gate_protocol+SystemGear.fqdn
gate_to_refine.is_primary_admin = True
gate_to_refine.save()
previous_prim_gate.remove()
else:
gate_to_refine.url = source_url
gate_to_refine.save()
proc.to_be_refined = False
else:
LOGGER.warn("Gate not found for LISTEN url " + source_url)
else:
LOGGER.debug("Found source endpoint : (" +
source_url + ',' + str(source_endpoint.id) + ")")
if source_endpoint.id not in operating_system.duplex_links_endpoints \
and destination_is_local:
operating_system.duplex_links_endpoints.append(source_endpoint.id)
map_socket.source_endpoint_id = source_endpoint.id
LOGGER.debug('MappingGear.sync_map_socket - source socket endpoint on mapping db : (' +
source_url + ',' + str(map_socket.source_endpoint_id) + ')')
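# Now resolve the destination side of the socket: remote container, node and endpoint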
if map_socket.destination_ip is not None and map_socket.destination_port is not None:
target_url = proto + map_socket.destination_ip + ":" + \
str(map_socket.destination_port)
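# Reverse-resolve the destination address; IPv6 addresses are mapped back to IPv4 before the lookup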
target_fqdn = None
if map_socket.family == "AF_INET":
target_fqdn = MapSocket.get_cached_hostbyaddr(
map_socket.destination_ip
)
elif map_socket.family == "AF_INET6":
target_fqdn = MapSocket.get_cached_hostbyaddr(
MapSocket.ipv6_2_ipv4(map_socket.destination_ip)
)
target_container = None if not destination_is_local else self.osi_container
target_node = None
target_endpoint = None
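# Remote destination: look up the target OS instance in the Ariane directory, using a small hostname-keyed cache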
if target_fqdn != "localhost" and target_fqdn is not None:
target_os_instance = None
target_os_hostname = None
if target_fqdn.split(".").__len__() > 1:
target_os_hostname = target_fqdn.split(".")[0]
if target_os_hostname in self.target_osi_cache and \
self.target_osi_cache[target_os_hostname] is not None:
target_os_instance = self.target_osi_cache[target_os_hostname]
else:
target_os_instance = OSInstanceService.find_os_instance(
osi_name=target_os_hostname
)
else:
target_os_hostname = target_fqdn
if target_os_hostname in self.target_osi_cache and \
self.target_osi_cache[target_os_hostname] is not None:
target_os_instance = self.target_osi_cache[target_os_hostname]
if target_os_instance is None:
target_os_instance = OSInstanceService.find_os_instance(
osi_name=target_fqdn
)
if target_os_instance is None:
target_ipa = IPAddressService.find_ip_address(ipa_fqdn=target_fqdn)
if target_ipa is not None:
target_os_instance = OSInstanceService.find_os_instance(
osi_id=target_ipa.ipa_os_instance_id
)
if target_os_instance is not None:
if target_os_hostname not in self.target_osi_cache:
self.target_osi_cache[target_os_hostname] = target_os_instance
if target_container is None:
target_container = ContainerService.find_container(
primary_admin_gate_url=target_os_instance.admin_gate_uri
)
if target_container is None:
target_os_instance_type = OSTypeService.find_ostype(
ost_id=target_os_instance.ost_id
)
product = target_os_instance_type.name + " - " + \
target_os_instance_type.architecture \
if target_os_instance_type is not None else\
"Unknown OS Type"
target_os_instance_type_cmp = CompanyService.find_company(
cmp_id=target_os_instance_type.company_id
) if target_os_instance_type is not None else None
company = target_os_instance_type_cmp.name\
if target_os_instance_type_cmp is not None else\
"Unknown OS Type Company"
name = target_fqdn.split(".")[0] if target_fqdn is not None else\
map_socket.destination_ip
target_container = Container(
name=name,
gate_uri=target_os_instance.admin_gate_uri,
primary_admin_gate_name=target_fqdn + " Primary Admin Gate",
company=company,
product=product,
c_type="Operating System"
)
target_container.save()
if target_container.properties is None or \
Container.OWNER_MAPPING_PROPERTY not in target_container.properties:
MappingGear.sync_remote_container_network(target_os_instance,
target_container)
MappingGear.sync_remote_container_team(target_os_instance,
target_container)
if target_container is None:
target_container = ContainerService.find_container(
primary_admin_gate_url="not_my_concern://"+map_socket.destination_ip
)
if target_container is None:
target_container = Container(
name=target_fqdn if target_fqdn is not None else map_socket.destination_ip,
gate_uri="not_my_concern://"+map_socket.destination_ip,
primary_admin_gate_name="External OS Primary Admin Gate"
)
target_container.save()
if target_container.id is not None and not destination_is_local:
selector = "endpointURL =~ '.*:" + str(map_socket.destination_port) + ".*'"
endpoints = EndpointService.find_endpoint(
selector=selector,
cid=target_container.id,
local_cache=destination_is_local
)
if endpoints is not None and endpoints.__len__() == 1:
target_endpoint = endpoints[0]
target_node = NodeService.find_node(nid=target_endpoint.parent_node_id)
elif endpoints is not None and endpoints.__len__() > 1:
LOGGER.debug("MappingGear.sync_map_socket - "
"Multiple endpoints found for selector " + selector +
" on container " + target_container.id +
" - let remote do the job...")
elif (endpoints is not None and endpoints.__len__() == 0) or endpoints is None:
LOGGER.debug("MappingGear.sync_map_socket - "
"No endpoint found for selector " + selector +
" on container " + target_container.id)
if target_endpoint is None and \
Container.OWNER_MAPPING_PROPERTY not in target_container.properties:
addr = target_fqdn if target_fqdn is not None else map_socket.destination_ip
LOGGER.debug("create node " + Container.OSI_KERNEL_PROC_NAME +
" through container " + target_container.id)
target_node = NodeService.find_node(
name=Container.OSI_KERNEL_PROC_NAME, cid=target_container.id
)
if target_node is None:
target_node = Node(
name=Container.OSI_KERNEL_PROC_NAME,
container_id=target_container.id,
ignore_sync=True
)
target_node.save()
target_endpoint = Endpoint(
url=target_url, parent_node_id=target_node.id, ignore_sync=True
)
target_endpoint.save()
else:
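# Local destination: search the peer process socket that mirrors this one (source and destination swapped)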
for proc_srv in operating_system.processs:
for srv_socket in proc_srv.map_sockets:
map_ipv4_ap = map_socket.transform_system_ipv6_to_ipv4()
srv_ipv4_ap = srv_socket.transform_system_ipv6_to_ipv4()
srv_source_ip = srv_ipv4_ap[0]
srv_destination_ip = srv_ipv4_ap[1]
map_source_ip = map_ipv4_ap[0]
map_destination_ip = map_ipv4_ap[1]
if srv_source_ip == map_destination_ip and\
srv_socket.source_port == map_socket.destination_port and\
srv_destination_ip == map_source_ip and\
srv_socket.destination_port == map_socket.source_port:
if proc_srv.is_node:
target_node = NodeService.find_node(nid=proc_srv.mapping_id)
else:
LOGGER.warn("MappingGear.sync_map_socket - process as container"
" not yet implemented !")
suffix = str(srv_socket.file_descriptors) + \
'[' + str(proc_srv.pid) + ']'
target_url += suffix
if target_node is not None:
target_endpoint = EndpointService.find_endpoint(
url=target_url
)
if target_endpoint is None:
target_endpoint = MappingGear.search_local_endpoint_by_url(
proto,
map_socket.destination_port,
suffix
)
if target_endpoint is None:
target_endpoint = Endpoint(
url=target_url, parent_node_id=target_node.id,
ignore_sync=True
)
target_endpoint.add_property(('type', srv_socket.type))
target_endpoint.add_property(('family', srv_socket.family))
target_endpoint.add_property(('status', srv_socket.status))
target_endpoint.add_property(('file descriptors',
srv_socket.file_descriptors))
target_endpoint.save()
if target_endpoint.id \
not in operating_system.duplex_links_endpoints and \
destination_is_local:
operating_system.duplex_links_endpoints.append(
target_endpoint.id
)
break
if target_endpoint is not None:
map_socket.destination_endpoint_id = target_endpoint.id
LOGGER.debug('MappingGear.sync_map_socket - target socket endpoint '
'on mapping db : (' + target_url + ',' +
str(map_socket.destination_endpoint_id) + ')')
if target_node is not None:
map_socket.destination_node_id = target_node.id
LOGGER.debug('MappingGear.sync_map_socket - target socket node '
'on mapping db : (' + target_url + ',' +
str(map_socket.destination_node_id) + ')')
map_socket.destination_container_id = target_container.id
LOGGER.debug('MappingGear.sync_map_socket - target socket container '
'on mapping db : (' + target_url + ',' +
str(map_socket.destination_container_id) + ')')
if map_socket.destination_endpoint_id is not None and \
map_socket.source_endpoint_id is not None:
transport = Transport(name=proto)
transport.save()
if transport is not None:
link = Link(source_endpoint_id=map_socket.source_endpoint_id,
target_endpoint_id=map_socket.destination_endpoint_id,
transport_id=transport.id)
link.save()
map_socket.transport_id = transport.id
map_socket.link_id = link.id
else:
LOGGER.debug('MappingGear.sync_map_socket - missing destination endpoint id '
'for ' + str(map_socket))
else:
LOGGER.debug('MappingGear.sync_map_socket - no source ip / port - ' + str(map_socket))
if proc.mapping_id is not None and proc.dead_map_sockets is not None:
if proc.name != "exe":
if "java" in proc.name or "python" in proc.name:
if "java" in proc.name and "java" not in proc.cmdline[0]:
name = '[' + str(proc.pid) + '] ' + str(proc.cmdline[0])
elif "python" in proc.name and "python" not in proc.cmdline[0]:
name = '[' + str(proc.pid) + '] ' + str(proc.cmdline[0])
else:
name = '[' + str(proc.pid) + '] ' + str(proc.name)
else:
name = '[' + str(proc.pid) + '] ' + str(proc.name)
else:
name = '[' + str(proc.pid) + '] ' + str(proc.name) + ' - ' + str(proc.cmdline[0])
LOGGER.debug("MappingGear.sync_map_socket - " + str(proc.dead_map_sockets.__len__()) +
' dead socket found for process [' + str(proc.mapping_id) + ']' + name)
for map_socket in proc.dead_map_sockets:
# if map_socket.link_id is not None:
# link = LinkService.find_link(lid=map_socket.link_id)
# if link is not None:
# link.remove()
# else:
# LOGGER.warn("Dead socket (link : " + str(map_socket.link_id) + ") "
# "doesn't exist anymore on DB !")
destination_is_local = operating_system.is_local_destination(map_socket)
if map_socket.source_endpoint_id is not None and \
(
map_socket.source_endpoint_id not in operating_system.wip_delete_duplex_links_endpoints or
map_socket.source_endpoint_id not in operating_system.duplex_links_endpoints
):
source_endpoint = EndpointService.find_endpoint(eid=map_socket.source_endpoint_id)
if source_endpoint is not None:
LOGGER.debug('MappingGear.sync_map_socket - Remove source endpoint ' +
str(map_socket.source_endpoint_id))
source_endpoint.remove()
if map_socket.source_endpoint_id in operating_system.duplex_links_endpoints:
operating_system.wip_delete_duplex_links_endpoints.append(map_socket.source_endpoint_id)
else:
LOGGER.warn("MappingGear.sync_map_socket - Dead socket (source endpoint : " +
str(map_socket.source_endpoint_id) +
") doesn't exist anymore on DB!")
elif map_socket.source_endpoint_id is not None and \
map_socket.source_endpoint_id in operating_system.wip_delete_duplex_links_endpoints:
operating_system.wip_delete_duplex_links_endpoints.remove(map_socket.source_endpoint_id)
operating_system.duplex_links_endpoints.remove(map_socket.source_endpoint_id)
if map_socket.destination_endpoint_id is not None and \
(
map_socket.destination_endpoint_id not in
operating_system.wip_delete_duplex_links_endpoints or
map_socket.destination_endpoint_id not in operating_system.duplex_links_endpoints
):
target_endpoint = EndpointService.find_endpoint(
eid=map_socket.destination_endpoint_id,
local_cache=destination_is_local
)
if target_endpoint is not None:
array_link = LinkService.find_link(tep_id=target_endpoint.id)
if array_link is not None and array_link.__len__() == 0:
LOGGER.debug('MappingGear.sync_map_socket - Remove target endpoint ' +
str(map_socket.destination_endpoint_id))
target_endpoint.remove()
if map_socket.destination_endpoint_id in operating_system.duplex_links_endpoints:
operating_system.wip_delete_duplex_links_endpoints.append(
map_socket.destination_endpoint_id
)
else:
LOGGER.warn("MappingGear.sync_map_socket - Dead socket (target endpoint : " +
str(map_socket.destination_endpoint_id) +
") doesn't exist anymore on DB!")
elif map_socket.destination_endpoint_id is not None and \
map_socket.destination_endpoint_id in operating_system.wip_delete_duplex_links_endpoints:
operating_system.wip_delete_duplex_links_endpoints.remove(map_socket.destination_endpoint_id)
operating_system.duplex_links_endpoints.remove(map_socket.destination_endpoint_id)
sync_proc_time = timeit.default_timer()-start_time
LOGGER.debug('MappingGear.sync_map_socket - time : ' + str(sync_proc_time))
LOGGER.debug("MappingGear.sync_map_socket - done")
def sync_processs(self, operating_system):
LOGGER.debug("MappingGear.sync_processs - begin")
if self.osi_container is None:
LOGGER.error('MappingGear.sync_processs - operating system container is not synced')
return
start_time = timeit.default_timer()
kernel_map_obj = NodeService.find_node(name=Container.OSI_KERNEL_PROC_NAME, cid=self.osi_container.id)
if kernel_map_obj is None:
kernel_map_obj = Node(
name=Container.OSI_KERNEL_PROC_NAME,
container=self.osi_container
)
kernel_map_obj.add_property(('pid', 0), sync=False)
kernel_map_obj.add_property(('username', "root"), sync=False)
kernel_map_obj.add_property(('uids', [0]), sync=False)
kernel_map_obj.add_property(('gids', [0]), sync=False)
kernel_map_obj.save()
LOGGER.debug("MappingGear.sync_processs - " + str(operating_system.new_processs.__len__()) +
' new processes found')
for process in operating_system.new_processs:
if SystemGear.config.processes_filter is not None:
is_found = False
for process_name_filter in SystemGear.config.processes_filter:
if process_name_filter in process.name:
is_found = True
break
if not is_found:
continue
if process.name != "exe":
if "java" in process.name or "python" in process.name:
if "java" in process.name and "java" not in process.cmdline[0]:
name = '[' + str(process.pid) + '] ' + str(process.cmdline[0])
elif "python" in process.name and "python" not in process.cmdline[0]:
name = '[' + str(process.pid) + '] ' + str(process.cmdline[0])
else:
name = '[' + str(process.pid) + '] ' + str(process.name)
else:
name = '[' + str(process.pid) + '] ' + str(process.name)
else:
name = '[' + str(process.pid) + '] ' + str(process.name) + ' - ' + str(process.cmdline[0])
is_gate = False
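# A process exposing a LISTEN socket that is not a purely local service is mapped as a Gate (docker-proxy is left to the Ariane docker plugin); anything else becomes a Node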
if process.new_map_sockets is not None and \
"docker-proxy" not in process.name: # let ariane docker plugin manage this
for map_socket in process.new_map_sockets:
if map_socket.source_ip is not None and map_socket.source_port is not None:
if map_socket.status == "LISTEN" and not operating_system.is_local_service(map_socket):
LOGGER.debug("MappingGear.sync_processs - gate process found (" + name + ")")
is_gate = True
break
if not is_gate:
process_map_obj = Node(
name=name,
container=self.osi_container
)
process.to_be_refined = False
else:
process_map_obj = Gate(
name=name,
is_primary_admin=False,
url="tbc://" + str(SystemGear.fqdn) + "[" + name + "]", # will be redefined in sync_map_socket
container=self.osi_container
)
process.to_be_refined = True
process_map_obj.add_property(('pid', process.pid), sync=False)
process_map_obj.add_property(('exe', process.exe), sync=False)
process_map_obj.add_property(('cwd', process.cwd), sync=False)
process_map_obj.add_property(('creation time', process.create_time), sync=False)
process_map_obj.add_property(('username', process.username), sync=False)
process_map_obj.add_property(('uids', process.uids), sync=False)
process_map_obj.add_property(('gids', process.gids), sync=False)
if process.terminal is not None:
process_map_obj.add_property(('terminal', process.terminal), sync=False)
if process.cpu_affinity is not None:
process_map_obj.add_property(('cpu_affinity', process.cpu_affinity), sync=False)
process_map_obj.save()
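# Mask the value following any -pass/-pwd option before storing the command line as a property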
if process.cmdline.__len__() > 0:
for cmdline_part in process.cmdline:
if "-pass" in cmdline_part or "-pwd" in cmdline_part:
pass_index = process.cmdline.index(cmdline_part)
if pass_index + 1 < process.cmdline.__len__():
process.cmdline[pass_index+1] = "*****"
process_map_obj.add_property(('cmdline', process.cmdline))
process.mapping_id = process_map_obj.id
LOGGER.debug('MappingGear.sync_processs - new process on mapping db : (' + name + ',' +
str(process.mapping_id) + ')')
LOGGER.debug("MappingGear.sync_processs - " + str(operating_system.dead_processs.__len__()) +
' old processes found')
for process in operating_system.dead_processs:
if SystemGear.config.processes_filter is not None:
is_found = False
for process_name_filter in SystemGear.config.processes_filter:
if process_name_filter in process.name:
is_found = True
break
if not is_found:
continue
if process.name != "exe":
if "java" in process.name or "python" in process.name:
if "java" in process.name and "java" not in process.cmdline[0]:
name = '[' + str(process.pid) + '] ' + str(process.cmdline[0])
elif "python" in process.name and "python" not in process.cmdline[0]:
name = '[' + str(process.pid) + '] ' + str(process.cmdline[0])
else:
name = '[' + str(process.pid) + '] ' + str(process.name)
else:
name = '[' + str(process.pid) + '] ' + str(process.name)
else:
name = '[' + str(process.pid) + '] ' + str(process.name) + ' - ' + str(process.cmdline[0])
if process.mapping_id is None:
LOGGER.error('MappingGear.sync_processs - dead process (' + name +
') has not been saved on mapping db !')
else:
if process.is_node:
process_map_obj = NodeService.find_node(nid=process.mapping_id)
else:
process_map_obj = ContainerService.find_container(cid=process.mapping_id)
if process_map_obj is None:
LOGGER.error('MappingGear.sync_processs - consistency error between ProcOS cache and mapping DB (' +
name + ',' + str(process.mapping_id) + ')')
else:
process_map_obj.remove()
sync_proc_time = timeit.default_timer()-start_time
LOGGER.debug('MappingGear.sync_processs - time : ' + str(sync_proc_time))
LOGGER.debug("MappingGear.sync_processs - done")
def synchronize_with_ariane_mapping(self, component):
LOGGER.debug("MappingGear.synchronize_with_ariane_mapping")
if self.running:
try:
start_time = timeit.default_timer()
self.cache_clean_counter += 1
operating_system = component.operating_system.get()
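# Flush the remote OS instance cache periodically so that directory changes are eventually picked up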
if self.cache_clean_counter == self.cache_clean_counter_max:
self.cache_clean_counter = 0
self.target_osi_cache.clear()
self.sync_container(operating_system)
self.sync_processs(operating_system)
self.sync_map_socket(operating_system)
self.update_count += 1
sync_proc_time = timeit.default_timer()-start_time
LOGGER.info('MappingGear.synchronize_with_ariane_mapping - time : ' + str(sync_proc_time))
LOGGER.debug("MappingGear.synchronize_with_ariane_mapping - activate " +
SystemGear.domino_ariane_sync_topic)
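# The first pass only marks the initialization as done; later passes notify listeners on the domino ariane sync topic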
if not self.init_done:
self.init_done = True
else:
SystemGear.domino_activator.activate(SystemGear.domino_ariane_sync_topic)
except Exception as e:
LOGGER.error("MappingGear.synchronize_with_ariane_mapping - " + e.__str__())
LOGGER.error("MappingGear.synchronize_with_ariane_mapping - " + traceback.format_exc())
else:
LOGGER.warn('Synchronization requested but procos_mapping_gear@' + str(SystemGear.hostname) +
' is not running.')
class SystemGear(InjectorGearSkeleton):
# static reference on commons var
config = None
hostname = None
fqdn = None
# static reference to up to date ariane directories objects linked to this System
location = None
routing_areas = []
subnets = []
osi = None
embedding_osi = None
ost = None
ost_company = None
team = None
environment = None
domino_component_topic = "domino_component"
domino_ariane_sync_topic = "domino_ariane_sync"
domino_activator = None
def __init__(self, config):
LOGGER.debug("SystemGear.__init__")
SystemGear.hostname = socket.gethostname()
if SystemGear.hostname.split(".").__len__() > 1:
SystemGear.hostname = SystemGear.hostname.split(".")[0]
SystemGear.config = config
super(SystemGear, self).__init__(
gear_id='ariane.community.plugin.procos.gears.cache.system_gear@'+str(SystemGear.hostname),
gear_name='procos_system_gear@'+str(SystemGear.hostname),
gear_description='Ariane ProcOS system gear for '+str(SystemGear.hostname),
gear_admin_queue='ariane.community.plugin.procos.gears.cache.system_gear@'+str(SystemGear.hostname),
running=False
)
self.sleeping_period = config.sleeping_period
self.service = None
self.service_name = 'system_procos@'+str(SystemGear.hostname)+' gear'
component_type = SystemGear.config.system_context.os_type.name + " - " + \
SystemGear.config.system_context.os_type.architecture
SystemGear.domino_activator = DominoActivator({'type': 'Z0MQ'})
self.component = SystemComponent.start(
attached_gear_id=self.gear_id(),
hostname=SystemGear.hostname,
component_type=component_type,
system_gear_actor_ref=self.actor_ref,
domino_activator=SystemGear.domino_activator,
domino_topic=SystemGear.domino_component_topic,
config=config
).proxy()
self.directory_gear = DirectoryGear.start().proxy()
self.mapping_gear = MappingGear.start().proxy()
self.sync_in_progress = False
def synchronize_with_ariane_dbs(self):
LOGGER.debug("SystemGear.synchronize_with_ariane_dbs - sync db")
self.sync_in_progress = True
self.directory_gear.synchronize_with_ariane_directories(self.component).get()
self.mapping_gear.synchronize_with_ariane_mapping(self.component).get()
self.sync_in_progress = False
def init_with_ariane_dbs(self):
LOGGER.debug("SystemGear.init_with_ariane_dbs - Initializing...")
self.directory_gear.init_ariane_directories(self.component).get()
self.mapping_gear.synchronize_with_ariane_mapping(self.component).get()
# self.component.sniff(synchronize_with_ariane_dbs=False).get()
# self.directory_gear.synchronize_with_ariane_directories(self.component).get()
# self.mapping_gear.synchronize_with_ariane_mapping(self.component).get()
LOGGER.info("SystemGear.init_with_ariane_dbs - Initialization done.")
def run(self):
LOGGER.debug("SystemGear.run")
if self.sleeping_period is not None and self.sleeping_period > 0:
while self.running:
time.sleep(self.sleeping_period)
if self.running:
if not self.sync_in_progress:
self.component.sniff().get()
else:
LOGGER.warn("SystemGear.run - wait last sync to be completed !")
def on_start(self):
LOGGER.debug("SystemGear.on_start")
self.cache(running=self.running)
self.init_with_ariane_dbs()
self.running = True
self.cache(running=self.running)
self.service = threading.Thread(target=self.run, name=self.service_name)
self.service.start()
def on_stop(self):
LOGGER.debug("SystemGear.on_stop")
try:
if self.running:
self.running = False
self.cache(running=self.running)
self.service = None
self.component.stop().get()
self.directory_gear.stop().get()
self.mapping_gear.stop().get()
self.cached_gear_actor.remove().get()
SystemGear.domino_activator.stop()
except Exception as e:
LOGGER.error(e.__str__())
LOGGER.debug(traceback.format_exc())
def on_failure(self, exception_type, exception_value, traceback_):
LOGGER.debug("SystemGear.on_failure")
LOGGER.error("SystemGear.on_failure - " + exception_type.__str__() + "/" + exception_value.__str__())
LOGGER.error("SystemGear.on_failure - " + traceback_.format_exc())
try:
if self.running:
self.running = False
self.cache(running=self.running)
self.service = None
self.component.stop().get()
self.directory_gear.stop().get()
self.mapping_gear.stop().get()
self.cached_gear_actor.remove().get()
SystemGear.domino_activator.stop()
except Exception as e:
LOGGER.error(e.__str__())
LOGGER.debug(traceback.format_exc())
def gear_start(self):
LOGGER.debug("SystemGear.gear_start")
if self.service is not None:
self.running = True
self.service = threading.Thread(target=self.run, name=self.service_name)
self.service.start()
self.cache(running=self.running)
LOGGER.info('procos_system_gear@'+str(SystemGear.hostname)+' has been started')
else:
self.on_start()
LOGGER.info('procos_system_gear@'+str(SystemGear.hostname)+' has been restarted')
def gear_stop(self):
LOGGER.debug("SystemGear.gear_stop")
if self.running:
self.running = False
self.cache(running=self.running)
LOGGER.info('procos_system_gear@'+str(SystemGear.hostname)+' has been stopped')
|
echinopsii/net.echinopsii.ariane.community.plugin.procos
|
ariane_procos/gears.py
|
Python
|
agpl-3.0
| 104,388
|
# -*- coding: utf-8 -*-
# Copyright 2016-2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
{
'name': 'MFA Support',
'summary': 'Allows users to enable MFA and add optional trusted devices',
'version': '8.0.1.0.0',
'category': 'Extra Tools',
'website': 'https://laslabs.com/',
'author': 'LasLabs, Odoo Community Association (OCA)',
'license': 'LGPL-3',
'application': False,
'installable': True,
'external_dependencies': {
'python': ['pyotp'],
},
'depends': [
'report',
'web',
],
'data': [
'data/ir_config_parameter.xml',
'security/ir.model.access.csv',
'security/res_users_authenticator_security.xml',
'wizards/res_users_authenticator_create.xml',
'views/auth_totp.xml',
'views/res_users.xml',
],
}
|
acsone/server-tools
|
auth_totp/__openerp__.py
|
Python
|
agpl-3.0
| 869
|
from collections import deque
from nltk.corpus import wordnet as wn
from grafeno.transformers.wordnet import Transformer as WNGet
from grafeno.transformers.__utils import Transformer as Utils
class Transformer (WNGet, Utils):
'''Adds to the graph all WordNet hypernyms of every possible concept node.
The hypernyms are added as nodes with grammateme ``hyper = True``, and
related by edges with functor ``HYP``.
Parameters
----------
extend_min_depth : int
Minimum depth of hypernyms to add. This depth is defined as the shortest
path from the synset to the root of the WordNet hypernym hierarchy.
'''
def __init__ (self, extend_min_depth = 4, **kwds):
super().__init__(**kwds)
self.__min_depth = extend_min_depth
def post_process (self):
super().post_process()
g = self.graph
mind = self.__min_depth
# Extend with hypernyms
to_extend = deque(list(self.nodes))
while len(to_extend)>0:
n = to_extend.popleft()
node = self.nodes[n]
ss = node.get('synset')
if not ss:
continue
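# For synsets deep enough in the hierarchy, sprout a 'HYP' node per (instance) hypernym and queue it for further extension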
for cc in ss.hypernyms() + ss.instance_hypernyms():
depth = ss.min_depth()
if depth < mind:
continue
concept = cc.lemmas()[0].name()
nid = self.sprout(n,
{'functor':'HYP','weight':depth/(depth+1)},
{'concept':concept,'synset':cc,'hyper':True})
to_extend.append(nid)
|
agarsev/grafeno
|
grafeno/transformers/extend.py
|
Python
|
agpl-3.0
| 1,585
|