| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
from __future__ import unicode_literals
from django.forms import MediaDefiningClass, Media
from django.forms.utils import flatatt
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from django.utils.six import text_type
from django.utils.six import with_metaclass
from wagtail.utils.compat import render_to_string
from wagtail.wagtailcore import hooks
class MenuItem(with_metaclass(MediaDefiningClass)):
template = 'wagtailadmin/shared/menu_item.html'
def __init__(self, label, url, name=None, classnames='', attrs=None, order=1000):
self.label = label
self.url = url
self.classnames = classnames
self.name = (name or slugify(text_type(label)))
self.order = order
if attrs:
self.attr_string = flatatt(attrs)
else:
self.attr_string = ""
def is_shown(self, request):
"""
Whether this menu item should be shown for the given request; permission
        checks etc. should go here. By default, menu items are shown all the time.
"""
return True
def is_active(self, request):
return request.path.startswith(self.url)
def render_html(self, request):
return render_to_string(self.template, {
'name': self.name,
'url': self.url,
'classnames': self.classnames,
'attr_string': self.attr_string,
'label': self.label,
'active': self.is_active(request)
}, request=request)
class Menu(object):
def __init__(self, register_hook_name, construct_hook_name=None):
self.register_hook_name = register_hook_name
self.construct_hook_name = construct_hook_name
# _registered_menu_items will be populated on first access to the
# registered_menu_items property. We can't populate it in __init__ because
# we can't rely on all hooks modules to have been imported at the point that
# we create the admin_menu and settings_menu instances
self._registered_menu_items = None
@property
def registered_menu_items(self):
if self._registered_menu_items is None:
self._registered_menu_items = [fn() for fn in hooks.get_hooks(self.register_hook_name)]
return self._registered_menu_items
def menu_items_for_request(self, request):
return [item for item in self.registered_menu_items if item.is_shown(request)]
def active_menu_items(self, request):
return [item for item in self.menu_items_for_request(request) if item.is_active(request)]
@property
def media(self):
media = Media()
for item in self.registered_menu_items:
media += item.media
return media
def render_html(self, request):
menu_items = self.menu_items_for_request(request)
# provide a hook for modifying the menu, if construct_hook_name has been set
if self.construct_hook_name:
for fn in hooks.get_hooks(self.construct_hook_name):
fn(request, menu_items)
rendered_menu_items = []
for item in sorted(menu_items, key=lambda i: i.order):
try:
rendered_menu_items.append(item.render_html(request))
            except TypeError:
                # fallback for older render_html methods that don't accept a request arg
                rendered_menu_items.append(item.render_html())
return mark_safe(''.join(rendered_menu_items))
class SubmenuMenuItem(MenuItem):
    """A MenuItem which wraps an inner Menu object"""
    template = 'wagtailadmin/shared/menu_submenu_item.html'
def __init__(self, label, menu, **kwargs):
self.menu = menu
super(SubmenuMenuItem, self).__init__(label, '#', **kwargs)
@property
def media(self):
return Media(js=['wagtailadmin/js/submenu.js']) + self.menu.media
def is_shown(self, request):
# show the submenu if one or more of its children is shown
return bool(self.menu.menu_items_for_request(request))
def is_active(self, request):
return bool(self.menu.active_menu_items(request))
def render_html(self, request):
return render_to_string(self.template, {
'name': self.name,
'url': self.url,
'classnames': self.classnames,
'attr_string': self.attr_string,
'menu_html': self.menu.render_html(request),
'label': self.label,
'request': request,
'active': self.is_active(request)
}, request=request)
admin_menu = Menu(register_hook_name='register_admin_menu_item', construct_hook_name='construct_main_menu')
settings_menu = Menu(register_hook_name='register_settings_menu_item')
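# Hedged usage sketch (not part of the original module): because Menu collects
# its items lazily from hooks, an app can contribute to admin_menu from its own
# wagtail_hooks.py. The label, URL and order below are illustrative only.
#
#     from wagtail.wagtailcore import hooks
#     from wagtail.wagtailadmin.menu import MenuItem
#
#     @hooks.register('register_admin_menu_item')
#     def register_reports_menu_item():
#         return MenuItem('Reports', '/admin/reports/', order=500)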
| WQuanfeng/wagtail | wagtail/wagtailadmin/menu.py | Python | bsd-3-clause | 4,777 | 0.002303 |
from django.db import models
class ThingItem(object):
def __init__(self, value, display):
self.value = value
self.display = display
def __iter__(self):
return (x for x in [self.value, self.display])
def __len__(self):
return 2
class Things(object):
def __iter__(self):
return (x for x in [ThingItem(1, 2), ThingItem(3, 4)])
class ThingWithIterableChoices(models.Model):
# Testing choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
thing = models.CharField(max_length=100, blank=True, choices=Things())
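# Hedged note (not part of the original file): for validation purposes the
# Things() iterable above behaves like a plain list of 2-tuples, e.g.
#     choices=[(1, 2), (3, 4)]
# The ThingItem __iter__/__len__ protocol is what lets Django unpack each pair.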
| atruberg/django-custom | tests/model_validation/models.py | Python | bsd-3-clause | 614 | 0 |
#!/usr/bin/python
#
# $Id: LISP-Sonar.py 18 2014-10-06 13:23:37Z ggx $
#
# -------------------------------Important Marks-------------------------------
# Surprisingly, we found that when this Python script is executed manually, the
# dot symbol ('.') in a file path is resolved correctly, but it is not resolved
# when the script is called via NEPI. We therefore work around this issue by
# explicitly replacing the dot symbol in file paths with the current directory.
# We should also verify that the file paths given in the JSON file exist.
# -------------------------------End of Important Marks-------------------------
#Library import
import subprocess
import socket
import os
import sys
import time
import random
import threading
import json
import Queue
import ipaddress
import resource
from jsoncomment import JsonComment
#Custom import
from SonarPulse import Pulse, PulseTarget
#-------------------------------------------------------------------
# Variables and Setting
#
#Error Exit Value
ERR = 1
Revision = "$Revision: 18 $"
# Define Default Configuration File
# Note: avoid using the dot symbol (meaning the current directory in this
# context) in file paths, to ensure portability (we found that Python scripts
# called by NEPI do not recognize this symbol).
# Refer to https://infohost.nmt.edu/tcc/help/pubs/python/web/new-str-format.html
# for more information about Python string formatting.
CURRENTDIR = os.path.dirname(os.path.realpath(__file__))+'/' # for example : /Users/qipengsong/Documents/First_LISP_measurement
ConfigFile = '{0}LISP-Sonar-Config.json'.format(CURRENTDIR)
#-------------------------------------------------------------------
# SubRoutines
#
######
# Logs Directory & Files Verification
#
def BootstrapFilesCheck(TimeStamp):
#Check if the root log directory exists, if not create it.
itexists = os.path.isdir(LogRootDirectory)
    if not itexists:
try:
os.makedirs(LogRootDirectory)
except os.error:
print '=====> Critical Error: Creating ' + LogRootDirectory
sys.exit(ERR)
print '\tRoot Log Dir. [Created]\t: ' + LogRootDirectory
else:
print '\tRoot Log Dir. [Found]\t: ' + LogRootDirectory
#Get Date to check/create date-based directory tree
rundate = time.gmtime(TimeStamp)
DateDirectory = str(rundate.tm_year) + '/' + str(rundate.tm_mon) + '/' + str(rundate.tm_mday) +'/'
#Check if the date-based sub-directory exists, if not create it.
itexists = os.path.isdir(LogRootDirectory + DateDirectory)
    if not itexists:
try:
os.makedirs(LogRootDirectory + DateDirectory)
except os.error:
print '=====> Critical Error: Creating ' + LogRootDirectory + DateDirectory
sys.exit(ERR)
print '\tDate Directory [Created]: ' + LogRootDirectory + DateDirectory
else:
print '\tDate Directory [Found]\t: ' + LogRootDirectory + DateDirectory
return LogRootDirectory + DateDirectory
######
# Read a list from a file, shuffle its order, and return it
#
def LoadList(FILE):
try:
F = open( FILE, "r" )
except IOError:
print '=====> Critical Error:' + FILE + ' Not Found!!!'
sys.exit(ERR)
LLIST = F.read().split('\n')
F.close()
if LLIST.count('') > 0:
#If closing empty line exists remove it
LLIST.remove('')
# Randomize List so to not follow the same order at each experiment
random.shuffle(LLIST)
return LLIST
######
# Pulse Thread Class
#
class SonarThread (threading.Thread):
def __init__(self, threadID, tname, prqueue):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = tname
self.prqueue = prqueue
def run(self):
while True:
item = self.prqueue.get()
if item is None:
break # End Loop and finish thread
#print 'Thread ' + self.name + ' Working on: ' + str(item.eid) + '\n'
Evalue = Pulse(item)
if not (Evalue is None):
print '\tError \t(!)\t\t: ' + str(Evalue)
print >> sys.stderr, 'LISP-Sonar Error: ' + str(Evalue)
#-------------------------------------------------------------------
# Main
#
TimeStamp = int(time.time())
print 'LISP-Sonar \t\t\t: ' + Revision
print '\tRun \t\t\t: '+ time.strftime("%d.%m.%Y %H:%M:%S")
# Identify Machine and Date to Mark Logs
HOST = socket.gethostname()
print '\tHost Name \t\t: ' + HOST
# Read Configuration File
if (len(sys.argv) > 2):
print '=====> Exiting! Too many arguments... \n'
sys.exit(ERR)
if (len(sys.argv) == 2):
#Always take the first argument as configuration file
ConfigFile = str(sys.argv[1])
try:
JsonFile = open(ConfigFile)
except:
print '=====> Exiting! Error opening configuration file: '+ConfigFile+'\n'
sys.exit(ERR)
Cfg = json.load(JsonFile)
JsonFile.close()
try:
# Remember to replace "CURRENTDIR" with real current directory path
# for example, for item "DirsConfig"
# "DirsConfig":
# {
# "LogRootDirectory":"CURRENTDIR/SonarOutput/",
# "MRListDirectory":"CURRENTDIR",
# "MRListFile":"MR-Current-List.txt",
# "EIDListDirectory":"CURRENTDIR",
# "EIDListFile":"EID-Current-List.txt"
# },
# Replace "CURRENTDIR" with variable CURRENTDIR defined at the beginning
LogRootDirectory = Cfg["DirsConfig"]["LogRootDirectory"].replace("$CURRENTDIR", CURRENTDIR)
MRListDirectory = Cfg["DirsConfig"]["MRListDirectory"].replace("$CURRENTDIR", CURRENTDIR)
MRListFile = Cfg["DirsConfig"]["MRListFile"]
EIDListDirectory = Cfg["DirsConfig"]["EIDListDirectory"].replace("$CURRENTDIR", CURRENTDIR)
EIDListFile = Cfg["DirsConfig"]["EIDListFile"]
SpawnTimeGap = Cfg["ThreadSpawn"]["TimeGap"]
SpawnRandomization = Cfg["ThreadSpawn"]["Randomization"]
SpawnMaxThreads = Cfg["ThreadSpawn"]["MaxThreads"]
LIGRequestTimeOut = Cfg["Lig"]["TimeOut"]
LIGMaxRetries = Cfg["Lig"]["MaxTries"]
LIGSrcAddr = Cfg["Lig"]["SourceAddress"]
except KeyError:
print '=====> Exiting! Configuration Error for '+str(sys.exc_value)+' in file '+ConfigFile+'\n'
sys.exit(ERR)
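# Hedged sketch (not part of the original script): a minimal configuration file
# covering all keys read above might look like the following; the values are
# illustrative assumptions, not the project's actual defaults:
#
#     {
#         "DirsConfig": {
#             "LogRootDirectory": "$CURRENTDIR/SonarOutput/",
#             "MRListDirectory": "$CURRENTDIR",
#             "MRListFile": "MR-Current-List.txt",
#             "EIDListDirectory": "$CURRENTDIR",
#             "EIDListFile": "EID-Current-List.txt"
#         },
#         "ThreadSpawn": {"TimeGap": 1, "Randomization": 0.5, "MaxThreads": 10},
#         "Lig": {"TimeOut": 5, "MaxTries": 3, "SourceAddress": "None"}
#     }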
# Final directory where results of this instance will be written
InstanceDirectory = BootstrapFilesCheck(TimeStamp)
#Load and shuffle list of Map-Resolvers
MRList = LoadList(MRListDirectory + MRListFile)
print '\tMR List File \t\t: ' + MRListDirectory + MRListFile
print '\tMR Loaded \t\t: ' + str(len(MRList))
#Load and shuffle list of EID to lookup
EIDList = LoadList(EIDListDirectory + EIDListFile)
print '\tEID List File \t\t: ' + EIDListDirectory + EIDListFile
print '\tEID Loaded \t\t: ' + str(len(EIDList))
# Check Valid Source Address
if (LIGSrcAddr != "None"):
try:
LIGSrcIP = ipaddress.ip_address(LIGSrcAddr)
except ValueError:
print 'Not Valid Source Address: ' + LIGSrcAddr
sys.exit(ERR)
else:
LIGSrcIP = None
print '\tQuery Source Address \t: ' + str(LIGSrcIP)
# Spawn sonar threads
threads = []
threadID = 1
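# (Added note) Raise the soft NOFILE limit so the thread pool cannot run out of
# file descriptors; the "4 per thread plus 256 headroom" sizing below is the
# script's own heuristic, not a documented requirement.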
resource.setrlimit(resource.RLIMIT_NOFILE,(SpawnMaxThreads*4+256, resource.getrlimit(resource.RLIMIT_NOFILE)[1]))
PulseRequestQueue = Queue.Queue(SpawnMaxThreads)
for t in range(SpawnMaxThreads):
# Create the pool of threads
tName = 'Sonar Thread ' + `threadID`
thread = SonarThread(threadID, tName, PulseRequestQueue)
thread.start()
threads.append(thread)
threadID += 1
print '\tThreads [Now Working]\t: ' + str(SpawnMaxThreads) + ' [' + str(SpawnTimeGap) + ' +/- ' + str(SpawnRandomization) + ']'
for EID in EIDList:
for MR in MRList:
# Validate Addresses
try:
EIDIP = ipaddress.ip_address(EID)
except ValueError:
print 'Not Valid EID address: ' + str(EID)
print >> sys.stderr, 'Not Valid EID address: ' + str(EID)
continue
try:
MRIP = ipaddress.ip_address(MR)
except ValueError:
print 'Not Valid MR address: ' + str(MR)
print >> sys.stderr, 'Not Valid MR address: ' + str(MR)
continue
# Put Metadata for Pulse Request in the queue only if
# LIGSrcIP and MR are in the same family.
if (LIGSrcIP and (LIGSrcIP.version != MRIP.version)):
continue
Target = PulseTarget(HOST, TimeStamp, EIDIP, MRIP, InstanceDirectory, LIGRequestTimeOut, LIGMaxRetries, LIGSrcIP)
PulseRequestQueue.put(Target)
        # Add some randomization to avoid having the threads trigger their
        # requests at exactly the same time
time.sleep(SpawnTimeGap + random.uniform(-SpawnRandomization, SpawnRandomization))
for t in range(SpawnMaxThreads):
    # Signal to all threads that there are no more requests to process
PulseRequestQueue.put(None)
for t in threads:
# Wait for all threads to end
t.join()
seconds = int(time.time()) - TimeStamp
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
print ('=====> Done [Duration] \t: %s:%s:%s' % (hours, minutes, seconds))
sys.exit()
| hansomesong/First_LISP_measurement | LISP-Sonar.py | Python | gpl-2.0 | 9,113 | 0.006145 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import warnings

import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV, bohr_to_ang
from pymatgen.io.abinit.abiobjects import *
from pymatgen.util.testing import PymatgenTest
class LatticeFromAbivarsTest(PymatgenTest):
def test_rprim_acell(self):
l1 = lattice_from_abivars(acell=3 * [10], rprim=np.eye(3))
self.assertAlmostEqual(l1.volume, bohr_to_ang ** 3 * 1000)
assert l1.angles == (90, 90, 90)
l2 = lattice_from_abivars(acell=3 * [10], angdeg=(90, 90, 90))
assert l1 == l2
l2 = lattice_from_abivars(acell=3 * [8], angdeg=(60, 60, 60))
abi_rprimd = (
np.reshape(
[
4.6188022,
0.0000000,
6.5319726,
-2.3094011,
4.0000000,
6.5319726,
-2.3094011,
-4.0000000,
6.5319726,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l2.matrix, abi_rprimd)
l3 = lattice_from_abivars(acell=[3, 6, 9], angdeg=(30, 40, 50))
abi_rprimd = (
np.reshape(
[
3.0000000,
0.0000000,
0.0000000,
3.8567257,
4.5962667,
0.0000000,
6.8944000,
4.3895544,
3.7681642,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l3.matrix, abi_rprimd)
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(90, 90, 90), rprim=np.eye(3))
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(-90, 90, 90))
def test_znucl_typat(self):
"""Test the order of typat and znucl in the Abinit input and enforce_typat, enforce_znucl."""
# Ga Ga1 1 0.33333333333333 0.666666666666667 0.500880 1.0
# Ga Ga2 1 0.66666666666667 0.333333333333333 0.000880 1.0
# N N3 1 0.333333333333333 0.666666666666667 0.124120 1.0
# N N4 1 0.666666666666667 0.333333333333333 0.624120 1.0
gan = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "abinit", "gan.cif"))
# By default, znucl is filled using the first new type found in sites.
def_vars = structure_to_abivars(gan)
def_znucl = def_vars["znucl"]
self.assertArrayEqual(def_znucl, [31, 7])
def_typat = def_vars["typat"]
self.assertArrayEqual(def_typat, [1, 1, 2, 2])
# But it's possible to enforce a particular value of typat and znucl.
enforce_znucl = [7, 31]
enforce_typat = [2, 2, 1, 1]
enf_vars = structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=enforce_typat)
self.assertArrayEqual(enf_vars["znucl"], enforce_znucl)
self.assertArrayEqual(enf_vars["typat"], enforce_typat)
self.assertArrayEqual(def_vars["xred"], enf_vars["xred"])
assert [s.symbol for s in species_by_znucl(gan)] == ["Ga", "N"]
for itype1, itype2 in zip(def_typat, enforce_typat):
assert def_znucl[itype1 - 1] == enforce_znucl[itype2 - 1]
with self.assertRaises(Exception):
structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=None)
class SpinModeTest(PymatgenTest):
def test_base(self):
polarized = SpinMode.as_spinmode("polarized")
other_polarized = SpinMode.as_spinmode("polarized")
unpolarized = SpinMode.as_spinmode("unpolarized")
polarized.to_abivars()
self.assertTrue(polarized is other_polarized)
self.assertTrue(polarized == other_polarized)
self.assertTrue(polarized != unpolarized)
# Test pickle
self.serialize_with_pickle(polarized)
# Test dict methods
self.assertMSONable(polarized)
self.assertMSONable(unpolarized)
class SmearingTest(PymatgenTest):
def test_base(self):
fd1ev = Smearing.as_smearing("fermi_dirac:1 eV")
fd1ev.to_abivars()
self.assertTrue(fd1ev)
same_fd = Smearing.as_smearing("fermi_dirac:" + str(1.0 / Ha_to_eV))
self.assertTrue(same_fd == fd1ev)
nosmear = Smearing.nosmearing()
assert nosmear == Smearing.as_smearing("nosmearing")
self.assertFalse(nosmear)
self.assertTrue(nosmear != fd1ev)
self.assertMSONable(nosmear)
new_fd1ev = Smearing.from_dict(fd1ev.as_dict())
self.assertTrue(new_fd1ev == fd1ev)
# Test pickle
self.serialize_with_pickle(fd1ev)
# Test dict methods
self.assertMSONable(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
def test_base(self):
algo = ElectronsAlgorithm(nstep=70)
abivars = algo.to_abivars()
# Test pickle
self.serialize_with_pickle(algo)
# Test dict methods
self.assertMSONable(algo)
class ElectronsTest(PymatgenTest):
def test_base(self):
default_electrons = Electrons()
self.assertTrue(default_electrons.nsppol == 2)
self.assertTrue(default_electrons.nspinor == 1)
self.assertTrue(default_electrons.nspden == 2)
abivars = default_electrons.to_abivars()
# new = Electron.from_dict(default_electrons.as_dict())
# Test pickle
self.serialize_with_pickle(default_electrons, test_eq=False)
custom_electrons = Electrons(
spin_mode="unpolarized",
smearing="marzari4:0.2 eV",
algorithm=ElectronsAlgorithm(nstep=70),
nband=10,
charge=1.0,
comment="Test comment",
)
# Test dict methods
self.assertMSONable(custom_electrons)
class KSamplingTest(PymatgenTest):
def test_base(self):
monkhorst = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)
gamma_centered = KSampling.gamma_centered((3, 3, 3), False, False)
monkhorst.to_abivars()
# Test dict methods
self.assertMSONable(monkhorst)
self.assertMSONable(gamma_centered)
class RelaxationTest(PymatgenTest):
def test_base(self):
atoms_and_cell = RelaxationMethod.atoms_and_cell()
atoms_only = RelaxationMethod.atoms_only()
atoms_and_cell.to_abivars()
# Test dict methods
self.assertMSONable(atoms_and_cell)
self.assertMSONable(atoms_only)
class PPModelTest(PymatgenTest):
def test_base(self):
godby = PPModel.as_ppmodel("godby:12 eV")
# print(godby)
# print(repr(godby))
godby.to_abivars()
self.assertTrue(godby)
same_godby = PPModel.as_ppmodel("godby:" + str(12.0 / Ha_to_eV))
self.assertTrue(same_godby == godby)
noppm = PPModel.get_noppmodel()
self.assertFalse(noppm)
self.assertTrue(noppm != godby)
new_godby = PPModel.from_dict(godby.as_dict())
self.assertTrue(new_godby == godby)
# Test pickle
self.serialize_with_pickle(godby)
# Test dict methods
self.assertMSONable(godby)
| vorwerkc/pymatgen | pymatgen/io/abinit/tests/test_abiobjects.py | Python | mit | 7,463 | 0.000804 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Point of Sale',
'version': '1.0.1',
'category': 'Point Of Sale',
'sequence': 20,
'summary': 'Touchscreen Interface for Shops',
'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web-based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
'depends': ['stock_account', 'barcodes'],
'data': [
'security/point_of_sale_security.xml',
'security/ir.model.access.csv',
'data/default_barcode_patterns.xml',
'wizard/pos_box.xml',
'wizard/pos_details.xml',
'wizard/pos_discount.xml',
'wizard/pos_open_statement.xml',
'wizard/pos_payment.xml',
'views/pos_templates.xml',
'views/point_of_sale_template.xml',
'views/point_of_sale_report.xml',
'views/point_of_sale_view.xml',
'views/pos_order_view.xml',
'views/product_view.xml',
'views/pos_category_view.xml',
'views/account_journal_view.xml',
'views/pos_config_view.xml',
'views/pos_session_view.xml',
'views/point_of_sale_sequence.xml',
'data/point_of_sale_data.xml',
'views/pos_order_report_view.xml',
'views/account_statement_view.xml',
'views/account_statement_report.xml',
'views/res_users_view.xml',
'views/res_partner_view.xml',
'views/res_config_view.xml',
'views/report_statement.xml',
'views/report_userlabel.xml',
'views/report_saledetails.xml',
'views/point_of_sale.xml',
'views/point_of_sale_dashboard.xml',
],
'demo': [
'data/point_of_sale_demo.xml',
],
'installable': True,
'application': True,
'qweb': ['static/src/xml/pos.xml'],
'website': 'https://www.odoo.com/page/point-of-sale',
}
| chienlieu2017/it_management | odoo/addons/point_of_sale/__manifest__.py | Python | gpl-3.0 | 2,501 | 0.0012 |
# -*- coding: utf-8 -*-
#
import datetime
import hashlib
import json
import logging
import random
import string
import time
import urllib.request
import xml.etree.ElementTree as ET
from LostAndFound.settings import WECHAT_TOKEN, WECHAT_APPID, WECHAT_SECRET
from django.http import Http404, HttpResponse
from django.template.loader import get_template
from LostAndFound import settings
from codex.baseview import BaseView
from wechat.models import Lost, Found, User
__author__ = "Epsirom"
class WeChatHandler(object):
logger = logging.getLogger('WeChat')
def __init__(self, view, msg, user):
"""
:type view: WeChatView
:type msg: dict
:type user: User or None
"""
self.input = msg
self.user = user
self.view = view
def check(self):
raise NotImplementedError('You should implement check() in sub-class of WeChatHandler')
def handle(self):
raise NotImplementedError('You should implement handle() in sub-class of WeChatHandler')
def get_context(self, **extras):
return dict(
FromUserName=self.input['ToUserName'],
ToUserName=self.input['FromUserName'],
**extras
)
def reply_text(self, content):
return get_template('text.xml').render(self.get_context(
Content=content
))
def reply_news(self, articles):
if len(articles) > 10:
            self.logger.warning('Reply with %d articles, keep only 10', len(articles))
return get_template('news.xml').render(self.get_context(
Articles=articles[:10]
))
def reply_single_news(self, article):
return self.reply_news([article])
def get_message(self, name, **data):
if name.endswith('.html'):
name = name[: -5]
return get_template('messages/' + name + '.html').render(dict(
handler=self, user=self.user, **data
))
def is_msg_type(self, check_type):
return self.input['MsgType'] == check_type
def is_text(self, *texts):
return self.is_msg_type('text') and (self.input['Content'].lower() in texts)
def is_event_click(self, *event_keys):
return self.is_msg_type('event') and (self.input['Event'] == 'CLICK') and (self.input['EventKey'] in event_keys)
def is_event(self, *events):
return self.is_msg_type('event') and (self.input['Event'] in events)
def is_text_command(self, *commands):
return self.is_msg_type('text') and ((self.input['Content'].split() or [None])[0] in commands)
def url_help(self):
return settings.get_url('u/help')
def url_lost_list(self):
return settings.get_url('u/lost/list', {'user': self.user.open_id})
def url_lost_new(self):
return settings.get_url('u/lost/new', {'user': self.user.open_id})
def url_found_list(self):
return settings.get_url('u/found/list', {'user': self.user.open_id})
def url_mine(self):
        return settings.get_url('u/mine', {'user': self.user.open_id})
class WeChatEmptyHandler(WeChatHandler):
def check(self):
return True
def handle(self):
return self.reply_text('The server is busy')
class WeChatError(Exception):
def __init__(self, errcode, errmsg, *args, **kwargs):
super(WeChatError, self).__init__(errmsg, *args, **kwargs)
self.errcode = errcode
self.errmsg = errmsg
def __repr__(self):
return '[errcode=%d] %s' % (self.errcode, self.errmsg)
class Sign:
def __init__(self, jsapi_ticket, url):
self.ret = {
'jsapi_ticket': jsapi_ticket,
'nonceStr': self.__create_nonce_str(),
'timestamp': self.__create_timestamp(),
'url': url
}
def __create_nonce_str(self):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
return int(time.time())
def sign(self):
string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
print(string)
self.ret['signature'] = hashlib.sha1(string.encode('utf-8')).hexdigest()
return self.ret
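# Hedged usage sketch (not part of the original module): producing a JS-SDK
# config signature for a page; the ticket and URL values are illustrative.
#
#     sign = Sign('sample-jsapi-ticket', 'http://example.com/page?x=1')
#     conf = sign.sign()  # dict with jsapi_ticket, nonceStr, timestamp, url, signature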
class WeChatLib(object):
logger = logging.getLogger('wechatlib')
access_token = ''
access_token_expire = datetime.datetime.fromtimestamp(0)
jsapi_ticket = ''
jsapi_ticket_expire = datetime.datetime.fromtimestamp(0)
token = WECHAT_TOKEN
appid = WECHAT_APPID
secret = WECHAT_SECRET
def __init__(self, token, appid, secret):
super(WeChatLib, self).__init__()
self.token = token
self.appid = appid
self.secret = secret
def check_signature(self, signature, timestamp, nonce):
tmp_list = sorted([self.token, timestamp, nonce])
tmpstr = hashlib.sha1(''.join(tmp_list).encode('utf-8')).hexdigest()
return tmpstr == signature
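    # Added note: this implements WeChat's documented server-verification rule --
    # sort [token, timestamp, nonce], concatenate, SHA-1 the result, and compare
    # it with the 'signature' query parameter sent by the platform.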
@classmethod
def _http_get(cls, url):
req = urllib.request.Request(url=url)
res_data = urllib.request.urlopen(req)
res = res_data.read()
return res.decode()
@classmethod
def _http_post(cls, url, data):
req = urllib.request.Request(
url=url, data=data if isinstance(data, bytes) else data.encode()
)
res_data = urllib.request.urlopen(req)
res = res_data.read()
return res.decode()
@classmethod
def _http_post_dict(cls, url, data):
return cls._http_post(url, json.dumps(data, ensure_ascii=False))
@classmethod
def get_wechat_access_token(cls):
if datetime.datetime.now() >= cls.access_token_expire:
print("appid=%s secret=%s" %(cls.appid, cls.secret))
res = cls._http_get(
'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s' % (
cls.appid, cls.secret
)
)
rjson = json.loads(res)
if rjson.get('errcode'):
raise WeChatError(rjson['errcode'], rjson['errmsg'])
cls.access_token = rjson['access_token']
cls.access_token_expire = datetime.datetime.now() + datetime.timedelta(seconds=rjson['expires_in'] - 300)
cls.logger.info('Got access token %s', cls.access_token)
return cls.access_token
@classmethod
def get_wechat_jsapi_ticket(cls):
if datetime.datetime.now() >= cls.jsapi_ticket_expire:
at = cls.get_wechat_access_token()
print("access token=%s" %(at))
res = cls._http_get(
'https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token=%s&type=jsapi' % (at)
)
rjson = json.loads(res)
if rjson.get('errcode'):
raise WeChatError(rjson['errcode'], rjson['errmsg'])
cls.jsapi_ticket = rjson['ticket']
cls.jsapi_ticket_expire = datetime.datetime.now() + datetime.timedelta(seconds=rjson['expires_in'] - 300)
cls.logger.info('Got jsapi ticket %s', cls.jsapi_ticket)
return cls.jsapi_ticket
@classmethod
def get_wechat_wx_config(cls, url):
sign = Sign(cls.get_wechat_jsapi_ticket(), url)
config = sign.sign()
wx_config = {
'appId': settings.WECHAT_APPID,
'timestamp': config['timestamp'],
'nonceStr': config['nonceStr'],
'signature': config['signature']
}
return wx_config
def get_wechat_menu(self):
res = self._http_get(
'https://api.weixin.qq.com/cgi-bin/menu/get?access_token=%s' % (
self.get_wechat_access_token()
)
)
rjson = json.loads(res)
return rjson.get('menu', {}).get('button', [])
def set_wechat_menu(self, data):
res = self._http_post_dict(
'https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s' % (
self.get_wechat_access_token()
), data
)
rjson = json.loads(res)
if rjson.get('errcode'):
raise WeChatError(rjson['errcode'], rjson['errmsg'])
class WeChatView(BaseView):
logger = logging.getLogger('WeChat')
lib = WeChatLib('', '', '')
handlers = []
error_message_handler = WeChatEmptyHandler
default_handler = WeChatEmptyHandler
def _check_signature(self):
query = self.request.GET
return self.lib.check_signature(query['signature'], query['timestamp'], query['nonce'])
def do_dispatch(self, *args, **kwargs):
if not settings.IGNORE_WECHAT_SIGNATURE and not self._check_signature():
self.logger.error('Check WeChat signature failed')
raise Http404()
if self.request.method == 'GET':
return HttpResponse(self.request.GET['echostr'])
elif self.request.method == 'POST':
return HttpResponse(self.handle_wechat_msg(), content_type='application/xml')
else:
return self.http_method_not_allowed()
def handle_wechat_msg(self):
msg = self.parse_msg_xml(ET.fromstring(self.request.body))
if 'FromUserName' not in msg:
return self.error_message_handler(self, msg, None).handle()
user, created = User.objects.get_or_create(open_id=msg['FromUserName'])
if created:
self.logger.info('New user: %s', user.open_id)
try:
for handler in self.handlers:
inst = handler(self, msg, user)
if inst.check():
return inst.handle()
return self.default_handler(self, msg, user).handle()
        except Exception:
self.logger.exception('Error occurred when handling WeChat message %s', msg)
return self.error_message_handler(self, msg, user).handle()
@classmethod
def parse_msg_xml(cls, root_elem):
msg = dict()
if root_elem.tag == 'xml':
for child in root_elem:
msg[child.tag] = child.text
return msg
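# Hedged sketch (not part of the original module): parse_msg_xml flattens a
# WeChat push such as
#     <xml><ToUserName>gh_x</ToUserName><FromUserName>o_y</FromUserName>
#          <MsgType>text</MsgType><Content>help</Content></xml>
# into {'ToUserName': 'gh_x', 'FromUserName': 'o_y', 'MsgType': 'text',
#       'Content': 'help'}.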
| zzz14/LOST-FOUND | wechat/wrapper.py | Python | gpl-3.0 | 10,069 | 0.002582 |
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import *
class YowIbProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"ib": (self.recvIb, self.sendIb),
"iq": (None, self.sendIb)
}
super(YowIbProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Ib Layer"
def sendIb(self, entity):
if entity.__class__ == CleanIqProtocolEntity:
self.toLower(entity.toProtocolTreeNode())
def recvIb(self, node):
if node.getChild("dirty"):
self.toUpper(DirtyIbProtocolEntity.fromProtocolTreeNode(node))
elif node.getChild("offline"):
self.toUpper(OfflineIbProtocolEntity.fromProtocolTreeNode(node))
else:
raise ValueError("Unkown ib node %s" % node)
| felix-dumit/campusbot | yowsup2/yowsup/layers/protocol_ib/layer.py | Python | mit | 861 | 0.002323 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
]
| nesdis/djongo | tests/django_tests/tests/v22/tests/admin_scripts/app_waiting_migration/migrations/0001_initial.py | Python | agpl-3.0 | 434 | 0.002304 |
import datetime
import release
def test_release():
rel = release.Release("mysql-3.23.22-beta", "1234-05-06")
print(vars(rel))
assert vars(rel) == {
"raw_label": "mysql-3.23.22-beta",
"raw_date": "1234-05-06",
"majormin": "3.23",
"pre": "mysql-",
"post": ".22-beta",
"date": datetime.datetime(1234, 5, 6, 0, 0),
}
| johntellsall/shotglass | dec/test_release.py | Python | mit | 380 | 0 |
# -*- coding: utf-8 -*-
class Solution(object):
''' https://leetcode.com/problems/count-primes/
'''
def countPrimes(self, n):
if n <= 2:
return 0
is_prime = [True] * n
ret = 0
for i in range(2, n):
if not is_prime[i]:
continue
ret += 1
            for m in range(2, n):
                if i * m >= n:
                    break  # once i*m reaches n, every later multiple does too
                is_prime[i * m] = False
return ret
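# Hedged usage sketch (not part of the original file):
#     assert Solution().countPrimes(10) == 4  # primes below 10: 2, 3, 5, 7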
| lycheng/leetcode | others/count_primes.py | Python | mit | 488 | 0 |
#=======================================================================
# RegIncrSC.py
#=======================================================================
from pymtl import *
class RegIncrSC( SystemCModel ):
sclinetrace = True
def __init__( s ):
s.in_ = InPort ( Bits(32) )
s.out = OutPort( Bits(32) )
s.set_ports({
"clk" : s.clk,
"rst" : s.reset,
"in_" : s.in_,
"out" : s.out,
})
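# Hedged note (not part of the original file): sclinetrace = True tells PyMTL's
# SystemC integration to use the wrapped model's own line-trace output, and
# set_ports maps the PyMTL InPort/OutPort objects onto the SystemC module's
# port names ("clk", "rst", "in_", "out").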
| cornell-brg/pymtl | pymtl/tools/integration/systemc_tests/sequential/RegIncrSC.py | Python | bsd-3-clause | 446 | 0.049327 |
import time, os
from autotest.client import test, os_dep, utils
from autotest.client.shared import error
class btreplay(test.test):
version = 1
# http://brick.kernel.dk/snaps/blktrace-git-latest.tar.gz
def setup(self, tarball = 'blktrace-git-latest.tar.gz'):
tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
utils.extract_tarball_to_dir(tarball, self.srcdir)
self.job.setup_dep(['libaio'])
libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
cflags = '-I ' + self.autodir + '/deps/libaio/include'
var_libs = 'LIBS="' + libs + '"'
var_cflags = 'CFLAGS="' + cflags + '"'
self.make_flags = var_libs + ' ' + var_cflags
os.chdir(self.srcdir)
utils.system('patch -p1 < ../Makefile.patch')
utils.system(self.make_flags + ' make')
def initialize(self):
self.job.require_gcc()
self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
self.results = []
def run_once(self, dev="", devices="", extra_args='', tmpdir=None):
# @dev: The device against which the trace will be replayed.
# e.g. "sdb" or "md_d1"
# @devices: A space-separated list of the underlying devices
# which make up dev, e.g. "sdb sdc". You only need to set
# devices if dev is an MD, LVM, or similar device;
# otherwise leave it as an empty string.
if not tmpdir:
tmpdir = self.tmpdir
os.chdir(self.srcdir)
alldevs = "-d /dev/" + dev
alldnames = dev
for d in devices.split():
alldevs += " -d /dev/" + d
alldnames += " " + d
# convert the trace (assumed to be in this test's base
# directory) into btreplay's required format
#
# TODO: The test currently halts here as there is no trace in the
# test's base directory.
cmd = "./btreplay/btrecord -d .. -D %s %s" % (tmpdir, dev)
self.results.append(utils.system_output(cmd, retain_output=True))
# time a replay that omits "thinktime" between requests
# (by use of the -N flag)
cmd = self.ldlib + " /usr/bin/time ./btreplay/btreplay -d "+\
tmpdir+" -N -W "+dev+" "+extra_args+" 2>&1"
self.results.append(utils.system_output(cmd, retain_output=True))
# trace a replay that reproduces inter-request delays, and
# analyse the trace with btt to determine the average request
# completion latency
utils.system("./blktrace -D %s %s >/dev/null &" % (tmpdir, alldevs))
cmd = self.ldlib + " ./btreplay/btreplay -d %s -W %s %s" %\
(tmpdir, dev, extra_args)
self.results.append(utils.system_output(cmd, retain_output=True))
utils.system("killall -INT blktrace")
# wait until blktrace is really done
slept = 0.0
while utils.system("ps -C blktrace > /dev/null",
ignore_status=True) == 0:
time.sleep(0.1)
slept += 0.1
if slept > 30.0:
utils.system("killall -9 blktrace")
raise error.TestError("blktrace failed to exit in 30 seconds")
utils.system("./blkparse -q -D %s -d %s/trace.bin -O %s >/dev/null" %
(tmpdir, tmpdir, alldnames))
cmd = "./btt/btt -i %s/trace.bin" % tmpdir
self.results.append(utils.system_output(cmd, retain_output=True))
def postprocess(self):
for n in range(len(self.results)):
if self.results[n].strip() == "==================== All Devices ====================":
                # the /usr/bin/time summary (e.g. "0.01user 0.02system
                # 0:03.4elapsed") sits two entries back in self.results;
                # strip() with a character set peels off the 'system' and
                # 'elapsed' suffixes, leaving the bare time values
                words = self.results[n-2].split()
                s = words[1].strip('sytem').split(':')
                e = words[2].strip('elapsd').split(':')
break
systime = 0.0
for n in range(len(s)):
i = (len(s)-1) - n
systime += float(s[i]) * (60**n)
elapsed = 0.0
for n in range(len(e)):
i = (len(e)-1) - n
elapsed += float(e[i]) * (60**n)
q2c = 0.0
for line in self.results:
words = line.split()
if len(words) < 3:
continue
if words[0] == 'Q2C':
q2c = float(words[2])
break
self.write_perf_keyval({'time':elapsed, 'systime':systime,
'avg_q2c_latency':q2c})
| nacc/autotest | client/tests/btreplay/btreplay.py | Python | gpl-2.0 | 4,454 | 0.003368 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Tests various schema replication scenarios
#
# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Usage:
# export DC1=dc1_dns_name
# export DC2=dc2_dns_name
# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN replica_sync -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
#
import drs_base
import samba.tests
import time
import ldb
from ldb import (
SCOPE_BASE, LdbError, ERR_NO_SUCH_OBJECT)
class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
"""Intended as a black box test case for DsReplicaSync
implementation. It should test the behavior of this
case in cases when inbound replication is disabled"""
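    # Added background note: when two DCs create objects with the same DN,
    # replication keeps both objects and renames the losing one to a
    # "conflict DN" whose RDN takes the form "<name>\x0ACNF:<objectGUID>" --
    # hence the 'CNF:%s' % guid assertions throughout these tests.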
def setUp(self):
super(DrsReplicaSyncTestCase, self).setUp()
self.ou1 = None
self.ou2 = None
def tearDown(self):
self._cleanup_object(self.ou1)
self._cleanup_object(self.ou2)
# re-enable replication
self._enable_inbound_repl(self.dnsname_dc1)
self._enable_inbound_repl(self.dnsname_dc2)
super(DrsReplicaSyncTestCase, self).tearDown()
def _cleanup_object(self, guid):
"""Cleans up a test object, if it still exists"""
if guid is not None:
try:
self.ldb_dc2.delete('<GUID=%s>' % guid, ["tree_delete:1"])
except LdbError, (num, _):
self.assertEquals(num, ERR_NO_SUCH_OBJECT)
try:
self.ldb_dc1.delete('<GUID=%s>' % guid, ["tree_delete:1"])
except LdbError, (num, _):
self.assertEquals(num, ERR_NO_SUCH_OBJECT)
def test_ReplEnabled(self):
"""Tests we can replicate when replication is enabled"""
self._enable_inbound_repl(self.dnsname_dc1)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=False)
def test_ReplDisabled(self):
"""Tests we cann't replicate when replication is disabled"""
self._disable_inbound_repl(self.dnsname_dc1)
ccache_name = self.get_creds_ccache_name()
# Tunnel the command line credentials down to the
# subcommand to avoid a new kinit
cmdline_auth = "--krb5-ccache=%s" % ccache_name
# bin/samba-tool drs <drs_command> <cmdline_auth>
cmd_list = ["drs", "replicate", cmdline_auth]
nc_dn = self.domain_dn
# bin/samba-tool drs replicate <Dest_DC_NAME> <Src_DC_NAME> <Naming Context>
cmd_list += [self.dnsname_dc1, self.dnsname_dc2, nc_dn]
(result, out, err) = self.runsubcmd(*cmd_list)
self.assertCmdFail(result)
self.assertTrue('WERR_DS_DRA_SINK_DISABLED' in err)
def test_ReplDisabledForced(self):
"""Tests we can force replicate when replication is disabled"""
self._disable_inbound_repl(self.dnsname_dc1)
out = self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
def test_ReplLocal(self):
"""Tests we can replicate direct to the local db"""
self._enable_inbound_repl(self.dnsname_dc1)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=False, local=True, full_sync=True)
def _create_ou(self, samdb, name):
ldif = """
dn: %s,%s
objectClass: organizationalUnit
""" % (name, self.domain_dn)
samdb.add_ldif(ldif)
res = samdb.search(base="%s,%s" % (name, self.domain_dn),
scope=SCOPE_BASE, attrs=["objectGUID"])
return self._GUID_string(res[0]["objectGUID"][0])
def _check_deleted(self, sam_ldb, guid):
# search the user by guid as it may be deleted
res = sam_ldb.search(base='<GUID=%s>' % guid,
controls=["show_deleted:1"],
attrs=["isDeleted", "objectCategory", "ou"])
self.assertEquals(len(res), 1)
ou_cur = res[0]
# Deleted Object base DN
dodn = self._deleted_objects_dn(sam_ldb)
# now check properties of the user
name_cur = ou_cur["ou"][0]
self.assertEquals(ou_cur["isDeleted"][0],"TRUE")
self.assertTrue(not("objectCategory" in ou_cur))
self.assertTrue(dodn in str(ou_cur["dn"]),
"OU %s is deleted but it is not located under %s!" % (name_cur, dodn))
def test_ReplConflictsFullSync(self):
"""Tests that objects created in conflict become conflict DNs (honour full sync override)"""
# First confirm local replication (so when we test against windows, this fails fast without creating objects)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, local=True, forced=True, full_sync=True)
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC1 object created first
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Full Sync")
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Full Sync")
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, local=True, forced=True, full_sync=True)
        # Check that DC2 got the DC1 object, and OU1 was made into a conflict
res1 = self.ldb_dc2.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc2.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertFalse('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC2
self.ldb_dc2.delete('<GUID=%s>' % self.ou1)
self.ldb_dc2.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=True)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
def test_ReplConflictsRemoteWin(self):
"""Tests that objects created in conflict become conflict DNs"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC1 object created first
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Conflict")
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Conflict")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC2 got the DC1 object, and OU1 was made into a conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
def test_ReplConflictsLocalWin(self):
"""Tests that objects created in conflict become conflict DNs"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC2 object created first
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Local Conflict")
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Local Conflict")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC2 got the DC1 object, and OU2 was made into a conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]), "Got %s for %s" % (str(res2[0]["name"][0]), self.ou2))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
def test_ReplConflictsRemoteWin_with_child(self):
"""Tests that objects created in conflict become conflict DNs"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC1 object created first
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Parent Remote Conflict")
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Parent Remote Conflict")
# Create children on DC2
ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Test Parent Remote Conflict")
ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Test Parent Remote Conflict")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC2 got the DC1 object, and self.ou1 was made into a conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1, ["tree_delete:1"])
self.ldb_dc1.delete('<GUID=%s>' % self.ou2, ["tree_delete:1"])
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
self._check_deleted(self.ldb_dc1, ou1_child)
self._check_deleted(self.ldb_dc1, ou2_child)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, ou1_child)
self._check_deleted(self.ldb_dc2, ou2_child)
def test_ReplConflictsRenamedVsNewRemoteWin(self):
"""Tests resolving a DN conflict between a renamed object and a new object"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create an OU and rename it on DC1
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Rename Conflict orig")
self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Remote Rename Conflict,%s" % self.domain_dn)
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
# create a conflicting object with the same DN on DC2
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Rename Conflict")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
# Check that DC2 got the DC1 object, and SELF.OU1 was made into conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
def test_ReplConflictsRenamedVsNewLocalWin(self):
"""Tests resolving a DN conflict between a renamed object and a new object"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, where the DC2 object has been renamed
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Rename Local Conflict orig")
self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Rename Local Conflict,%s" % self.domain_dn)
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Rename Local Conflict")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
# Check that DC2 got the DC1 object, and OU2 was made into conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
def test_ReplConflictsRenameRemoteWin(self):
"""Tests that objects created in conflict become conflict DNs"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC1 object created first
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Rename Conflict")
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Rename Conflict 2")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Remote Rename Conflict 3,%s" % self.domain_dn)
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Remote Rename Conflict 3,%s" % self.domain_dn)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC2 got the DC1 object, and self.ou1 was made into a conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
def test_ReplConflictsRenameRemoteWin_with_child(self):
"""Tests that objects created in conflict become conflict DNs"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC1 object created first
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Parent Remote Rename Conflict")
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Parent Remote Rename Conflict 2")
# Create children on DC2
ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Test Parent Remote Rename Conflict")
ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Test Parent Remote Rename Conflict 2")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Parent Remote Rename Conflict 3,%s" % self.domain_dn)
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Parent Remote Rename Conflict 3,%s" % self.domain_dn)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC2 got the DC1 object, and self.ou1 was made into a conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1, ["tree_delete:1"])
self.ldb_dc1.delete('<GUID=%s>' % self.ou2, ["tree_delete:1"])
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
self._check_deleted(self.ldb_dc1, ou1_child)
self._check_deleted(self.ldb_dc1, ou2_child)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, ou1_child)
self._check_deleted(self.ldb_dc2, ou2_child)
def test_ReplConflictsRenameLocalWin(self):
"""Tests that objects created in conflict become conflict DNs"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC1 object created first
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Rename Local Conflict")
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Rename Local Conflict 2")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Rename Local Conflict 3,%s" % self.domain_dn)
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Rename Local Conflict 3,%s" % self.domain_dn)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that the local (DC1) rename won, and self.ou2 was made into a conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
def test_ReplLostAndFound(self):
"""Tests that objects created under a OU deleted eleswhere end up in lostAndFound"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create two OUs on DC2
self.ou1 = self._create_ou(self.ldb_dc2, "OU=Deleted parent")
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Deleted parent 2")
# replicate them from DC2 to DC1
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
# Create children on DC2
ou1_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Deleted parent")
ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Deleted parent 2")
# Replicate from DC2
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that the sub-OUs are now in lostAndFound and that one or
        # other child was made into a conflict DN
res1 = self.ldb_dc1.search(base="<GUID=%s>" % ou1_child,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % ou2_child,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % ou1_child in str(res1[0]["name"][0]) or 'CNF:%s' % ou2_child in str(res2[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete all objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % ou1_child)
self.ldb_dc1.delete('<GUID=%s>' % ou2_child)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
# Check all deleted on DC1
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
self._check_deleted(self.ldb_dc1, ou1_child)
self._check_deleted(self.ldb_dc1, ou2_child)
# Check all deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
self._check_deleted(self.ldb_dc2, ou1_child)
self._check_deleted(self.ldb_dc2, ou2_child)
def test_ReplRenames(self):
"""Tests that objects created under a OU deleted eleswhere end up in lostAndFound"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create two OUs on DC2
self.ou1 = self._create_ou(self.ldb_dc2, "OU=Original parent")
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Original parent 2")
# replicate them from DC2 to DC1
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
# Create children on DC1
ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Original parent")
ou2_child = self._create_ou(self.ldb_dc1, "OU=Test Child 2,OU=Original parent")
ou3_child = self._create_ou(self.ldb_dc1, "OU=Test Case Child,OU=Original parent")
# replicate them from DC1 to DC2
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self.ldb_dc1.rename("<GUID=%s>" % ou2_child, "OU=Test Child 3,OU=Original parent 2,%s" % self.domain_dn)
self.ldb_dc1.rename("<GUID=%s>" % ou1_child, "OU=Test Child 2,OU=Original parent 2,%s" % self.domain_dn)
self.ldb_dc1.rename("<GUID=%s>" % ou2_child, "OU=Test Child,OU=Original parent 2,%s" % self.domain_dn)
self.ldb_dc1.rename("<GUID=%s>" % ou3_child, "OU=Test CASE Child,OU=Original parent,%s" % self.domain_dn)
self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Original parent 3,%s" % self.domain_dn)
self.ldb_dc2.rename("<GUID=%s>" % self.ou1, "OU=Original parent 2,%s" % self.domain_dn)
# replicate them from DC1 to DC2
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
# Check the sub-OUs are now under Original Parent 3 (original
# parent 2 for Test CASE Child), and both have the right names
# Check that DC2 got the DC1 object, and the renames are all correct
res1 = self.ldb_dc2.search(base="<GUID=%s>" % ou1_child,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc2.search(base="<GUID=%s>" % ou2_child,
scope=SCOPE_BASE, attrs=["name"])
res3 = self.ldb_dc2.search(base="<GUID=%s>" % ou3_child,
scope=SCOPE_BASE, attrs=["name"])
print res1[0].dn
print res2[0].dn
print res3[0].dn
self.assertEqual('Test Child 2', res1[0]["name"][0])
self.assertEqual('Test Child', res2[0]["name"][0])
self.assertEqual('Test CASE Child', res3[0]["name"][0])
self.assertEqual(str(res1[0].dn), "OU=Test Child 2,OU=Original parent 3,%s" % self.domain_dn)
self.assertEqual(str(res2[0].dn), "OU=Test Child,OU=Original parent 3,%s" % self.domain_dn)
self.assertEqual(str(res3[0].dn), "OU=Test CASE Child,OU=Original parent 2,%s" % self.domain_dn)
# replicate them from DC2 to DC1
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
# Check that DC1 got the DC2 object, and the renames are all correct
res1 = self.ldb_dc1.search(base="<GUID=%s>" % ou1_child,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % ou2_child,
scope=SCOPE_BASE, attrs=["name"])
res3 = self.ldb_dc1.search(base="<GUID=%s>" % ou3_child,
scope=SCOPE_BASE, attrs=["name"])
print res1[0].dn
print res2[0].dn
print res3[0].dn
self.assertEqual('Test Child 2', res1[0]["name"][0])
self.assertEqual('Test Child', res2[0]["name"][0])
self.assertEqual('Test CASE Child', res3[0]["name"][0])
self.assertEqual(str(res1[0].dn), "OU=Test Child 2,OU=Original parent 3,%s" % self.domain_dn)
self.assertEqual(str(res2[0].dn), "OU=Test Child,OU=Original parent 3,%s" % self.domain_dn)
self.assertEqual(str(res3[0].dn), "OU=Test CASE Child,OU=Original parent 2,%s" % self.domain_dn)
# Delete all objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % ou1_child)
self.ldb_dc1.delete('<GUID=%s>' % ou2_child)
self.ldb_dc1.delete('<GUID=%s>' % ou3_child)
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
# Check all deleted on DC1
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
self._check_deleted(self.ldb_dc1, ou1_child)
self._check_deleted(self.ldb_dc1, ou2_child)
self._check_deleted(self.ldb_dc1, ou3_child)
# Check all deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
self._check_deleted(self.ldb_dc2, ou1_child)
self._check_deleted(self.ldb_dc2, ou2_child)
self._check_deleted(self.ldb_dc2, ou3_child)
def reanimate_object(self, samdb, guid, new_dn):
"""Re-animates a deleted object"""
res = samdb.search(base="<GUID=%s>" % guid, attrs=["isDeleted"],
controls=['show_deleted:1'], scope=SCOPE_BASE)
if len(res) != 1:
return
msg = ldb.Message()
msg.dn = res[0].dn
msg["isDeleted"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "isDeleted")
msg["distinguishedName"] = ldb.MessageElement([new_dn], ldb.FLAG_MOD_REPLACE, "distinguishedName")
samdb.modify(msg, ["show_deleted:1"])
def test_ReplReanimationConflict(self):
"""
Checks that if a reanimated object conflicts with a new object, then
the conflict is resolved correctly.
"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# create an object, "accidentally" delete it, and replicate the changes to both DCs
self.ou1 = self._create_ou(self.ldb_dc2, "OU=Conflict object")
self.ldb_dc2.delete('<GUID=%s>' % self.ou1)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
# Now pretend that the admin for one DC resolves the problem by
# re-animating the object...
self.reanimate_object(self.ldb_dc1, self.ou1, "OU=Conflict object,%s" % self.domain_dn)
        # ...whereas another admin just creates an OU with the same name
# again on a different DC
time.sleep(1)
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Conflict object")
# Now sync the DCs to resolve the conflict
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that the latest change won and self.ou1 was made into a conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
self.assertFalse('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
# Delete both objects by GUID on DC1
self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
|
lidan-fnst/samba
|
source4/torture/drs/python/replica_sync.py
|
Python
|
gpl-3.0
| 36,840
| 0.004642
|
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
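# For example, isabs('c:\\foo') and isabs('\\foo') are true, while
# isabs('foo') and isabs('c:foo') are false.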
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
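# A few worked examples of the cases described above:
#   join('c:\\a', 'b')    ->  'c:\\a\\b'
#   join('c:\\a', '\\b')  ->  '\\b'      (case 3 above)
#   join('c:', 'd:\\')    ->  'd:\\'     (case 4 above)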
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
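# For example, splitdrive('c:\\dir\\file') -> ('c:', '\\dir\\file') and
# splitdrive('\\dir\\file') -> ('', '\\dir\\file').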
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
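# For example (raw strings used for readability):
#   splitunc(r'\\host\mount\dir') -> (r'\\host\mount', r'\dir')
#   splitunc(r'c:\dir')           -> ('', r'c:\dir')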
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
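# For example, split('c:\\foo\\bar') -> ('c:\\foo', 'bar') and
# split('\\foo\\') -> ('\\foo', '').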
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
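# For example, assuming HOME (or USERPROFILE) is set to 'C:\\Users\\jane':
#   expanduser('~\\docs')     -> 'C:\\Users\\jane\\docs'
#   expanduser('~bob\\docs')  -> 'C:\\Users\\bob\\docs'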
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
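# A worked example of the rules above, assuming USER is set to 'jane'
# and TEMP is unset:
#   expandvars('$USER ${USER} %USER% %TEMP%') -> 'jane jane jane %TEMP%'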
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
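# For example, normpath('A//B/./C/..') -> 'A\\B' and
# normpath('c:/foo/../bar') -> 'c:\\bar'.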
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
if isinstance(path, unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
elif isinstance(path, unicode):
path = os.getcwdu()
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
abs = abspath(normpath(path))
prefix, rest = splitunc(abs)
is_unc = bool(prefix)
if not is_unc:
prefix, rest = splitdrive(abs)
return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_is_unc, start_prefix, start_list = _abspath_split(start)
path_is_unc, path_prefix, path_list = _abspath_split(path)
if path_is_unc ^ start_is_unc:
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
if path_prefix.lower() != start_prefix.lower():
if path_is_unc:
raise ValueError("path is on UNC root %s, start on UNC root %s"
% (path_prefix, start_prefix))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_prefix, start_prefix))
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if e1.lower() != e2.lower():
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
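# For example (both arguments absolute, so the cwd does not matter):
#   relpath('c:\\a\\b', 'c:\\a\\x') -> '..\\b'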
|
ktan2020/legacy-automation
|
win/Lib/ntpath.py
|
Python
|
mit
| 18,605
| 0.002634
|
#!/usr/bin/python
import urllib
print dir(urllib)
help(urllib.urlopen)
|
richardcardona/learnpython-exercises
|
dirhelp.py
|
Python
|
apache-2.0
| 75
| 0.013333
|
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mariadbdatabase_info
version_added: "2.9"
short_description: Get Azure MariaDB Database facts
description:
- Get facts of MariaDB Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
type: str
server_name:
description:
- The name of the server.
required: True
type: str
name:
description:
- The name of the database.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
- Matti Ranta (@techknowlogick)
'''
EXAMPLES = '''
- name: Get instance of MariaDB Database
azure_rm_mariadbdatabase_info:
resource_group: myResourceGroup
server_name: server_name
name: database_name
- name: List instances of MariaDB Database
azure_rm_mariadbdatabase_info:
resource_group: myResourceGroup
server_name: server_name
'''
RETURN = '''
databases:
description:
- A list of dictionaries containing facts for MariaDB Databases.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
ver/databases/db1"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testrg
server_name:
description:
- Server name.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: db1
charset:
description:
- The charset of the database.
returned: always
type: str
sample: UTF8
collation:
description:
- The collation of the database.
returned: always
type: str
sample: English_United States.1252
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.resource_group = None
self.server_name = None
self.name = None
super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13')
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.resource_group is not None and
self.server_name is not None and
self.name is not None):
self.results['databases'] = self.get()
elif (self.resource_group is not None and
self.server_name is not None):
self.results['databases'] = self.list_by_server()
return self.results
def get(self):
response = None
results = []
try:
response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
server_name=self.server_name,
database_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for Databases.')
if response is not None:
results.append(self.format_item(response))
return results
def list_by_server(self):
response = None
results = []
try:
response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
server_name=self.server_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
if response is not None:
for item in response:
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'resource_group': self.resource_group,
'server_name': self.server_name,
'name': d['name'],
'charset': d['charset'],
'collation': d['collation']
}
return d
def main():
AzureRMMariaDbDatabaseInfo()
if __name__ == '__main__':
main()
|
ilpianista/ansible
|
test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py
|
Python
|
gpl-3.0
| 6,304
| 0.001904
|
#
# Copyright (C) 2013 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Django URL configuration for messages tool"""
from django.conf.urls import url
from nav.web.messages import views
from nav.web.messages.feeds import ActiveMessagesFeed
urlpatterns = [
url(r'^$', views.redirect_to_active),
url(r'^active/$', views.active, name='messages-home'),
url(r'^create/$', views.save, name='messages-create'),
url(r'^edit/(?P<message_id>\d+)$', views.save, name='messages-edit'),
url(r'^active/$', views.active, name='messages-active'),
url(r'^scheduled/$', views.planned, name='messages-planned'),
url(r'^archive/$', views.historic, name='messages-historic'),
url(r'^view/(?P<message_id>\d+)$', views.view, name='messages-view'),
url(r'^expire/(?P<message_id>\d+)$', views.expire, name='messages-expire'),
url(r'^followup/(?P<message_id>\d+)$', views.followup, name='messages-followup'),
url(r'^rss/$', ActiveMessagesFeed(), name='messages-rss'),
]
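# A usage sketch: with these route names registered, links can be built
# with reverse(), e.g. reverse('messages-view', kwargs={'message_id': 1})
# resolves to 'view/1' relative to wherever this urlconf is included.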
|
hmpf/nav
|
python/nav/web/messages/urls.py
|
Python
|
gpl-3.0
| 1,573
| 0.000636
|
#!/usr/bin/env python
""" cdecl.py - parse c declarations
(c) 2002, 2003, 2004, 2005 Simon Burton <simon@arrowtheory.com>
Released under GNU LGPL license.
version 0.xx
"""
import string
class Node(list):
" A node in a parse tree "
def __init__(self,*items,**kw):
list.__init__( self, items )
self.lock1 = 0 # these two should be properties (simplifies serializing)
self.lock2 = 0
self.verbose = 0
for key in kw.keys():
self.__dict__[key] = kw[key]
def __str__(self):
attrs = []
for item in self:
if isinstance(item,Node):
attrs.append( str(item) )
else:
attrs.append( repr(item) )
attrs = ','.join(attrs)
return "%s(%s)"%(self.__class__.__name__,attrs)
def safe_repr( self, tank ):
tank[ str(self) ] = None
attrs = []
for item in self:
if isinstance(item,Node):
attrs.append( item.safe_repr(tank) ) # can we use repr here ?
else:
attrs.append( repr(item) )
# this is the dangerous bit:
for key, val in self.__dict__.items():
if isinstance(val,Node):
if str(val) not in tank:
attrs.append( '%s=%s'%(key,val.safe_repr(tank)) )
else:
attrs.append( '%s=%s'%(key,repr(val)) )
attrs = ','.join(attrs)
return "%s(%s)"%(self.__class__.__name__,attrs)
def __repr__(self):
#attrs = ','.join( [repr(item) for item in self] + \
# [ '%s=%s'%(key,repr(val)) for key,val in self.__dict__.items() ] )
#return "%s%s"%(self.__class__.__name__,tuple(attrs))
return self.safe_repr({})
def __eq__(self,other):
if not isinstance(other,Node):
return 0
if len(self)!=len(other):
return 0
for i in range(len(self)):
if not self[i]==other[i]:
return 0
return 1
def __ne__(self,other):
return not self==other
def filter(self,cls):
return [x for x in self if isinstance(x,cls)]
#return filter( lambda x:isinstance(x,cls), self )
def deepfilter(self,cls):
" bottom-up "
return [x for x in self.nodes() if isinstance(x,cls)]
def find(self,cls):
for x in self:
if isinstance(x,cls):
return x
return None
def deepfind(self,cls):
" bottom-up isinstance search "
for x in self:
if isinstance(x,Node):
if isinstance(x,cls):
return x
node = x.deepfind(cls)
if node is not None:
return node
if isinstance(self,cls):
return self
return None
def leaves(self):
for i in self:
if isinstance( i, Node ):
for j in i.leaves():
yield j
else:
yield i
def nodes(self):
" bottom-up iteration "
for i in self:
if isinstance( i, Node ):
for j in i.nodes():
yield j
yield self
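    # For example, list(Node('a', Node(1, 2)).leaves()) == ['a', 1, 2],
    # while nodes() yields the inner Node(1, 2) before the outer node
    # (bottom-up traversal).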
def deeplen(self):
i=0
if not self.lock2:
self.lock2=1
for item in self:
i+=1
if isinstance(item,Node):
i+=item.deeplen()
self.lock2=0
else:
i+=1
return i
def deepstr(self,level=0,comment=False,nl='\n',indent=' '):
if self.deeplen() < 4:
nl = ""; indent = ""
#else:
#nl="\n"; indent = " "
s = []
if not self.lock1:
self.lock1=1
for item in self:
if isinstance(item,Node):
s.append( indent*(level+1)+item.deepstr(level+1,False,nl,indent) )
else:
s.append( indent*(level+1)+repr(item) )
self.lock1=0
else:
for item in self:
if isinstance(item,Node):
s.append( indent*(level+1)+"<recursion...>" )
else:
s.append( indent*(level+1)+"%s"%repr(item) )
s = "%s(%s)"%(self.__class__.__name__,nl+string.join(s,","+nl))
if comment:
s = '#' + s.replace('\n','\n#')
return s
def clone(self):
items = []
for item in self:
if isinstance(item,Node):
item = item.clone()
items.append(item)
# we skip any attributes...
return self.__class__(*items)
def fastclone(self):
# XX is it faster ???
#print "clone"
nodes = [self]
idxs = [0]
itemss = [ [] ]
while nodes:
assert len(nodes)==len(idxs)==len(itemss)
node = nodes[-1]
items = itemss[-1]
assert idxs[-1] == len(items)
while idxs[-1]==len(node):
# pop
_node = node.__class__( *items )
_node.__dict__.update( node.__dict__ )
nodes.pop(-1)
idxs.pop(-1)
itemss.pop(-1)
if not nodes:
#for node0 in self.nodes():
#for node1 in _node.nodes():
#assert node0 is not node1
#assert _node == self
return _node # Done !!
node = nodes[-1]
items = itemss[-1]
items.append(_node) # set
idxs[-1] += 1
assert idxs[-1] == len(items)
#assert idxs[-1] < len(node), str( (node,nodes,idxs,itemss) )
_node = node[ idxs[-1] ]
# while idxs[-1]<len(node):
if isinstance(_node,Node):
# push
nodes.append( _node )
idxs.append( 0 )
itemss.append( [] )
else:
# next
items.append(_node)
idxs[-1] += 1
assert idxs[-1] == len(items)
def expose(self,cls):
' expose children of any <cls> instance '
# children first
for x in self:
if isinstance(x,Node):
x.expose(cls)
# now the tricky bit
i=0
while i < len(self):
if isinstance(self[i],cls):
node=self.pop(i)
for x in node:
assert not isinstance(x,cls)
# pass on some attributes
if hasattr(node,'lines') and not hasattr(x,'lines'):
x.lines=node.lines
if hasattr(node,'file') and not hasattr(x,'file'):
x.file=node.file
self.insert(i,x) # expose
i=i+1
assert i<=len(self)
else:
i=i+1
def get_parent( self, item ): # XX 25% CPU time here XX
assert self != item
if item in self:
return self
for child in self:
if isinstance(child, Node):
parent = child.get_parent(item)
if parent is not None:
return parent
return None
def expose_node( self, item ):
assert self != item
parent = self.get_parent(item)
idx = parent.index( item )
parent[idx:idx+1] = item[:]
def delete(self,cls):
' delete any <cls> subtree '
for x in self:
if isinstance(x,Node):
x.delete(cls)
# now the tricky bit
i=0
while i < len(self):
if isinstance(self[i],cls):
self.pop(i)
else:
i=i+1
def deeprm(self,item):
' remove any items matching <item> '
for x in self:
if isinstance(x,Node):
x.deeprm(item)
# now the tricky bit
i=0
while i < len(self):
if self[i] == item:
self.pop(i)
else:
i=i+1
def idem(self,cls):
" <cls> is made idempotent "
# children first
for x in self:
if isinstance(x,Node):
x.idem(cls)
if isinstance(self,cls):
# now the tricky bit
i=0
while i < len(self):
if isinstance(self[i],cls):
node = self.pop(i)
for x in node:
assert not isinstance(x,cls)
self.insert(i,x) # idempotent
i=i+1
assert i<=len(self)
else:
i=i+1
if __name__=="__main__":
node = Node( 'a', Node(1,2), Node(Node(Node(),1)) )
print node
print node.clone()
|
jpflori/mpir
|
yasm/tools/python-yasm/pyxelator/node.py
|
Python
|
gpl-3.0
| 8,966
| 0.019295
|
import json, logging, os, re, subprocess, shlex
from tools import get_category_by_status
log = logging.getLogger()
meta_files = ['Disassembly', 'Stacktrace', 'Registers',
              'SegvAnalysis', 'ProcMaps', "BootLog", "CoreDump",
              "BootDmesg", "syslog", "UbiquityDebug.gz", "Casper.gz",
              "UbiquityPartman.gz", "UbiquityDm.gz", "GdmLog", "XorgLog",
              "log", "Log"]
def get(metadata, bugdir):
indicators = {}
# look for file arg; this needs work TODO
cmdline = None
uri = None
for line in metadata['description'].splitlines():
if "proccmdline" in line.lower():
cmdline = ":".join(line.split(":")[1:]).strip()
try:
toks = shlex.split(cmdline)
except ValueError as e:
log.error("error while parsing cmdline: %s" % cmdline)
log.exception(e)
continue
if len(toks) > 1:
if ("//" in toks[-1]) or ("." in toks[-1]):
uri = toks[-1].strip()
indicators['cmdline'] = cmdline
indicators['cmdline_uri'] = uri
# look for interesting attachments; ugly
interesting_files = []
for f in os.listdir(bugdir):
fpath = os.path.join(bugdir, f)
if not os.path.isfile(fpath):
continue
for fn in meta_files:
if fn.lower() in f.lower():
break
else:
# no break in loop above, i.e. still interested
out = subprocess.check_output(["file", fpath])
ftype = out.split(":")[-1]
if ftype.strip() == "empty":
continue
for tstr in ["ASCII", "text", "core file"]:
if tstr in ftype:
break
else:
# only runs if we didn't break, i.e., this might be interesting
interesting_files.append(f)
indicators['files'] = interesting_files
    # TODO: look for recv, etc. in stacks (should this be in exploitability maybe (remote?))
return indicators
|
jfoote/vulture
|
vlib/analyzers/reproducibility.py
|
Python
|
mit
| 2,072
| 0.005309
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ..config import BaseAnsibleContainerConfig
from ..utils.visibility import getLogger
logger = getLogger(__name__)
class K8sBaseConfig(BaseAnsibleContainerConfig):
@property
def image_namespace(self):
namespace = self.project_name
if self._config.get('settings', {}).get('k8s_namespace', {}).get('name'):
namespace = self._config['settings']['k8s_namespace']['name']
return namespace
def set_env(self, env):
super(K8sBaseConfig, self).set_env(env)
if self._config.get('volumes'):
for vol_key in self._config['volumes']:
# Remove settings not meant for this engine
for engine_name in self.remove_engines:
if engine_name in self._config['volumes'][vol_key]:
del self._config['volumes'][vol_key][engine_name]
|
chouseknecht/ansible-container
|
container/k8s/base_config.py
|
Python
|
lgpl-3.0
| 932
| 0.001073
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import NeuralLayer
from deepy.utils import build_activation, FLOATX
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
OUTPUT_TYPES = ["sequence", "one"]
INPUT_TYPES = ["sequence", "one"]
class RNN(NeuralLayer):
"""
Recurrent neural network layer.
"""
def __init__(self, hidden_size, input_type="sequence", output_type="sequence", vector_core=None,
hidden_activation="tanh", hidden_init=None, input_init=None, steps=None,
persistent_state=False, reset_state_for_input=None, batch_size=None,
go_backwards=False, mask=None, second_input_size=None, second_input=None):
super(RNN, self).__init__("rnn")
self._hidden_size = hidden_size
self.output_dim = self._hidden_size
self._input_type = input_type
self._output_type = output_type
self._hidden_activation = hidden_activation
self._hidden_init = hidden_init
self._vector_core = vector_core
self._input_init = input_init
self.persistent_state = persistent_state
self.reset_state_for_input = reset_state_for_input
self.batch_size = batch_size
self._steps = steps
self._go_backwards = go_backwards
self._mask = mask.dimshuffle((1,0)) if mask else None
self._second_input_size = second_input_size
self._second_input = second_input
self._sequence_map = OrderedDict()
if input_type not in INPUT_TYPES:
raise Exception("Input type of RNN is wrong: %s" % input_type)
if output_type not in OUTPUT_TYPES:
raise Exception("Output type of RNN is wrong: %s" % output_type)
if self.persistent_state and not self.batch_size:
raise Exception("Batch size must be set for persistent state mode")
if mask and input_type == "one":
raise Exception("Mask only works with sequence input")
def _hidden_preact(self, h):
return T.dot(h, self.W_h) if not self._vector_core else h * self.W_h
def step(self, *vars):
# Parse sequence
sequence_map = dict(zip(self._sequence_map.keys(), vars[:len(self._sequence_map)]))
if self._input_type == "sequence":
x = sequence_map["x"]
h = vars[-1]
# Reset part of the state on condition
        if self.reset_state_for_input is not None:
h = h * T.neq(x[:, self.reset_state_for_input], 1).dimshuffle(0, 'x')
# RNN core step
z = x + self._hidden_preact(h) + self.B_h
else:
h = vars[-1]
z = self._hidden_preact(h) + self.B_h
# Second input
if "second_input" in sequence_map:
z += sequence_map["second_input"]
new_h = self._hidden_act(z)
# Apply mask
if "mask" in sequence_map:
mask = sequence_map["mask"].dimshuffle(0, 'x')
new_h = mask * new_h + (1 - mask) * h
return new_h
def produce_input_sequences(self, x, mask=None, second_input=None):
self._sequence_map.clear()
if self._input_type == "sequence":
self._sequence_map["x"] = T.dot(x, self.W_i)
# Mask
if mask:
# (batch)
self._sequence_map["mask"] = mask
elif self._mask:
# (time, batch)
self._sequence_map["mask"] = self._mask
# Second input
if second_input:
self._sequence_map["second_input"] = T.dot(second_input, self.W_i2)
elif self._second_input:
self._sequence_map["second_input"] = T.dot(self._second_input, self.W_i2)
return self._sequence_map.values()
def produce_initial_states(self, x):
h0 = T.alloc(np.cast[FLOATX](0.), x.shape[0], self._hidden_size)
if self._input_type == "sequence":
if self.persistent_state:
h0 = self.state
else:
h0 = x
return [h0]
def output(self, x):
if self._input_type == "sequence":
# Move middle dimension to left-most position
# (sequence, batch, value)
sequences = self.produce_input_sequences(x.dimshuffle((1,0,2)))
else:
sequences = self.produce_input_sequences(None)
step_outputs = self.produce_initial_states(x)
hiddens, _ = theano.scan(self.step, sequences=sequences, outputs_info=step_outputs,
n_steps=self._steps, go_backwards=self._go_backwards)
# Save persistent state
if self.persistent_state:
self.register_updates((self.state, hiddens[-1]))
if self._output_type == "one":
return hiddens[-1]
elif self._output_type == "sequence":
return hiddens.dimshuffle((1,0,2))
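    # Note: theano.scan iterates over the leading axis, which is why sequence
    # input is transposed to (time, batch, value) in output() above and the
    # scan result is transposed back to (batch, time, value).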
def setup(self):
if self._input_type == "one" and self.input_dim != self._hidden_size:
raise Exception("For RNN receives one vector as input, "
"the hidden size should be same as last output dimension.")
self._setup_params()
self._setup_functions()
def _setup_functions(self):
self._hidden_act = build_activation(self._hidden_activation)
def _setup_params(self):
if not self._vector_core:
self.W_h = self.create_weight(self._hidden_size, self._hidden_size, suffix="h", initializer=self._hidden_init)
else:
self.W_h = self.create_bias(self._hidden_size, suffix="h")
self.W_h.set_value(self.W_h.get_value() + self._vector_core)
self.B_h = self.create_bias(self._hidden_size, suffix="h")
self.register_parameters(self.W_h, self.B_h)
if self.persistent_state:
self.state = self.create_matrix(self.batch_size, self._hidden_size, "rnn_state")
self.register_free_parameters(self.state)
else:
self.state = None
if self._input_type == "sequence":
self.W_i = self.create_weight(self.input_dim, self._hidden_size, suffix="i", initializer=self._input_init)
self.register_parameters(self.W_i)
if self._second_input_size:
self.W_i2 = self.create_weight(self._second_input_size, self._hidden_size, suffix="i2", initializer=self._input_init)
self.register_parameters(self.W_i2)
|
rldotai/deepy
|
deepy/layers/recurrent.py
|
Python
|
mit
| 6,446
| 0.003258
|
"""
Utility Mixins for unit tests
"""
import json
import sys
from django.conf import settings
from django.urls import clear_url_caches, resolve
from django.test import TestCase
from mock import patch
from util.db import CommitOnSuccessManager, OuterAtomic
class UrlResetMixin(object):
"""Mixin to reset urls.py before and after a test
Django memoizes the function that reads the urls module (whatever module
urlconf names). The module itself is also stored by python in sys.modules.
To fully reload it, we need to reload the python module, and also clear django's
cache of the parsed urls.
However, the order in which we do this doesn't matter, because neither one will
get reloaded until the next request
Doing this is expensive, so it should only be added to tests that modify settings
that affect the contents of urls.py
"""
URLCONF_MODULES = None
def reset_urls(self, urlconf_modules=None):
"""Reset `urls.py` for a set of Django apps."""
if urlconf_modules is None:
urlconf_modules = [settings.ROOT_URLCONF]
if self.URLCONF_MODULES is not None:
urlconf_modules.extend(self.URLCONF_MODULES)
for urlconf in urlconf_modules:
if urlconf in sys.modules:
reload(sys.modules[urlconf])
clear_url_caches()
# Resolve a URL so that the new urlconf gets loaded
resolve('/')
def setUp(self):
"""Reset Django urls before tests and after tests
If you need to reset `urls.py` from a particular Django app (or apps),
specify these modules by setting the URLCONF_MODULES class attribute.
Examples:
# Reload only the root urls.py
URLCONF_MODULES = None
# Reload urls from my_app
URLCONF_MODULES = ['myapp.url']
# Reload urls from my_app and another_app
URLCONF_MODULES = ['myapp.url', 'another_app.urls']
"""
super(UrlResetMixin, self).setUp()
self.reset_urls()
self.addCleanup(self.reset_urls)
class EventTestMixin(object):
"""
Generic mixin for verifying that events were emitted during a test.
"""
def setUp(self, tracker):
super(EventTestMixin, self).setUp()
patcher = patch(tracker)
self.mock_tracker = patcher.start()
self.addCleanup(patcher.stop)
def assert_no_events_were_emitted(self):
"""
Ensures no events were emitted since the last event related assertion.
"""
self.assertFalse(self.mock_tracker.emit.called) # pylint: disable=maybe-no-member
def assert_event_emitted(self, event_name, **kwargs):
"""
Verify that an event was emitted with the given parameters.
"""
self.mock_tracker.emit.assert_any_call( # pylint: disable=maybe-no-member
event_name,
kwargs
)
def assert_event_emission_count(self, event_name, expected_count):
"""
Verify that the event with the given name was emitted
a specific number of times.
"""
actual_count = 0
for call_args in self.mock_tracker.emit.call_args_list:
if call_args[0][0] == event_name:
actual_count += 1
self.assertEqual(actual_count, expected_count)
def reset_tracker(self):
"""
Reset the mock tracker in order to forget about old events.
"""
self.mock_tracker.reset_mock()
def get_latest_call_args(self):
"""
Return the arguments of the latest call to emit.
"""
return self.mock_tracker.emit.call_args[0]
class PatchMediaTypeMixin(object):
"""
Generic mixin for verifying unsupported media type in PATCH
"""
def test_patch_unsupported_media_type(self):
response = self.client.patch(
self.url,
json.dumps({}),
content_type=self.unsupported_media_type
)
self.assertEqual(response.status_code, 415)
def patch_testcase():
"""
Disable commit_on_success decorators for tests in TestCase subclasses.
Since tests in TestCase classes are wrapped in an atomic block, we
cannot use transaction.commit() or transaction.rollback().
https://docs.djangoproject.com/en/1.8/topics/testing/tools/#django.test.TransactionTestCase
"""
def enter_atomics_wrapper(wrapped_func):
"""
Wrapper for TestCase._enter_atomics
"""
wrapped_func = wrapped_func.__func__
def _wrapper(*args, **kwargs):
"""
Method that performs atomic-entering accounting.
"""
CommitOnSuccessManager.ENABLED = False
OuterAtomic.ALLOW_NESTED = True
if not hasattr(OuterAtomic, 'atomic_for_testcase_calls'):
OuterAtomic.atomic_for_testcase_calls = 0
OuterAtomic.atomic_for_testcase_calls += 1
return wrapped_func(*args, **kwargs)
return classmethod(_wrapper)
def rollback_atomics_wrapper(wrapped_func):
"""
Wrapper for TestCase._rollback_atomics
"""
wrapped_func = wrapped_func.__func__
def _wrapper(*args, **kwargs):
"""
Method that performs atomic-rollback accounting.
"""
CommitOnSuccessManager.ENABLED = True
OuterAtomic.ALLOW_NESTED = False
OuterAtomic.atomic_for_testcase_calls -= 1
return wrapped_func(*args, **kwargs)
return classmethod(_wrapper)
# pylint: disable=protected-access
TestCase._enter_atomics = enter_atomics_wrapper(TestCase._enter_atomics)
TestCase._rollback_atomics = rollback_atomics_wrapper(TestCase._rollback_atomics)
def patch_sessions():
"""
Override the Test Client's session and login to support safe cookies.
"""
from openedx.core.djangoapps.safe_sessions.testing import safe_cookie_test_session_patch
safe_cookie_test_session_patch()
|
philanthropy-u/edx-platform
|
common/djangoapps/util/testing.py
|
Python
|
agpl-3.0
| 6,042
| 0.001159
|
#! /usr/bin/env python2
import rift
rift.init("main.so")
print(rift.call(lib.main, rift.c_int))
|
Yayg/rift
|
tests/Acceptance_Tests/main.py
|
Python
|
mit
| 97
| 0
|
# -*- coding: utf-8 -*-
# --------------------------------------------------
# Task 1
# --------------------------------------------------
"""
Write a generator function that takes a sequence in which each element
is a tuple of two values (the lengths of a right triangle's legs) and
yields the length of the hypotenuse.
"""
l = [(8, 4), (5, 7), (9, 2), (5, 4), (6, 4)]
# --------------------------------------------------
# Task 2
# --------------------------------------------------
"""
Write a generator expression that computes and returns the
circumference of a circle. Each element is a radius.
"""
l = [7, 9.06, 44, 21.3, 6, 10.00001, 53]
# --------------------------------------------------
# Task 3
# --------------------------------------------------
"""
Write an example implementation of the built-in filter function.
"""
def myfilter1(fun, l):
pass
# --------------------------------------------------
# Task 4
# --------------------------------------------------
"""
Write an example implementation of the built-in reduce function.
"""
def myreduce1(fun, l):
pass
# --------------------------------------------------
# Task 5
# --------------------------------------------------
"""
Rewrite the functions from Tasks 3 and 4 so that they become
generators.
"""
def myfilter2(fun, l):
pass
def myreduce21(fun, l):
pass
# --------------------------------------------------
# Task 6
# --------------------------------------------------
"""
Rewrite your implementations of the filter and map functions from the
lesson so that the second argument accepts any number of sequences.
"""
|
greggy/pylessons
|
exam12.py
|
Python
|
lgpl-2.1
| 2,149
| 0.005573
|
from __future__ import annotations
import contextlib
import os
from typing import Generator
from typing import Sequence
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import PatchesT
from pre_commit.envcontext import Var
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import clean_path_on_failure
ENVIRONMENT_DIR = 'coursier'
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None: # pragma: win32 no cover
helpers.assert_version_default('coursier', version)
helpers.assert_no_additional_deps('coursier', additional_dependencies)
envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
channel = prefix.path('.pre-commit-channel')
with clean_path_on_failure(envdir):
for app_descriptor in os.listdir(channel):
_, app_file = os.path.split(app_descriptor)
app, _ = os.path.splitext(app_file)
helpers.run_setup_cmd(
prefix,
(
'cs',
'install',
'--default-channels=false',
f'--channel={channel}',
app,
f'--dir={envdir}',
),
)
def get_env_patch(target_dir: str) -> PatchesT: # pragma: win32 no cover
return (
('PATH', (target_dir, os.pathsep, Var('PATH'))),
)
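# For example, get_env_patch('/tmp/envdir') returns
# (('PATH', ('/tmp/envdir', os.pathsep, Var('PATH'))),), i.e. the installed
# apps directory is prepended to PATH inside envcontext() below.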
@contextlib.contextmanager
def in_env(
prefix: Prefix,
) -> Generator[None, None, None]: # pragma: win32 no cover
target_dir = prefix.path(
helpers.environment_dir(ENVIRONMENT_DIR, get_default_version()),
)
with envcontext(get_env_patch(target_dir)):
yield
def run_hook(
hook: Hook,
file_args: Sequence[str],
color: bool,
) -> tuple[int, bytes]: # pragma: win32 no cover
with in_env(hook.prefix):
return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
|
pre-commit/pre-commit
|
pre_commit/languages/coursier.py
|
Python
|
mit
| 2,157
| 0
|
import time
import threading
import logging
import serial
import io
import sim900
import sys
if __name__ == "__main__":
#this is a bad file for recording the diode temps and voltages
#eventually it will be merged with recording the resistance bridges
#and actually use the sim900 file functions
#create an instance of the sim900 commands
sim = sim900.sim900()
#main function to records temps
try:
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = "/home/heather/SRS/%s.txt" % timestr
f = open(filename, 'w+')
# The column headers for rox 3 were the opposite of the written data until 2014-10-10:
f.write("time, diode ch1 temp, dio ch 2 temp, dio 3 temp, dio 4 temp, dio 1 volts, dio 2 volts, dio 3 volts, dio 4 volts, rox 1 temp, rox 1 res, rox 2 temp, rox 2 res, rox 3 temp, rox 3 res\n")
while 1:
#get diode info
sim.connect_sim922()
dio_temps = sim.get_sim922_temp()
dio_temps = dio_temps.rstrip()
time.sleep(1)
dio_volts = sim.get_sim922_volts()
dio_volts = dio_volts.rstrip()
sim.close_sim922()
print "diode"
time.sleep(1)
#get rox1 info
sim.connect_sim921_1()
rox1_res = sim.get_resistance()
rox1_temp = sim.get_temp()
sim.close_sim921_1()
print "rox1"
time.sleep(1)
sim.connect_sim921()
rox2_res = sim.get_resistance()
rox2_temp = sim.get_temp()
            sim.close_sim921()
            print "rox2"
            #get rox3 info
            sim.connect_sim921_6()
            rox3_res = sim.get_resistance()
            rox3_temp = sim.get_temp()
            sim.close_sim921_6()
            print "rox3"
time.sleep(1)
#write it all to file
current_time = time.strftime("%Y%m%d-%H%M%S")
f.write("%s, %s, %s, %s, %s, %s, %s, %s, %s\n" % (current_time, dio_temps, dio_volts, rox1_temp, rox1_res, rox2_temp, rox2_res, rox3_temp, rox3_res))
f.flush()
except KeyboardInterrupt:
f.close()
print "done writing"
sim.close_sim922()
sim.close_sim900()
print "ports closed"
|
ColumbiaCMB/kid_readout
|
kid_readout/equipment/record_all_old.py
|
Python
|
bsd-2-clause
| 2,287
| 0.006996
|
import virtool.subtractions.files
from sqlalchemy import select
from virtool.subtractions.models import SubtractionFile
async def test_create_subtraction_files(snapshot, tmp_path, pg, pg_session):
test_dir = tmp_path / "subtractions" / "foo"
test_dir.mkdir(parents=True)
test_dir.joinpath("subtraction.fa.gz").write_text("FASTA file")
test_dir.joinpath("subtraction.1.bt2").write_text("Bowtie2 file")
subtraction_files = ["subtraction.fa.gz", "subtraction.1.bt2"]
await virtool.subtractions.files.create_subtraction_files(
pg, "foo", subtraction_files, test_dir
)
async with pg_session as session:
assert (
await session.execute(select(SubtractionFile))
).scalars().all() == snapshot
|
igboyes/virtool
|
tests/subtractions/test_files.py
|
Python
|
mit
| 775
| 0
|
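The test above drives SQLAlchemy's asyncio session through pytest fixtures. A free-standing sketch of the same select-and-scalars round trip, assuming SQLAlchemy 1.4+ with the aiosqlite driver and a simplified stand-in model:

import asyncio

from sqlalchemy import Column, Integer, String, select
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class SubtractionFile(Base):
    # Simplified stand-in for virtool's real model.
    __tablename__ = 'subtraction_files'
    id = Column(Integer, primary_key=True)
    name = Column(String)

async def main():
    engine = create_async_engine('sqlite+aiosqlite://')
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
    async with AsyncSession(engine) as session:
        session.add(SubtractionFile(name='subtraction.fa.gz'))
        await session.commit()
        rows = (await session.execute(select(SubtractionFile))).scalars().all()
        print([row.name for row in rows])

asyncio.run(main())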
import _plotly_utils.basevalidators
class ViolinmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="violinmode", parent_name="layout", **kwargs):
super(ViolinmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["group", "overlay"]),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/layout/_violinmode.py
|
Python
|
mit
| 516
| 0.001938
|
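Validators like this are normally exercised by plotly's figure machinery rather than called directly, but the EnumeratedValidator base exposes validate_coerce, so a direct check is possible (import path assumed from the package layout above):

from plotly.validators.layout import ViolinmodeValidator

v = ViolinmodeValidator()
print(v.validate_coerce('group'))    # 'group'
print(v.validate_coerce('overlay'))  # 'overlay'
# v.validate_coerce('stacked') raises a ValueError listing the allowed values.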
# Configuration file for ipython-qtconsole.
c = get_config()
#------------------------------------------------------------------------------
# IPythonQtConsoleApp configuration
#------------------------------------------------------------------------------
# IPythonQtConsoleApp will inherit config from: BaseIPythonApplication,
# Application, IPythonConsoleApp, ConnectionFileMixin
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPythonQtConsoleApp.ip = '127.0.0.1'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPythonQtConsoleApp.verbose_crash = False
# Start the console window maximized.
# c.IPythonQtConsoleApp.maximize = False
# The date format used by logging formatters for %(asctime)s
# c.IPythonQtConsoleApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPythonQtConsoleApp.shell_port = 0
# The SSH server to use to connect to the kernel.
# c.IPythonQtConsoleApp.sshserver = ''
# set the stdin (DEALER) port [default: random]
# c.IPythonQtConsoleApp.stdin_port = 0
# Set the log level by value or name.
# c.IPythonQtConsoleApp.log_level = 30
# Path to the ssh key to use for logging in to the ssh server.
# c.IPythonQtConsoleApp.sshkey = ''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPythonQtConsoleApp.extra_config_file = u''
# Whether to create profile dir if it doesn't exist
# c.IPythonQtConsoleApp.auto_create = False
# path to a custom CSS stylesheet
# c.IPythonQtConsoleApp.stylesheet = ''
# set the heartbeat port [default: random]
# c.IPythonQtConsoleApp.hb_port = 0
# Whether to overwrite existing config files when copying
# c.IPythonQtConsoleApp.overwrite = False
# set the iopub (PUB) port [default: random]
# c.IPythonQtConsoleApp.iopub_port = 0
# The IPython profile to use.
# c.IPythonQtConsoleApp.profile = u'default'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security-
# dir of the current profile, but can be specified by absolute path.
# c.IPythonQtConsoleApp.connection_file = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.IPythonQtConsoleApp.confirm_exit = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPythonQtConsoleApp.ipython_dir = u'/home/devin/.config/ipython'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPythonQtConsoleApp.copy_config_files = False
# Connect to an already running kernel
# c.IPythonQtConsoleApp.existing = ''
# Use a plaintext widget instead of rich text (plain can't print/save).
# c.IPythonQtConsoleApp.plain = False
# Start the console window with the menu bar hidden.
# c.IPythonQtConsoleApp.hide_menubar = False
# The Logging format template
# c.IPythonQtConsoleApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#
# c.IPythonQtConsoleApp.transport = 'tcp'
#------------------------------------------------------------------------------
# IPythonWidget configuration
#------------------------------------------------------------------------------
# A FrontendWidget for an IPython kernel.
# IPythonWidget will inherit config from: FrontendWidget, HistoryConsoleWidget,
# ConsoleWidget
# The type of completer to use. Valid values are:
#
# 'plain' : Show the available completion as a text list
# below the editing area.
# 'droplist': Show the completion in a drop down list navigable
# by the arrow keys, and from which you can select
# completion by pressing Return.
# 'ncurses' : Show the completion as a text list which is navigable by
# `tab` and arrow keys.
# c.IPythonWidget.gui_completion = 'ncurses'
# Whether to process ANSI escape codes.
# c.IPythonWidget.ansi_codes = True
# A CSS stylesheet. The stylesheet can contain classes for:
# 1. Qt: QPlainTextEdit, QFrame, QWidget, etc
# 2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
# 3. IPython: .error, .in-prompt, .out-prompt, etc
# c.IPythonWidget.style_sheet = u''
# The height of the console at start time in number of characters (will double
# with `vsplit` paging)
# c.IPythonWidget.height = 25
#
# c.IPythonWidget.out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
#
# c.IPythonWidget.input_sep = '\n'
# Whether to draw information calltips on open-parentheses.
# c.IPythonWidget.enable_calltips = True
#
# c.IPythonWidget.in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
# The width of the console at start time in number of characters (will double
# with `hsplit` paging)
# c.IPythonWidget.width = 81
# A command for invoking a system text editor. If the string contains a
# {filename} format specifier, it will be used. Otherwise, the filename will be
# appended to the end of the command.
# c.IPythonWidget.editor = ''
# If not empty, use this Pygments style for syntax highlighting. Otherwise, the
# style sheet is queried for Pygments style information.
# c.IPythonWidget.syntax_style = u''
# The font family to use for the console. On OSX this defaults to Monaco, on
# Windows the default is Consolas with fallback of Courier, and on other
# platforms the default is Monospace.
# c.IPythonWidget.font_family = u''
#
# c.IPythonWidget.output_sep2 = ''
# Whether to automatically execute on syntactically complete input.
#
# If False, Shift-Enter is required to submit each execution. Disabling this is
# mainly useful for non-Python kernels, where the completion check would be
# wrong.
# c.IPythonWidget.execute_on_complete_input = True
# The maximum number of lines of text before truncation. Specifying a non-
# positive number disables text truncation (not recommended).
# c.IPythonWidget.buffer_size = 500
#
# c.IPythonWidget.history_lock = False
#
# c.IPythonWidget.banner = u''
# The type of underlying text widget to use. Valid values are 'plain', which
# specifies a QPlainTextEdit, and 'rich', which specifies a QTextEdit.
# c.IPythonWidget.kind = 'plain'
# Whether to ask for user confirmation when restarting kernel
# c.IPythonWidget.confirm_restart = True
# The font size. If unconfigured, Qt will be entrusted with the size of the
# font.
# c.IPythonWidget.font_size = 0
# The editor command to use when a specific line number is requested. The string
# should contain two format specifiers: {line} and {filename}. If this parameter
# is not specified, the line number option to the %edit magic will be ignored.
# c.IPythonWidget.editor_line = u''
# Whether to clear the console when the kernel is restarted
# c.IPythonWidget.clear_on_kernel_restart = True
# The type of paging to use. Valid values are:
#
# 'inside' : The widget pages like a traditional terminal.
# 'hsplit' : When paging is requested, the widget is split
# horizontally. The top pane contains the console, and the
# bottom pane contains the paged text.
# 'vsplit' : Similar to 'hsplit', except that a vertical splitter
# is used.
# 'custom' : No action is taken by the widget beyond emitting a
# 'custom_page_requested(str)' signal.
# 'none' : The text is written directly to the console.
# c.IPythonWidget.paging = 'inside'
#
# c.IPythonWidget.output_sep = ''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an 'import *' is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u'/home/devin/.config/ipython'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too-small
# cache than working.
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = '127.0.0.1'
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'devin'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The image format for figures with the inline backend.
# c.InlineBackend.figure_format = 'png'
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': 'white', 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': 'white'}
|
dwwkelly/configs
|
srv/salt/laptop/config/ipython/profile_default/ipython_qtconsole_config.py
|
Python
|
gpl-2.0
| 23,719
| 0.003289
|
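Every option in the profile above ships commented out, so the generated defaults apply; enabling one just means uncommenting the line and assigning a value. A minimal override (hypothetical choices) would be:

# ipython_qtconsole_config.py -- minimal override sketch
c = get_config()

c.IPythonWidget.gui_completion = 'droplist'  # drop-down tab completion
c.IPythonWidget.paging = 'vsplit'            # page long output in a split pane
c.ZMQInteractiveShell.colors = 'Linux'       # dark-background color scheme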
import logging
import socket
from functools import wraps
from django.conf import settings
from django.http import (
Http404,
HttpResponse,
HttpResponseForbidden,
HttpResponseRedirect
)
from django.shortcuts import render
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from celery.messaging import establish_connection
from elasticsearch.exceptions import ConnectionError, NotFoundError
from mobility.decorators import mobile_template
from fjord.base.models import Profile
from fjord.base.urlresolvers import reverse
from fjord.search.index import get_index, get_index_stats
log = logging.getLogger('i.services')
@mobile_template('{mobile/}new_user.html')
def new_user_view(request, template=None):
if request.user.is_anonymous():
# This is the AnonymousUser and they shouldn't be here
# so push them to the dashboard.
return HttpResponseRedirect(reverse('dashboard'))
try:
# If they have a profile, then this doesn't throw an error
# and we can let them see the new user view again, but it's
# not particularly interesting.
request.user.profile
except Profile.DoesNotExist:
# They aren't anonymous and don't have a profile, so create
# a profile for them.
#
# We could do more with this, but we're not at the moment.
Profile.objects.create(user=request.user)
next_url = request.GET.get('next', reverse('dashboard'))
if not is_safe_url(next_url):
next_url = reverse('dashboard')
return render(request, template, {
'next_url': next_url,
})
@mobile_template('{mobile/}login_failure.html')
def login_failure(request, template=None):
return render(request, template)
@mobile_template('{mobile/}csrf_failure.html')
def csrf_failure(request, reason='', template=None):
return HttpResponseForbidden(
render(request, template),
content_type='text/html'
)
def about_view(request):
template = 'about.html'
return render(request, template)
def robots_view(request):
"""Generate a robots.txt."""
template = render(request, 'robots.txt')
return HttpResponse(template, content_type='text/plain')
def contribute_view(request):
"""Generate a contribute.json."""
template = render(request, 'contribute.json')
return HttpResponse(template, content_type='application/json')
def test_memcached(host, port):
"""Connect to memcached.
:returns: True if test passed, False if test failed.
"""
    s = None
    try:
        s = socket.socket()
        s.connect((host, port))
        return True
    except Exception as exc:
        log.critical('Failed to connect to memcached (%r): %s' %
                     ((host, port), exc))
        return False
    finally:
        # Guard against socket creation itself having failed.
        if s is not None:
            s.close()
def dev_or_authorized(func):
"""Show view for admin and developer instances, else 404"""
@wraps(func)
def _dev_or_authorized(request, *args, **kwargs):
if (request.user.is_superuser
or settings.SHOW_STAGE_NOTICE
or settings.DEBUG):
return func(request, *args, **kwargs)
raise Http404
return _dev_or_authorized
ERROR = 'ERROR'
INFO = 'INFO'
@dev_or_authorized
@never_cache
def monitor_view(request):
"""View for services monitor."""
# Dict of infrastructure name -> list of output tuples of (INFO,
# msg) or (ERROR, msg)
status = {}
# Note: To add a new component, do your testing and then add a
# name -> list of output tuples map to status.
# Check memcached.
memcache_results = []
try:
for cache_name, cache_props in settings.CACHES.items():
result = True
backend = cache_props['BACKEND']
location = cache_props['LOCATION']
# LOCATION can be a string or a list of strings
if isinstance(location, basestring):
location = location.split(';')
if 'memcache' in backend:
for loc in location:
# TODO: this doesn't handle unix: variant
ip, port = loc.split(':')
result = test_memcached(ip, int(port))
memcache_results.append(
(INFO, '%s:%s %s' % (ip, port, result)))
if not memcache_results:
memcache_results.append((ERROR, 'memcache is not configured.'))
elif len(memcache_results) < 2:
memcache_results.append(
(ERROR, ('You should have at least 2 memcache servers. '
'You have %s.' % len(memcache_results))))
else:
memcache_results.append((INFO, 'memcached servers look good.'))
except Exception as exc:
memcache_results.append(
(ERROR, 'Exception while looking at memcached: %s' % str(exc)))
status['memcached'] = memcache_results
# Check ES.
es_results = []
try:
get_index_stats()
es_results.append(
(INFO, ('Successfully connected to ElasticSearch and index '
'exists.')))
except ConnectionError as exc:
es_results.append(
(ERROR, 'Cannot connect to ElasticSearch: %s' % str(exc)))
except NotFoundError:
es_results.append(
(ERROR, 'Index "%s" missing.' % get_index()))
except Exception as exc:
es_results.append(
(ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))
status['ElasticSearch'] = es_results
# Check RabbitMQ.
rabbitmq_results = []
try:
rabbit_conn = establish_connection(connect_timeout=2)
rabbit_conn.connect()
rabbitmq_results.append(
(INFO, 'Successfully connected to RabbitMQ.'))
except (socket.error, IOError) as exc:
rabbitmq_results.append(
(ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))
except Exception as exc:
rabbitmq_results.append(
(ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))
status['RabbitMQ'] = rabbitmq_results
status_code = 200
status_summary = {}
for component, output in status.items():
if ERROR in [item[0] for item in output]:
status_code = 500
status_summary[component] = False
else:
status_summary[component] = True
return render(request, 'services/monitor.html',
{'component_status': status,
'status_summary': status_summary},
status=status_code)
class IntentionalException(Exception):
pass
@dev_or_authorized
def throw_error(request):
"""Throw an error for testing purposes."""
raise IntentionalException("Error raised for testing purposes.")
|
DESHRAJ/fjord
|
fjord/base/views.py
|
Python
|
bsd-3-clause
| 6,831
| 0
|
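dev_or_authorized above is a standard gate-then-delegate decorator. A framework-free sketch of the same shape, with a hypothetical is_allowed predicate standing in for the Django superuser/DEBUG checks:

from functools import wraps

class NotFound(Exception):
    """Stand-in for django.http.Http404."""

def gate(is_allowed):
    def decorator(func):
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            if is_allowed(request):
                return func(request, *args, **kwargs)
            raise NotFound
        return wrapper
    return decorator

@gate(lambda request: request.get('is_superuser', False))
def monitor(request):
    return 'ok'

print(monitor({'is_superuser': True}))  # prints 'ok'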
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Blocks and utilities for Video SDL module
'''
# The presence of this file turns this directory into a Python package
import os
try:
from video_sdl_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from video_sdl_swig import *
|
trdean/grEME
|
gr-video-sdl/python/video_sdl/__init__.py
|
Python
|
gpl-3.0
| 1,138
| 0.004394
|
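The try/except import in that __init__ is the classic installed-versus-build-tree fallback: if the SWIG binding is not on the normal import path, the package widens the search before retrying. A generic sketch of the idea (module name hypothetical; falls back to None so the snippet runs anywhere):

import os
import sys

try:
    import some_generated_binding  # hypothetical SWIG-built module
except ImportError:
    # Fall back to a sibling build directory before giving up.
    here = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(here, 'build'))
    try:
        import some_generated_binding
    except ImportError:
        some_generated_binding = None

print(some_generated_binding)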
#!/usr/bin/env python
#encoding:utf-8
import os
import sys
import requests
import MySQLdb
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
if len(sys.argv) != 4:
    print 'Invalid parameters!'
    sys.exit(1)
print '=' * 60
print 'start:', sys.argv
aim_category_id = int(sys.argv[1])
start_point = (int(sys.argv[2]), int(sys.argv[3]))
immediate_download = False
base_url = 'http://www.3che.com'
session = requests.Session()
username = ''
password = ''
record = {
'category': '',
'detail_category': '',
'post_url': '',
'filename': '',
'url': ''
}
sql_cnt = 0
connection = None
cursor = None
def record_to_mysql():
global sql_cnt, connection, cursor
    if sql_cnt % 20 == 0:
        if connection:
            connection.commit()
            cursor.close()
            connection.close()
        connection = MySQLdb.connect(host='', user='', passwd='', db='', port=3306, charset='utf8')
        cursor = connection.cursor()
sql_cnt += 1
cursor.execute('insert into san_che(`category`, `detail_category`, `post_url`, `filename`, `url`) values (%s, %s, %s, %s, %s)',
(record['category'], record['detail_category'], record['post_url'], record['filename'], record['url']))
def login():
login_path = '/member.php?mod=logging&action=login&loginsubmit=yes&infloat=yes&lssubmit=yes&inajax=1'
session.post(base_url + login_path, {'username': username, 'password': password})
def enter_directory(name):
if immediate_download:
if not os.path.exists(name):
os.mkdir(name)
os.chdir(name)
def get_soup(url, parse_only=None):
text = session.get(url).text
return BeautifulSoup(text, 'lxml', parse_only=parse_only)
def download_file(url, filename):
print 'Downloading:', filename, '=>', url
record['url'] = url
record['filename'] = filename
    if immediate_download:
        # Binary-safe write; the context manager closes the file.
        with open(filename, 'wb') as fp:
            res = requests.get(url)
            fp.write(res.content)
else:
record_to_mysql()
def crawl_file(url, filename):
try:
soup = get_soup(url, SoupStrainer(id='attachpayform'))
attach_form = soup.find('form', id='attachpayform')
link = attach_form.table.find_all('a')[-1]
    except Exception as e:
        print 'Error! file url:', url, '-', e
else:
download_file(link['href'], filename)
# Crawl detail data of one post.
def crawl_detail(detail_category, title, detail_url):
print '-' * 100
print 'Crawling Post:', detail_category, title, '=>', detail_url
record['detail_category'] = detail_category
record['post_url'] = detail_url
# Enter detail directory.
enter_directory(detail_category)
prefix = detail_url.rsplit('/', 1)[-1].split('.', 1)[0]
enter_directory(prefix + title)
soup = get_soup(detail_url, SoupStrainer('p', {'class': 'attnm'}))
attnms = soup.find_all('p', {'class': 'attnm'})
for attnm in attnms:
url = '{0}/{1}'.format(base_url, attnm.a['href'])
crawl_file(url, attnm.a.text.strip(u'[下载]'))
# Leave detail directory.
if immediate_download:
os.chdir('../..')
# Crawl data of one category.
def crawl_category(category, list_url):
print '=' * 100
print 'Crawling category:', category, '=>', list_url
record['category'] = category
# Create corresponding directory and enter.
enter_directory(category)
cur_page_id = 0
url = list_url
while url is not None:
cur_page_id += 1
print 'Crawling page url:', url
soup = get_soup(url, SoupStrainer('span'))
xsts = soup.find_all('span', {'class': 'xst'})
if cur_page_id >= start_point[0]:
cur_in_page_id = 0
for xst in xsts:
cur_in_page_id += 1
detail = xst.find('a', {'class': 'xst'})
if cur_page_id > start_point[0] or cur_in_page_id >= start_point[1]:
crawl_detail(xst.em and xst.em.a.text or '', detail.text, detail['href'])
page_footer = soup.find('span', id='fd_page_top')
next_link = page_footer.label.next_sibling
if next_link is not None:
url = next_link['href']
else:
url = None
# Leave the directory.
if immediate_download:
os.chdir('..')
if __name__ == '__main__':
login()
# Extract categories from home page.
soup = get_soup(base_url, SoupStrainer(id='nv'))
category_lis = soup.find('div', id='nv').ul.find_all('li')
categories = map(lambda x: (x.a.text, x.a['href']), category_lis)
categories = filter(lambda x: x[1] != '/', categories)
crawl_category(categories[aim_category_id][0], categories[aim_category_id][1])
# for category in categories:
# crawl_category(category[0], category[1])
|
JayvicWen/Crawler
|
3che/crawler.py
|
Python
|
mit
| 4,816
| 0.004156
|
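SoupStrainer, used throughout the crawler above, tells BeautifulSoup to build a tree for matching elements only, which keeps large forum pages cheap to parse. A self-contained example (requires beautifulsoup4; html.parser avoids the lxml dependency):

from bs4 import BeautifulSoup, SoupStrainer

html = '<div id="nv"><ul><li><a href="/a">A</a></li></ul></div><p>skipped</p>'

# Only the subtree rooted at id="nv" is parsed; the trailing <p> never exists.
soup = BeautifulSoup(html, 'html.parser', parse_only=SoupStrainer(id='nv'))
print([a['href'] for a in soup.find_all('a')])  # ['/a']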
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "huntnet.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
srohatgi/cloud
|
huntnet/manage.py
|
Python
|
apache-2.0
| 250
| 0
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc import db
from ggrc.models.mixins import CustomAttributable, BusinessObject, Timeboxed
from ggrc.models.object_document import Documentable
from ggrc.models.object_person import Personable
from ggrc.models.object_owner import Ownable
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState, track_state_for_class
class Threat(
HasObjectState, CustomAttributable, Documentable, Personable,
Relatable, Timeboxed, Ownable, BusinessObject, db.Model):
__tablename__ = 'threats'
_aliases = {
"contact": {
"display_name": "Contact",
"filter_by": "_filter_by_contact",
},
"secondary_contact": None,
"url": "Threat URL",
  }
track_state_for_class(Threat)  # pairs with the track_state_for_class import above
|
edofic/ggrc-core
|
src/ggrc_risks/models/threat.py
|
Python
|
apache-2.0
| 847
| 0.004723
|
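Threat above is assembled almost entirely from mixins, so its behavior is driven by Python's method resolution order. A tiny sketch of cooperative mixins makes the mechanism concrete:

class Timeboxed(object):
    def summary(self):
        return 'timeboxed ' + super(Timeboxed, self).summary()

class Ownable(object):
    def summary(self):
        return 'ownable ' + super(Ownable, self).summary()

class Base(object):
    def summary(self):
        return 'base'

class Threat(Timeboxed, Ownable, Base):
    pass

print(Threat().summary())  # 'timeboxed ownable base'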
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('member', '0007_auto_20150501_2124'),
]
operations = [
migrations.AddField(
model_name='member',
name='avatar',
field=models.URLField(default='https://dn-tmp.qbox.me/chendian/cat_mouse_reading.jpg', verbose_name='\u5934\u50cf', blank=True),
),
migrations.AlterField(
model_name='member',
name='description',
field=models.TextField(default='', verbose_name='\u4e2a\u4eba\u4ecb\u7ecd', blank=True),
),
]
|
mozillazg/chendian-plus
|
chendian/member/migrations/0008_auto_20150502_1013.py
|
Python
|
mit
| 697
| 0.002869
|
"""
I came up with this the first try. So, that's why this is posted in duplicate.
"""
import sys
try:
columns = int(input("How many columns? "))
rows = int(input("How many rows? "))
tall = int(input("How tall should the boxes be? "))
wide = int(input("How wide should the boxes be? "))
except Exception as e:
print(e)
print("You have fail")
print("Try type valid integer")
sys.exit(1)
i = 0
j = 0
k = 0
m = 0
while j <= rows:
print("+",end="")
while k < columns:
while i < wide:
print("-",end="")
i += 1
print("+",end="")
i = 0
k += 1
print('\r')
k = 0
if j < rows:
while m < tall:
print("|",end="")
while k < columns:
print(" "*wide,end="")
print("|",end="")
k += 1
k = 0
m += 1
print("\r")
m = 0
j += 1
sys.exit(0)
|
MattD830/Python-INFO1-CE9990
|
graphpaper2.py
|
Python
|
gpl-3.0
| 968
| 0.007231
|
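The nested while loops above can be collapsed with string repetition and join; a sketch that prints the same grid of boxes:

def grid(columns, rows, tall, wide):
    border = '+' + ('-' * wide + '+') * columns
    middle = '|' + (' ' * wide + '|') * columns
    lines = [border]
    for _ in range(rows):
        lines.extend([middle] * tall)
        lines.append(border)
    return '\n'.join(lines)

print(grid(columns=2, rows=2, tall=2, wide=4))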
def extractTranslasiSanusiMe(item):
'''
Parser for 'translasi.sanusi.me'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTranslasiSanusiMe.py
|
Python
|
bsd-3-clause
| 550
| 0.034545
|
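The extractor's tagmap is first-match dispatch over an item's tags; stripped of the project helpers, the pattern reduces to:

tagmap = [
    ('PRC', 'PRC', 'translated'),
    ('Loiterous', 'Loiterous', 'oel'),
]

def classify(item):
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return (name, tl_type)
    return None

print(classify({'tags': ['Loiterous']}))  # ('Loiterous', 'oel')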
# -*- coding: UTF-8 -*-
# CCcam Info by AliAbdul
from base64 import encodestring
from os import listdir, remove, rename, system, path
from enigma import eListboxPythonMultiContent, eTimer, gFont, loadPNG, RT_HALIGN_RIGHT, getDesktop
from Components.ActionMap import ActionMap, NumberActionMap
from Components.config import config, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.Label import Label
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest, MultiContentEntryPixmapAlphaBlend
from Components.ScrollLabel import ScrollLabel
from Screens.HelpMenu import HelpableScreen
#from Screens.InfoBar import InfoBar
from Screens.LocationBox import LocationBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Tools.Directories import fileExists, SCOPE_ACTIVE_SKIN, resolveFilename
from twisted.internet import reactor
from twisted.web.client import HTTPClientFactory
from urlparse import urlparse, urlunparse
#TOGGLE_SHOW = InfoBar.toggleShow
VERSION = "v2"
DATE = "21.11.2014"
CFG = "/etc/CCcam.cfg"
#############################################################
def _parse(url):
url = url.strip()
parsed = urlparse(url)
scheme = parsed[0]
path = urlunparse(('','') + parsed[2:])
host, port = parsed[1], 80
if '@' in host:
username, host = host.split('@')
if ':' in username:
username, password = username.split(':')
else:
password = ""
else:
username = ""
password = ""
if ':' in host:
host, port = host.split(':')
port = int(port)
if path == "":
path = "/"
return scheme, host, port, path, username, password
def getPage(url, contextFactory=None, *args, **kwargs):
scheme, host, port, path, username, password = _parse(url)
if username and password:
url = scheme + '://' + host + ':' + str(port) + path
basicAuth = encodestring("%s:%s" % (username, password))
authHeader = "Basic " + basicAuth.strip()
AuthHeaders = {"Authorization": authHeader}
if kwargs.has_key("headers"):
kwargs["headers"].update(AuthHeaders)
else:
kwargs["headers"] = AuthHeaders
factory = HTTPClientFactory(url, *args, **kwargs)
reactor.connectTCP(host, port, factory)
return factory.deferred
#############################################################
class HelpableNumberActionMap(NumberActionMap):
def __init__(self, parent, context, actions, prio):
alist = []
adict = {}
for (action, funchelp) in actions.iteritems():
alist.append((action, funchelp[1]))
adict[action] = funchelp[0]
NumberActionMap.__init__(self, [context], adict, prio)
parent.helpList.append((self, context, alist))
#############################################################
TranslationHelper = [
["Current time", _("Current time")],
["NodeID", _("NodeID")],
["Uptime", _("Uptime")],
["Connected clients", _("Connected clients")],
["Active clients", _("Active clients")],
["Total handled client ecm's", _("Total handled client ecm's")],
["Total handled client emm's", _("Total handled client emm's")],
["Peak load (max queued requests per workerthread)", _("Peak load (max queued requests per workerthread)")],
["card reader", _("card reader")],
["no or unknown card inserted", _("no or unknown card inserted")],
["system:", _("system:")],
["caid:", _("caid:")],
["provider:", _("provider:")],
["provid:", _("provid:")],
["using:", _("using:")],
["address:", _("address:")],
["hops:", _("hops:")],
["pid:", _("pid:")],
["share:", _("share:")],
["handled", _("handled")],
[" and", _(" and")],
["card", _("card")],
["Cardserial", _("Cardserial")],
["ecm time:", _("ecm time:")]]
def translateBlock(block):
for x in TranslationHelper:
if block.__contains__(x[0]):
block = block.replace(x[0], x[1])
return block
#############################################################
def getConfigValue(l):
list = l.split(":")
ret = ""
if len(list) > 1:
ret = (list[1]).replace("\n", "").replace("\r", "")
if ret.__contains__("#"):
idx = ret.index("#")
ret = ret[:idx]
while ret.startswith(" "):
ret = ret[1:]
while ret.endswith(" "):
ret = ret[:-1]
return ret
#############################################################
def notBlackListed(entry):
try:
f = open(config.cccaminfo.blacklist.value, "r")
content = f.read().split("\n")
f.close()
except:
content = []
ret = True
for x in content:
if x == entry:
ret = False
return ret
#############################################################
menu_list = [
_("General"),
_("Clients"),
_("Active clients"),
_("Servers"),
_("Shares"),
_("Share View"),
_("Extended Shares"),
_("Providers"),
_("Entitlements"),
_("ecm.info"),
_("Menu config"),
_("Local box"),
_("Remote box"),
_("Free memory"),
_("Switch config"),
_("About")]
#############################################################
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_on.png")):
lock_on = loadPNG(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_on.png"))
else:
lock_on = loadPNG("/usr/share/enigma2/skin_default/icons/lock_on.png")
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_off.png")):
lock_off = loadPNG(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_off.png"))
else:
lock_off = loadPNG("/usr/share/enigma2/skin_default/icons/lock_off.png")
def getConfigNameAndContent(fileName):
try:
f = open(fileName, "r")
content = f.read()
f.close()
except:
content = ""
if content.startswith("#CONFIGFILE NAME="):
content = content.replace("\r", "\n")
name = content[17:]
idx = name.index("\n")
name = name[:idx]
else:
name = fileName.replace("/var/etc/", "")
return name, content
#############################################################
class CCcamList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(25)
self.l.setFont(0, gFont("Regular", 20))
self.l.setFont(1, gFont("Regular", 32))
class CCcamShareList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(60)
self.l.setFont(0, gFont("Regular", 18))
self.l.setFont(1, gFont("Regular", 32))
class CCcamConfigList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(30)
self.l.setFont(0, gFont("Regular", 20))
self.l.setFont(1, gFont("Regular", 32))
class CCcamShareViewList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(20)
self.l.setFont(0, gFont("Regular", 18))
self.l.setFont(1, gFont("Regular", 32))
def CCcamListEntry(name, idx):
screenwidth = getDesktop(0).size().width()
res = [name]
if idx == 10:
idx = "red"
elif idx == 11:
idx = "green"
elif idx == 12:
idx = "yellow"
elif idx == 13:
idx = "blue"
elif idx == 14:
idx = "menu"
elif idx == 15:
idx = "info"
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % str(idx))):
png = resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % str(idx))
else:
png = "/usr/share/enigma2/skin_default/buttons/key_%s.png" % str(idx)
if screenwidth and screenwidth == 1920:
if fileExists(png):
res.append(MultiContentEntryPixmapAlphaBlend(pos=(10, 3), size=(67, 48), png=loadPNG(png)))
res.append(MultiContentEntryText(pos=(90, 7), size=(900, 50), font=1, text=name))
else:
if fileExists(png):
res.append(MultiContentEntryPixmapAlphaBlend(pos=(0, 0), size=(35, 25), png=loadPNG(png)))
res.append(MultiContentEntryText(pos=(40, 3), size=(500, 25), font=0, text=name))
return res
def CCcamServerListEntry(name, color):
screenwidth = getDesktop(0).size().width()
res = [name]
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % color)):
png = resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % color)
else:
png = "/usr/share/enigma2/skin_default/buttons/key_%s.png" % color
if screenwidth and screenwidth == 1920:
if fileExists(png):
res.append(MultiContentEntryPixmapAlphaBlend(pos=(10, 3), size=(67, 48), png=loadPNG(png)))
res.append(MultiContentEntryText(pos=(90, 7), size=(900, 50), font=1, text=name))
else:
if fileExists(png):
res.append(MultiContentEntryPixmapAlphaBlend(pos=(0, 0), size=(35, 25), png=loadPNG(png)))
res.append(MultiContentEntryText(pos=(40, 3), size=(500, 25), font=0, text=name))
return res
def CCcamShareListEntry(hostname, type, caid, system, uphops, maxdown):
screenwidth = getDesktop(0).size().width()
if screenwidth and screenwidth == 1920:
res = [(hostname, type, caid, system, uphops, maxdown),
MultiContentEntryText(pos=(10, 0), size=(550, 35), font=1, text=hostname),
MultiContentEntryText(pos=(650, 0), size=(500, 35), font=1, text=_("Type: ") + type, flags=RT_HALIGN_RIGHT),
MultiContentEntryText(pos=(10, 40), size=(250, 35), font=1, text=_("CaID: ") + caid),
MultiContentEntryText(pos=(230, 40), size=(250, 35), font=1, text=_("System: ") + system, flags=RT_HALIGN_RIGHT),
MultiContentEntryText(pos=(520, 40), size=(250, 35), font=1, text=_("Uphops: ") + uphops, flags=RT_HALIGN_RIGHT),
MultiContentEntryText(pos=(900, 40), size=(250, 35), font=1, text=_("Maxdown: ") + maxdown, flags=RT_HALIGN_RIGHT)]
return res
else:
res = [(hostname, type, caid, system, uphops, maxdown),
MultiContentEntryText(pos=(0, 0), size=(250, 20), font=0, text=hostname),
MultiContentEntryText(pos=(250, 0), size=(250, 20), font=0, text=_("Type: ") + type, flags=RT_HALIGN_RIGHT),
MultiContentEntryText(pos=(0, 20), size=(250, 20), font=0, text=_("CaID: ") + caid),
MultiContentEntryText(pos=(250, 20), size=(250, 20), font=0, text=_("System: ") + system, flags=RT_HALIGN_RIGHT),
MultiContentEntryText(pos=(0, 40), size=(250, 20), font=0, text=_("Uphops: ") + uphops),
MultiContentEntryText(pos=(250, 40), size=(250, 20), font=0, text=_("Maxdown: ") + maxdown, flags=RT_HALIGN_RIGHT)]
return res
def CCcamShareViewListEntry(caidprovider, providername, numberofcards, numberofreshare):
screenwidth = getDesktop(0).size().width()
if screenwidth and screenwidth == 1920:
res = [(caidprovider, providername, numberofcards),
MultiContentEntryText(pos=(10, 5), size=(800, 35), font=1, text=providername),
MultiContentEntryText(pos=(1050, 5), size=(50, 35), font=1, text=numberofcards, flags=RT_HALIGN_RIGHT),
MultiContentEntryText(pos=(1100, 5), size=(50, 35), font=1, text=numberofreshare, flags=RT_HALIGN_RIGHT)]
return res
else:
res = [(caidprovider, providername, numberofcards),
MultiContentEntryText(pos=(0, 0), size=(430, 20), font=0, text=providername),
MultiContentEntryText(pos=(430, 0), size=(50, 20), font=0, text=numberofcards, flags=RT_HALIGN_RIGHT),
MultiContentEntryText(pos=(480, 0), size=(50, 20), font=0, text=numberofreshare, flags=RT_HALIGN_RIGHT)]
return res
def CCcamConfigListEntry(file):
screenwidth = getDesktop(0).size().width()
res = [file]
try:
f = open(CFG, "r")
org = f.read()
f.close()
except:
org = ""
(name, content) = getConfigNameAndContent(file)
if content == org:
png = lock_on
else:
png = lock_off
if screenwidth and screenwidth == 1920:
res.append(MultiContentEntryPixmapAlphaBlend(pos=(5, 5), size=(50, 50), png=png))
res.append(MultiContentEntryText(pos=(85, 5), size=(800, 35), font=1, text=name))
else:
res.append(MultiContentEntryPixmapAlphaBlend(pos=(2, 2), size=(25, 25), png=png))
res.append(MultiContentEntryText(pos=(35, 2), size=(550, 25), font=0, text=name))
return res
def CCcamMenuConfigListEntry(name, blacklisted):
screenwidth = getDesktop(0).size().width()
res = [name]
if blacklisted:
png = lock_off
else:
png = lock_on
if screenwidth and screenwidth == 1920:
res.append(MultiContentEntryPixmapAlphaBlend(pos=(5, 5), size=(50, 50), png=png))
res.append(MultiContentEntryText(pos=(85, 5), size=(800, 35), font=1, text=name))
else:
res.append(MultiContentEntryPixmapAlphaBlend(pos=(2, 2), size=(25, 25), png=png))
res.append(MultiContentEntryText(pos=(35, 2), size=(550, 25), font=0, text=name))
return res
#############################################################
class CCcamInfoMain(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("CCcam Info"))
self.session = session
self["menu"] = CCcamList([])
self.working = False
self.Console = Console()
if config.cccaminfo.profile.value == "":
self.readConfig()
else:
self.url = config.cccaminfo.profile.value
self["actions"] = NumberActionMap(["CCcamInfoActions"],
{
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal,
"red": self.red,
"green": self.green,
"yellow": self.yellow,
"blue": self.blue,
"menu": self.menu,
"info": self.info,
"ok": self.okClicked,
"cancel": self.close,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right
}, -2)
self.onLayoutFinish.append(self.updateMenuList)
def updateMenuList(self):
self.working = True
        self.menu_list = []
list = []
idx = 0
for x in menu_list:
if notBlackListed(x):
list.append(CCcamListEntry(x, idx))
self.menu_list.append(x)
idx += 1
self["menu"].setList(list)
self.working = False
def readConfig(self):
self.url = "http://127.0.0.1:16001"
username = None
password = None
try:
f = open(CFG, 'r')
for l in f:
if l.startswith('WEBINFO LISTEN PORT :'):
port = getConfigValue(l)
if port != "":
self.url = self.url.replace('16001', port)
elif l.startswith('WEBINFO USERNAME :'):
username = getConfigValue(l)
elif l.startswith('WEBINFO PASSWORD :'):
password = getConfigValue(l)
f.close()
except:
pass
if (username is not None) and (password is not None) and (username != "") and (password != ""):
self.url = self.url.replace('http://', ("http://%s:%s@" % (username, password)))
config.cccaminfo.profile.value = ""
config.cccaminfo.profile.save()
def profileSelected(self, url=None):
if url is not None:
self.url = url
config.cccaminfo.profile.value = self.url
config.cccaminfo.profile.save()
self.showInfo(_("New profile: ") + url, _("Profile"))
else:
self.showInfo(_("Using old profile: ") + self.url, _("Profile"))
def keyNumberGlobal(self, idx):
        if not self.working and idx < len(self.menu_list):
self.working = True
sel = self.menu_list[idx]
if sel == _("General"):
getPage(self.url).addCallback(self.showCCcamGeneral).addErrback(self.getWebpageError)
elif sel == _("Clients"):
getPage(self.url + "/clients").addCallback(self.showCCcamClients).addErrback(self.getWebpageError)
elif sel == _("Active clients"):
getPage(self.url + "/activeclients").addCallback(self.showCCcamClients).addErrback(self.getWebpageError)
elif sel == _("Servers"):
getPage(self.url + "/servers").addCallback(self.showCCcamServers).addErrback(self.getWebpageError)
elif sel == _("Shares"):
getPage(self.url + "/shares").addCallback(self.showCCcamShares).addErrback(self.getWebpageError)
elif sel == _("Share View"):
self.session.openWithCallback(self.workingFinished, CCcamShareViewMenu, self.url)
elif sel == _("Extended Shares"):
self.session.openWithCallback(self.workingFinished, CCcamInfoShareInfo, "None", self.url)
elif sel == _("Providers"):
getPage(self.url + "/providers").addCallback(self.showCCcamProviders).addErrback(self.getWebpageError)
elif sel == _("Entitlements"):
getPage(self.url + "/entitlements").addCallback(self.showCCcamEntitlements).addErrback(self.getWebpageError)
elif sel == _("ecm.info"):
self.session.openWithCallback(self.showEcmInfoFile, CCcamInfoEcmInfoSelection)
elif sel == _("Menu config"):
self.session.openWithCallback(self.updateMenuList, CCcamInfoMenuConfig)
elif sel == _("Local box"):
self.readConfig()
self.showInfo(_("Profile: Local box"), _("Local box"))
elif sel == _("Remote box"):
self.session.openWithCallback(self.profileSelected, CCcamInfoRemoteBoxMenu)
elif sel == _("Free memory"):
if not self.Console:
self.Console = Console()
self.working = True
self.Console.ePopen("free", self.showFreeMemory)
elif sel == _("Switch config"):
self.session.openWithCallback(self.workingFinished, CCcamInfoConfigSwitcher)
else:
self.showInfo(_("CCcam Info %s\nby AliAbdul %s\n\nThis plugin shows you the status of your CCcam.") % (VERSION, DATE), _("About"))
def red(self):
self.keyNumberGlobal(10)
def green(self):
self.keyNumberGlobal(11)
def yellow(self):
self.keyNumberGlobal(12)
def blue(self):
self.keyNumberGlobal(13)
def menu(self):
self.keyNumberGlobal(14)
def info(self):
self.keyNumberGlobal(15)
def okClicked(self):
self.keyNumberGlobal(self["menu"].getSelectedIndex())
def up(self):
if not self.working:
self["menu"].up()
def down(self):
if not self.working:
self["menu"].down()
def left(self):
if not self.working:
self["menu"].pageUp()
def right(self):
if not self.working:
self["menu"].pageDown()
def getWebpageError(self, error=""):
print str(error)
self.session.openWithCallback(self.workingFinished, MessageBox, _("Error reading webpage!"), MessageBox.TYPE_ERROR)
def showFile(self, file):
try:
f = open(file, "r")
content = f.read()
f.close()
except:
content = _("Could not open the file %s!") % file
self.showInfo(translateBlock(content), " ")
def showEcmInfoFile(self, file=None):
if file is not None:
self.showFile("/tmp/"+file)
self.workingFinished()
def showCCcamGeneral(self, html):
if html.__contains__('<BR><BR>'):
idx = html.index('<BR><BR>')
idx2 = html.index('<BR></BODY>')
html = html[idx+8:idx2].replace("<BR>", "\n").replace("\n\n", "\n")
self.infoToShow = html
getPage(self.url + "/shares").addCallback(self.showCCcamGeneral2).addErrback(self.getWebpageError)
else:
self.showInfo(_("Error reading webpage!"), _("Error"))
def showCCcamGeneral2(self, html):
if html.__contains__("Welcome to CCcam"):
idx = html.index("Welcome to CCcam")
html = html[idx+17:]
idx = html.index(" ")
version = html[:idx]
self.infoToShow = "%s%s\n%s" % (_("Version: "), version, self.infoToShow)
if html.__contains__("Available shares:"):
idx = html.index("Available shares:")
html = html[idx+18:]
idx = html.index("\n")
html = html[:idx]
self.showInfo(translateBlock("%s %s\n%s" % (_("Available shares:"), html, self.infoToShow)), _("General"))
else:
self.showInfo(translateBlock(self.infoToShow), _("General"))
def showCCcamClients(self, html):
firstLine = True
clientList = []
infoList = []
lines = html.split("\n")
for l in lines:
if l.__contains__('|'):
if firstLine:
firstLine = False
else:
list = l.split('|')
if len(list) > 8:
username = list[1].replace(" ", "")
if username != "":
hostname = list[2].replace(" ", "")
connected = list[3].replace(" ", "")
idleTime = list[4].replace(" ", "")
ecm = list[5].replace(" ", "")
emm = list[6].replace(" ", "")
version = list[7].replace(" ", "")
share = list[8].replace(" ", "")
if version == "":
version = "N/A"
ecmEmm = "ECM: " + ecm + " - EMM: " + emm
infoList.append([username, _("Hostname: ") + hostname, _("Connected: ") + connected, _("Idle Time: ") + idleTime, _("Version: ") + version, _("Last used share: ") + share, ecmEmm])
clientList.append(username)
self.set_title = _("CCcam Client Info")
self.openSubMenu(clientList, infoList, self.set_title)
def showCCcamServers(self, html):
firstLine = True
infoList = []
lines = html.split("\n")
for l in lines:
if '|' in l:
if firstLine:
firstLine = False
else:
list = l.split('|')
if len(list) > 7:
hostname = list[1].replace(" ", "")
if hostname != "":
connected = list[2].replace(" ", "")
type = list[3].replace(" ", "")
version = list[4].replace(" ", "")
nodeid = list[5].replace(" ", "")
cards = list[6].replace(" ", "")
if version == "":
version = "N/A"
if nodeid == "":
nodeid = "N/A"
infoList.append([hostname, _("Cards: ") + cards, _("Type: ") + type, _("Version: ") + version, _("NodeID: ") + nodeid, _("Connected: ") + connected])
self.session.openWithCallback(self.workingFinished, CCcamInfoServerMenu, infoList, self.url)
def showCCcamShares(self, html):
firstLine = True
sharesList = []
infoList = []
lines = html.split("\n")
for l in lines:
if '|' in l:
if firstLine:
firstLine = False
else:
list = l.split('|')
if len(list) > 7:
hostname = list[1].replace(" ", "")
if hostname != "":
type = list[2].replace(" ", "")
caid = list[3].replace(" ", "")
system = list[4].replace(" ", "")
string = list[6].strip(" ")
idx = string.index(" ")
uphops = string[:idx]
maxdown = string[idx+1:].lstrip(" ")
if len(caid) == 3:
caid = "0" + caid
infoList.append([hostname, _("Type: ") + type, _("CaID: ") + caid, _("System: ") + system, _("Uphops: ") + uphops, _("Maxdown: ") + maxdown])
sharesList.append(hostname + " - " + _("CaID: ") + caid)
self.set_title = _("CCcam Shares Info")
self.openSubMenu(sharesList, infoList, self.set_title)
def showCCcamProviders(self, html):
firstLine = True
providersList = []
infoList = []
lines = html.split("\n")
for l in lines:
if '|' in l:
if firstLine:
firstLine = False
else:
list = l.split('|')
if len(list) > 5:
caid = list[1].replace(" ", "")
if caid != "":
provider = list[2].replace(" ", "")
providername = list[3].replace(" ", "")
system = list[4].replace(" ", "")
infoList.append([_("CaID: ") + caid, _("Provider: ") + provider, _("Provider Name: ") + providername, _("System: ") + system])
providersList.append(_("CaID: ") + caid + " - " + _("Provider: ") + provider)
self.set_title = _("CCcam Provider Info")
self.openSubMenu(providersList, infoList, self.set_title)
def showCCcamEntitlements(self, html):
if '<PRE>' in html:
idx = html.index('<PRE>')
idx2 = html.index('</PRE>')
html = html[idx+5:idx2].replace("\n\n", "\n")
if html == "":
html = _("No card inserted!")
self.showInfo(translateBlock(html), _("Entitlements"))
else:
self.showInfo(_("Error reading webpage!"), _("Entitlements"))
def showInfo(self, info, set_title):
self.session.openWithCallback(self.workingFinished, CCcamInfoInfoScreen, info, set_title)
def openSubMenu(self, list, infoList, set_title):
self.session.openWithCallback(self.workingFinished, CCcamInfoSubMenu, list, infoList, set_title)
def workingFinished(self, callback=None):
self.working = False
def showFreeMemory(self, result, retval, extra_args):
if retval == 0:
if result.__contains__("Total:"):
idx = result.index("Total:")
result = result[idx+6:]
list = [x for x in result.split(" ") if x != ""]
self.showInfo("%s\n\n %s %s\n %s %s\n %s %s" % (_("Free memory:"), _("Total:"), list[0], _("Used:"), list[1], _("Free:"), list[2]), _("Free memory"))
else:
self.showInfo(result, _("Free memory"))
else:
self.showInfo(str(result), _("Free memory"))
#############################################################
class CCcamInfoEcmInfoSelection(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("CCcam ECM Info"))
list = [x for x in listdir("/tmp/") if x.startswith('ecm') and x.endswith('.info')]
self["list"] = MenuList(list)
self["actions"] = ActionMap(["CCcamInfoActions"], {"ok": self.ok, "cancel": self.close}, -1)
def ok(self):
self.close(self["list"].getCurrent())
#############################################################
class CCcamInfoInfoScreen(Screen):
def __init__(self, session, info, set_title):
Screen.__init__(self, session)
Screen.setTitle(self, set_title)
self["text"] = ScrollLabel(info)
self["actions"] = ActionMap(["CCcamInfoActions"],
{
"ok": self.close,
"cancel": self.close,
"up": self["text"].pageUp,
"down": self["text"].pageDown,
"left": self["text"].pageUp,
"right": self["text"].pageDown,
}, -1)
#############################################################
class CCcamShareViewMenu(Screen, HelpableScreen):
def __init__(self, session, url):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self.session = session
Screen.setTitle(self, _("CCcam Share Info"))
self.url = url
self.list = []
self.providers = {}
self.uphop = -1
self.working = True
self["list"] = CCcamShareViewList([])
self["uphops"] = Label()
self["cards"] = Label()
self["providers"] = Label()
self["reshare"] = Label()
self["title"] = Label()
self["actions"] = HelpableNumberActionMap(self, "CCcamInfoActions",
{
"cancel": (self.exit, _("close share view")),
"0": (self.getUphop, _("show cards with uphop 0")),
"1": (self.getUphop, _("show cards with uphop 1")),
"2": (self.getUphop, _("show cards with uphop 2")),
"3": (self.getUphop, _("show cards with uphop 3")),
"4": (self.getUphop, _("show cards with uphop 4")),
"5": (self.getUphop, _("show cards with uphop 5")),
"6": (self.getUphop, _("show cards with uphop 6")),
"7": (self.getUphop, _("show cards with uphop 7")),
"8": (self.getUphop, _("show cards with uphop 8")),
"9": (self.getUphop, _("show cards with uphop 9")),
"green": (self.showAll, _("show all cards")),
"incUphop": (self.incUphop, _("increase uphop by 1")),
"decUphop": (self.decUphop, _("decrease uphop by 1")),
"ok": (self.getServer, _("get the cards' server")),
}, -1)
self.onLayoutFinish.append(self.getProviders)
def exit(self):
if not self.working:
self.close()
def getProviders(self):
getPage(self.url + "/providers").addCallback(self.readProvidersCallback).addErrback(self.readError)
def readError(self, error=None):
self.session.open(MessageBox, _("Error reading webpage!"), MessageBox.TYPE_ERROR)
self.working = False
def readSharesCallback(self, html):
firstLine = True
providerList = []
countList = []
shareList = []
reshareList = []
self.hostList = []
self.caidList = []
count = 0
totalcards = 0
totalproviders = 0
resharecards = 0
numberofreshare = 0
lines = html.split("\n")
for l in lines:
if '|' in l:
if firstLine:
firstLine = False
else:
list = l.split("|")
if len(list) > 7:
hostname = list[1].replace(" ", "")
if hostname != "":
if self.uphop == -1:
caid = list[3].replace(" ", "")
provider = list[5].replace(" ", "")
caidprovider = self.formatCaidProvider(caid, provider)
string = list[6].strip(" ")
idx = string.index(" ")
down = string[idx+1:].lstrip(" ")
if caidprovider not in providerList:
providerList.append(caidprovider)
count = 1
countList.append(count)
numberofcards = count
providername = self.providers.get(caidprovider, 'Multiple Providers given')
#if providername == 'Multiple Providers given':
# print caidprovider
numberofreshare = 0
if int(down)>0:
resharecards += 1
numberofreshare = 1
reshareList.append(numberofreshare)
shareList.append(CCcamShareViewListEntry(caidprovider, providername, str(numberofcards), str(numberofreshare)))
self.list.append([caidprovider, providername, numberofcards, numberofreshare])
totalproviders += 1
elif caidprovider in providerList:
i = providerList.index(caidprovider)
count = countList[i]
count += 1
countList[i] = count
numberofcards = count
if int(down)>0:
reshare = reshareList[i]
reshare += 1
reshareList[i] = reshare
numberofreshare = reshare
resharecards += 1
elif int(down)==0:
numberofreshare = reshareList[i]
providername = self.providers.get(caidprovider, 'Multiple Providers given')
shareList[i] = CCcamShareViewListEntry(caidprovider, providername, str(numberofcards), str(numberofreshare))
self.hostList.append(hostname)
self.caidList.append(caidprovider)
totalcards += 1
ulevel = _("All")
else:
updown = list[6].strip(" ")
idx = updown.index(" ")
up = updown[:idx]
down = updown[idx+1:].lstrip(" ")
ulevel = str(self.uphop)
if int(up) == self.uphop:
caid = list[3].replace(" ", "")
provider = list[5].replace(" ", "")
caidprovider = self.formatCaidProvider(caid, provider)
if caidprovider not in providerList:
providerList.append(caidprovider)
count = 1
countList.append(count)
numberofcards = count
providername = self.providers.get(caidprovider, 'Multiple Providers given')
#if providername == 'Multiple Providers given':
# print caidprovider
numberofreshare = 0
if int(down)>0:
resharecards += 1
numberofreshare = 1
reshareList.append(numberofreshare)
shareList.append(CCcamShareViewListEntry(caidprovider, providername, str(numberofcards), str(numberofreshare)))
self.list.append([caidprovider, providername, numberofcards, numberofreshare])
totalproviders += 1
elif caidprovider in providerList:
i = providerList.index(caidprovider)
count = countList[i]
count += 1
countList[i] = count
numberofcards = count
if int(down)>0:
reshare = reshareList[i]
reshare += 1
#if caidprovider == "05021700":
# print "re: %d" %(reshare)
reshareList[i] = reshare
numberofreshare = reshare
resharecards += 1
elif int(down)==0:
numberofreshare = reshareList[i]
providername = self.providers.get(caidprovider, 'Multiple Providers given')
shareList[i] = CCcamShareViewListEntry(caidprovider, providername, str(numberofcards), str(numberofreshare))
self.hostList.append(hostname)
self.caidList.append(caidprovider)
totalcards += 1
#maxdown = list[6]
#while maxdown.startswith(" "):
#maxdown = maxdown[1:]
#down = maxdown
#if int(down)>0:
#resharecards +=1
self.instance.setTitle("%s (%s %d) %s %s" % (_("Share View"), _("Total cards:"), totalcards, _("Hops:"), ulevel))
self["title"].setText("%s (%s %d) %s %s" % (_("Share View"), _("Total cards:"), totalcards, _("Hops:"), ulevel))
self["list"].setList(shareList)
self["uphops"].setText("%s %s" %(_("Hops:"), ulevel))
self["cards"].setText("%s %s" %(_("Total cards:"), totalcards))
self["providers"].setText("%s %s" %(_("Providers:"), totalproviders))
self["reshare"].setText("%s %d" %(_("Reshare:"), resharecards))
self.working = False
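# The provider list is fetched first so share rows can be labelled with
# provider names; readProvidersCallback then chains to the share list.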
def readProvidersCallback(self, html):
firstLine = True
lines = html.split("\n")
for l in lines:
if '|' in l:
if firstLine:
firstLine = False
else:
list = l.split('|')
if len(list) > 5:
caid = list[1].replace(" ", "")
if caid != "":
provider = list[2].replace(" ", "")
providername = list[3]
caidprovider = self.formatCaidProvider(caid, provider)
self.providers.setdefault(caidprovider, providername)
getPage(self.url + "/shares").addCallback(self.readSharesCallback).addErrback(self.readError)
def formatCaidProvider(self, caid, provider):
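# Build a grouping key from caid + provider: zero-pad both, then collapse
# caid families where the provider id is not meaningful for grouping.
# Example: formatCaidProvider("100", "68") returns "01000068".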
pos = provider.find(",")
if pos != -1:
provider = provider[pos+1:]
pos = provider.find(",")
if pos != -1:
provider = provider[0:pos]
if len(provider) == 0:
provider = "0000"
elif len(provider) == 1:
provider = "000" + provider
elif len(provider) == 2:
provider = "00" + provider
elif len(provider) == 3:
provider = "0" + provider
if len(caid) == 3:
caid = "0" + caid
if caid.startswith("0500") and len(provider) == 5:
caid = "050"
elif caid.startswith("0500") and len(provider) == 6:
caid = "05"
if caid.startswith("06"):
caidprovider = caid
elif caid.startswith("0d22"):
caidprovider = caid
elif caid.startswith("0d05"):
caidprovider = caid
elif caid.startswith("09"):
caidprovider = caid
elif caid.startswith("17"):
caidprovider = caid
elif caid.startswith("18"):
caidprovider = caid
elif caid.startswith("4a"):
caidprovider = caid
else:
caidprovider = caid + provider
return caidprovider
def getUphop(self, uphop):
self.uphop = uphop
self.getProviders()
def showAll(self):
self.uphop = -1
self.getProviders()
def incUphop(self):
if self.uphop < 9:
self.uphop += 1
self.getProviders()
def decUphop(self):
if self.uphop > -1:
self.uphop -= 1
self.getProviders()
def getServer(self):
server = _("Servers:") + " \n"
sel = self["list"].getCurrent()
if sel is not None:
e = 0
while e < len(self.caidList):
if sel[0][0] == self.caidList[e]:
pos = self.hostList[e].find(":")
if pos != -1:
server += self.hostList[e][0:pos] + "\n"
else:
server += self.hostList[e] + "\n"
e += 1
self.session.open(CCcamInfoInfoScreen, server, _("Servers"))
#############################################################
class CCcamInfoSubMenu(Screen):
def __init__(self, session, list, infoList, set_title):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _(set_title))
self.infoList = infoList
self["list"] = MenuList(list)
self["info"] = Label()
self["actions"] = ActionMap(["CCcamInfoActions"], {"ok": self.okClicked, "cancel": self.close}, -1)
self["list"].onSelectionChanged.append(self.showInfo)
self.onLayoutFinish.append(self.showInfo)
def okClicked(self):
info = self.getInfo()
if info != "":
self.session.open(MessageBox, info, MessageBox.TYPE_INFO)
def showInfo(self):
info = self.getInfo()
self["info"].setText(info)
def getInfo(self):
try:
idx = self["list"].getSelectedIndex()
info = ""
infoList = self.infoList[idx]
for x in infoList:
info += x + "\n"
return info
except:
return ""
#############################################################
class CCcamInfoServerMenu(Screen):
def __init__(self, session, infoList, url):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("CCcam Server Info"))
self.infoList = infoList
self.url = url
list = []
for x in self.infoList:
if x[5].replace(_("Connected: "), "") == "": #offline - red
list.append(CCcamServerListEntry(x[0], "red"))
elif x[1] == _("Cards: 0"): #online with no card - blue
list.append(CCcamServerListEntry(x[0], "blue"))
else: #online with cards - green
list.append(CCcamServerListEntry(x[0], "green"))
self["list"] = CCcamList(list)
self["info"] = Label()
self["actions"] = ActionMap(["CCcamInfoActions"], {"ok": self.okClicked, "cancel": self.close}, -1)
self["list"].onSelectionChanged.append(self.showInfo)
self.onLayoutFinish.append(self.showInfo)
def showInfo(self):
info = self.getInfo()
self["info"].setText(info)
def getInfo(self):
try:
idx = self["list"].getSelectedIndex()
info = ""
infoList = self.infoList[idx]
for x in infoList:
info += x + "\n"
return info
except:
return ""
def okClicked(self):
sel = self["list"].getCurrent()
if sel is not None:
self.session.open(CCcamInfoShareInfo, sel[0], self.url)
#############################################################
class CCcamInfoRemoteBox:
def __init__(self, name, ip, username, password, port):
self.name = name
self.ip = ip
self.username = username
self.password = password
self.port = port
#############################################################
class CCcamInfoConfigMenu(ConfigListScreen, Screen):
def __init__(self, session, profile):
Screen.__init__(self, session)
Screen.setTitle(self, _("CCcam Info Setup"))
config.cccaminfo.name.value = profile.name
config.cccaminfo.ip.value = profile.ip
config.cccaminfo.username.value = profile.username
config.cccaminfo.password.value = profile.password
config.cccaminfo.port.value = profile.port
ConfigListScreen.__init__(self, [
getConfigListEntry(_("Name:"), config.cccaminfo.name),
getConfigListEntry(_("IP:"), config.cccaminfo.ip),
getConfigListEntry(_("Username:"), config.cccaminfo.username),
getConfigListEntry(_("Password:"), config.cccaminfo.password),
getConfigListEntry(_("Port:"), config.cccaminfo.port)])
self["actions"] = ActionMap(["CCcamInfoActions"], {"ok": self.okClicked, "cancel": self.exit}, -2)
def okClicked(self):
self.close(CCcamInfoRemoteBox(config.cccaminfo.name.value, config.cccaminfo.ip.value, config.cccaminfo.username.value, config.cccaminfo.password.value, config.cccaminfo.port.value))
def exit(self):
self.close(None)
#############################################################
class CCcamInfoRemoteBoxMenu(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("CCcam Remote Info"))
self.list = []
self.profiles = []
self["key_red"] = Label(_("Delete"))
self["key_green"] = Label(_("New"))
self["key_yellow"] = Label(_("Location"))
self["key_blue"] = Label(_("Edit"))
self["list"] = MenuList([])
self["actions"] = ActionMap(["CCcamInfoActions"],
{
"cancel": self.exit,
"ok": self.profileSelected,
"red": self.delete,
"green": self.new,
"yellow": self.location,
"blue": self.edit
}, -1)
self.onLayoutFinish.append(self.readProfiles)
def readProfiles(self):
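# Profiles are stored one per line in the form: name|ip|username|password|port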
try:
f = open(config.cccaminfo.profiles.value, "r")
content = f.read()
f.close()
except:
content = ""
profiles = content.split("\n")
for profile in profiles:
if profile.__contains__("|"):
tmp = profile.split("|")
if len(tmp) == 5:
name = tmp[0]
ip = tmp[1]
username = tmp[2]
password = tmp[3]
port = int(tmp[4])
self.list.append(name)
self.profiles.append(CCcamInfoRemoteBox(name, ip, username, password, port))
self["list"].setList(self.list)
def saveConfigs(self):
content = ""
for x in self.profiles:
content = "%s\n%s|%s|%s|%s|%d" % (content, x.name, x.ip, x.username, x.password, x.port)
try:
f = open(config.cccaminfo.profiles.value, "w")
f.write(content)
f.close()
except:
pass
def exit(self):
self.saveConfigs()
self.close(None)
def profileSelected(self):
self.saveConfigs()
if len(self.list) > 0:
idx = self["list"].getSelectionIndex()
cur = self.profiles[idx]
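# Build the web interface URL, embedding basic-auth credentials when both
# username and password are set.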
if cur.ip == "":
url = None
else:
if cur.username != "" and cur.password != "":
url = "http://%s:%s@%s:%d" % (cur.username, cur.password, cur.ip, cur.port)
else:
url = "http://%s:%d" % (cur.ip, cur.port)
self.close(url)
def delete(self):
if len(self.list) > 0:
idx = self["list"].getSelectionIndex()
del self.list[idx]
del self.profiles[idx]
self["list"].setList(self.list)
def new(self):
self.session.openWithCallback(self.newCallback, CCcamInfoConfigMenu, CCcamInfoRemoteBox("Profile", "192.168.2.12", "", "", 16001))
def newCallback(self, callback):
if callback:
self.list.append(callback.name)
self.profiles.append(callback)
self["list"].setList(self.list)
def location(self):
self.session.openWithCallback(self.locationCallback, LocationBox)
def locationCallback(self, callback):
if callback:
config.cccaminfo.profiles.value = ("%s/CCcamInfo.profiles"%callback).replace("//", "/")
config.cccaminfo.profiles.save()
self.list = []
self.profiles = []
self.readProfiles()
def edit(self):
if len(self.list) > 0:
idx = self["list"].getSelectionIndex()
self.session.openWithCallback(self.editCallback, CCcamInfoConfigMenu, self.profiles[idx])
def editCallback(self, callback):
if callback:
idx = self["list"].getSelectionIndex()
del self.list[idx]
del self.profiles[idx]
self.list.append(callback.name)
self.profiles.append(callback)
self["list"].setList(self.list)
#############################################################
class CCcamInfoShareInfo(Screen):
def __init__(self, session, hostname, url):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("CCcam Share Info"))
self.hostname = hostname
self.url = url
self.list = []
self.uphops = -1
self.maxdown = -1
self.working = True
self["key_red"] = Label(_("Uphops +"))
self["key_green"] = Label(_("Uphops -"))
self["key_yellow"] = Label(_("Maxdown +"))
self["key_blue"] = Label(_("Maxdown -"))
self["list"] = CCcamShareList([])
self["actions"] = ActionMap(["CCcamInfoActions"],
{
"cancel": self.exit,
"red": self.uhopsPlus,
"green": self.uhopsMinus,
"yellow": self.maxdownPlus,
"blue": self.maxdownMinus
}, -1)
self.onLayoutFinish.append(self.readShares)
def exit(self):
if not self.working:
self.close()
def readShares(self):
getPage(self.url + "/shares").addCallback(self.readSharesCallback).addErrback(self.readSharesError)
def readSharesError(self, error=None):
self.session.open(MessageBox, _("Error reading webpage!"), MessageBox.TYPE_ERROR)
self.working = False
def readSharesCallback(self, html):
firstLine = True
shareList = []
count = 0
lines = html.split("\n")
for l in lines:
if '|' in l:
if firstLine:
firstLine = False
else:
list = l.split("|")
if len(list) > 7:
hostname = list[1].replace(" ", "")
if (self.hostname == "None" or self.hostname == hostname) and hostname != "":
type = list[2].replace(" ", "")
caid = list[3].replace(" ", "")
system = list[4].replace(" ", "")
string = list[6].strip(" ")
idx = string.index(" ")
uphops = string[:idx]
maxdown = string[idx+1:].lstrip(" ")
if len(caid) == 3:
caid = "0" + caid
shareList.append(CCcamShareListEntry(hostname, type, caid, system, uphops, maxdown))
self.list.append([hostname, type, caid, system, uphops, maxdown])
count += 1
if self.uphops < 0:
textUhops = _("All")
else:
textUhops = str(self.uphops)
if self.maxdown < 0:
textMaxdown = _("All")
else:
textMaxdown = str(self.maxdown)
self.instance.setTitle("%s %d (%s%s / %s%s)" % (_("Available shares:"), count, _("Uphops: "), textUhops, _("Maxdown: "), textMaxdown))
self["list"].setList(shareList)
self.working = False
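# The colour keys cycle the uphops/maxdown filters through 0..9, wrapping
# back to -1 which stands for "All".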
def uhopsPlus(self):
if not self.working:
self.uphops += 1
if self.uphops > 9:
self.uphops = -1
self.refreshList()
def uhopsMinus(self):
if not self.working:
self.uphops -= 1
if self.uphops < -1:
self.uphops = 9
self.refreshList()
def maxdownPlus(self):
if not self.working:
self.maxdown += 1
if self.maxdown > 9:
self.maxdown = -1
self.refreshList()
def maxdownMinus(self):
if not self.working:
self.maxdown -= 1
if self.maxdown < -1:
self.maxdown = 9
self.refreshList()
def refreshList(self):
shareList = []
count = 0
self.working = True
for x in self.list:
(hostname, type, caid, system, uphops, maxdown) = x
if (uphops == str(self.uphops) or self.uphops == -1) and (maxdown == str(self.maxdown) or self.maxdown == -1):
shareList.append(CCcamShareListEntry(hostname, type, caid, system, uphops, maxdown))
count += 1
if self.uphops < 0:
textUhops = _("All")
else:
textUhops = str(self.uphops)
if self.maxdown < 0:
textMaxdown = _("All")
else:
textMaxdown = str(self.maxdown)
self.instance.setTitle("%s %d (%s%s / %s%s)" % (_("Available shares:"), count, _("Uphops: "), textUhops, _("Maxdown: "), textMaxdown))
self["list"].setList(shareList)
self.working = False
#############################################################
class CCcamInfoConfigSwitcher(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("CCcam Config Switcher"))
self["key_red"] = Label(_("Delete"))
self["key_green"] = Label(_("Activate"))
self["key_yellow"] = Label(_("Rename"))
self["key_blue"] = Label(_("Content"))
self["list"] = CCcamConfigList([])
self["actions"] = ActionMap(["CCcamInfoActions"],
{
"ok": self.activate,
"cancel": self.close,
"red": self.delete,
"green": self.activate,
"yellow": self.rename,
"blue": self.showContent
}, -1)
self.onLayoutFinish.append(self.showConfigs)
def showConfigs(self):
list = []
try:
files = listdir("/var/etc")
except:
files = []
for file in files:
if file.startswith("CCcam_") and file.endswith(".cfg"):
list.append(CCcamConfigListEntry("/var/etc/"+file))
self["list"].setList(list)
def delete(self):
fileName = self["list"].getCurrent()
if fileName is not None:
self.fileToDelete = fileName[0]
self.session.openWithCallback(self.deleteConfirmed, MessageBox, (_("Delete %s?") % self.fileToDelete))
def deleteConfirmed(self, yesno):
if yesno:
remove(self.fileToDelete)
if fileExists(self.fileToDelete):
self.session.open(MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
else:
self.session.open(MessageBox, _("Deleted %s!") % self.fileToDelete, MessageBox.TYPE_INFO)
self.showConfigs()
def activate(self):
fileName = self["list"].getCurrent()
if fileName is not None:
fileName = fileName[0]
# Delete old backup
backupFile = "%s.backup" % CFG
if fileExists(backupFile):
remove(backupFile)
# Create a backup of the original /var/etc/CCcam.cfg file
rename(CFG, backupFile)
# Now copy the selected cfg file
system("cp -f %s %s" % (fileName, CFG))
self.showConfigs()
def rename(self):
fileName = self["list"].getCurrent()
if fileName is not None:
self.fileToRename = fileName[0]
(name, sel) = getConfigNameAndContent(self.fileToRename)
self.session.openWithCallback(self.renameCallback, VirtualKeyBoard, title=_("Rename to:"), text=name)
def renameCallback(self, callback):
if callback is not None:
try:
f = open(self.fileToRename, "r")
content = f.read()
f.close()
except:
content = None
if content is not None:
content = content.replace("\r", "\n")
if content.startswith("#CONFIGFILE NAME=") and content.__contains__("\n"):
idx = content.index("\n")
content = content[:idx+2]
content = "#CONFIGFILE NAME=%s\n%s" % (callback, content)
try:
f = open(self.fileToRename, "w")
f.write(content)
f.close()
self.session.open(MessageBox, _("Renamed %s!") % self.fileToRename, MessageBox.TYPE_INFO)
self.showConfigs()
except:
self.session.open(MessageBox, _("Rename failed!"), MessageBox.TYPE_ERROR)
else:
self.session.open(MessageBox, _("Rename failed!"), MessageBox.TYPE_ERROR)
def showContent(self):
fileName = self["list"].getCurrent()
if fileName is not None:
try:
f = open(fileName[0], "r")
content = f.read()
f.close()
except:
content = _("Could not open the file %s!") % fileName[0]
self.session.open(CCcamInfoInfoScreen, content, _("CCcam Config Switcher"))
#############################################################
class CCcamInfoMenuConfig(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("CCcam Info Config"))
self["key_red"] = Label(_("Cancel"))
self["key_green"] = Label(_("Save"))
self["key_yellow"] = Label(_("Location"))
self["list"] = CCcamConfigList([])
self.getBlacklistedMenuEntries()
self["actions"] = ActionMap(["CCcamInfoActions"],
{
"ok": self.changeState,
"cancel": self.close,
"red": self.close,
"green": self.save,
"yellow": self.location
}, -1)
self.onLayoutFinish.append(self.showConfigs)
def getBlacklistedMenuEntries(self):
try:
f = open(config.cccaminfo.blacklist.value, "r")
content = f.read()
f.close()
self.blacklisted = content.split("\n")
except:
self.blacklisted = []
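# Entries listed in the blacklist file are hidden from the main menu;
# changeState toggles the selected entry in and out of that list.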
def changeState(self):
cur = self["list"].getCurrent()
if cur is not None:
cur = cur[0]
if cur in self.blacklisted:
self.blacklisted.remove(cur)
else:
self.blacklisted.append(cur)
self.showConfigs()
def showConfigs(self):
list = []
for x in menu_list:
if x != _("Menu config"):
if x in self.blacklisted:
list.append(CCcamMenuConfigListEntry(x, True))
else:
list.append(CCcamMenuConfigListEntry(x, False))
self["list"].setList(list)
def save(self):
content = ""
for x in self.blacklisted:
content = content + x + "\n"
content = content.replace("\n\n", "\n")
try:
f = open(config.cccaminfo.blacklist.value, "w")
f.write(content)
f.close()
self.session.open(MessageBox, _("Configfile %s saved.") % config.cccaminfo.blacklist.value, MessageBox.TYPE_INFO)
except:
self.session.open(MessageBox, _("Could not save configfile %s!") % config.cccaminfo.blacklist.value, MessageBox.TYPE_ERROR)
def location(self):
self.session.openWithCallback(self.locationCallback, LocationBox)
def locationCallback(self, callback):
if callback:
config.cccaminfo.blacklist.value = ("%s/CCcamInfo.blacklisted"%callback).replace("//", "/")
config.cccaminfo.blacklist.save()
|
kingvuplus/PKT-gui2
|
lib/python/Screens/CCcamInfo.py
|
Python
|
gpl-2.0
| 51,455
| 0.030609
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-28 13:41
# flake8: noqa
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import normandy.recipes.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('recipes', '0033_migrate_surveys'),
]
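# Collapses the old Approval/ApprovalRequest workflow into RecipeRevision
# rows and points Recipe at its latest revision.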
operations = [
migrations.CreateModel(
name='RecipeRevision',
fields=[
('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('comment', models.TextField()),
('name', models.CharField(max_length=255)),
('arguments_json', models.TextField(default='{}', validators=[normandy.recipes.validators.validate_json])),
('filter_expression', models.TextField()),
('action', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipe_revisions', to='recipes.Action')),
('parent', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child', to='recipes.RecipeRevision')),
],
),
migrations.RemoveField(
model_name='approval',
name='creator',
),
migrations.RemoveField(
model_name='approvalrequest',
name='approval',
),
migrations.RemoveField(
model_name='approvalrequest',
name='creator',
),
migrations.RemoveField(
model_name='approvalrequest',
name='recipe',
),
migrations.RemoveField(
model_name='approvalrequestcomment',
name='approval_request',
),
migrations.RemoveField(
model_name='approvalrequestcomment',
name='creator',
),
migrations.AlterModelOptions(
name='recipe',
options={'ordering': ['-enabled', '-latest_revision__updated']},
),
migrations.RemoveField(
model_name='recipe',
name='approval',
),
migrations.DeleteModel(
name='Approval',
),
migrations.DeleteModel(
name='ApprovalRequest',
),
migrations.DeleteModel(
name='ApprovalRequestComment',
),
migrations.AddField(
model_name='reciperevision',
name='recipe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revisions', to='recipes.Recipe'),
),
migrations.AddField(
model_name='reciperevision',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='recipe_revisions', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='recipe',
name='latest_revision',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='latest_for_recipe', to='recipes.RecipeRevision'),
),
migrations.AlterField(
model_name='recipe',
name='action',
field=models.ForeignKey(to='recipes.Action', null=True),
),
migrations.AlterField(
model_name='recipe',
name='name',
field=models.CharField(max_length=255, unique=False, null=True),
),
]
|
Osmose/normandy
|
recipe-server/normandy/recipes/migrations/0034_recipe_revisions.py
|
Python
|
mpl-2.0
| 3,780
| 0.002646
|
"""
Creates a list of studies currently being used for synthesis.
"""
import re
#from stephen_desktop_conf import *
from microbes import studytreelist as microbelist
from plants import studytreelist as plantslist
from metazoa import studytreelist as metalist
from fungi import studytreelist as fungilist
studytreelist = []
studytreelist.extend(plantslist)
studytreelist.extend(metalist)
studytreelist.extend(fungilist)
studytreelist.extend(microbelist)
for i in studytreelist:
studyid = i.split('_')[0]
print studyid + ".json"
|
OpenTreeOfLife/gcmdr
|
collect_study_ids.py
|
Python
|
bsd-2-clause
| 537
| 0.007449
|
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: More tests
import socket
import unittest
from StringIO import StringIO
from urlparse import urlparse
# TODO: mock http connection class with more control over headers
from test.unit.proxy.test_server import fake_http_connect
from swift.common import client as c
class TestHttpHelpers(unittest.TestCase):
def test_quote(self):
value = 'standard string'
self.assertEquals('standard%20string', c.quote(value))
value = u'\u0075nicode string'
self.assertEquals('unicode%20string', c.quote(value))
def test_http_connection(self):
url = 'http://www.test.com'
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPConnection))
url = 'https://www.test.com'
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPSConnection))
url = 'ftp://www.test.com'
self.assertRaises(c.ClientException, c.http_connection, url)
class TestClientException(unittest.TestCase):
def test_is_exception(self):
self.assertTrue(issubclass(c.ClientException, Exception))
def test_format(self):
exc = c.ClientException('something failed')
self.assertTrue('something failed' in str(exc))
test_kwargs = (
'scheme',
'host',
'port',
'path',
'query',
'status',
'reason',
'device',
)
for value in test_kwargs:
kwargs = {
'http_%s' % value: value,
}
exc = c.ClientException('test', **kwargs)
self.assertTrue(value in str(exc))
class TestJsonImport(unittest.TestCase):
def tearDown(self):
try:
import json
except ImportError:
pass
else:
reload(json)
try:
import simplejson
except ImportError:
pass
else:
reload(simplejson)
def test_any(self):
self.assertTrue(hasattr(c, 'json_loads'))
def test_no_simplejson(self):
# break simplejson
try:
import simplejson
except ImportError:
# not installed, so we don't have to break it for these tests
pass
else:
delattr(simplejson, 'loads')
reload(c)
try:
from json import loads
except ImportError:
# this case is tested in test_no_json
pass
else:
self.assertEquals(loads, c.json_loads)
def test_no_json(self):
# first break simplejson
try:
import simplejson
except ImportError:
# not installed, so we don't have to break it for these tests
pass
else:
delattr(simplejson, 'loads')
# then break json
try:
import json
except ImportError:
# not installed, so we don't have to break it for these tests
_orig_dumps = None
else:
# before we break json, grab a copy of the orig_dumps function
_orig_dumps = json.dumps
delattr(json, 'loads')
reload(c)
if _orig_dumps:
# basic test of swift.common.client.json_loads using json.loads
data = {
'string': 'value',
'int': 0,
'bool': True,
'none': None,
}
json_string = _orig_dumps(data)
else:
# even more basic test using a hand encoded json string
data = ['value1', 'value2']
json_string = "['value1', 'value2']"
self.assertEquals(data, c.json_loads(json_string))
self.assertRaises(AttributeError, c.json_loads, self)
class MockHttpTest(unittest.TestCase):
def setUp(self):
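# Replace swift.common.client.http_connection with a wrapper that keeps
# the real URL parsing but substitutes a canned fake HTTP connection.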
def fake_http_connection(*args, **kwargs):
_orig_http_connection = c.http_connection
def wrapper(url, proxy=None):
parsed, _conn = _orig_http_connection(url, proxy=proxy)
conn = fake_http_connect(*args, **kwargs)()
def request(*args, **kwargs):
return
conn.request = request
conn.has_been_read = False
_orig_read = conn.read
def read(*args, **kwargs):
conn.has_been_read = True
return _orig_read(*args, **kwargs)
conn.read = read
return parsed, conn
return wrapper
self.fake_http_connection = fake_http_connection
def tearDown(self):
reload(c)
# TODO: following tests are placeholders, need more tests, better coverage
class TestGetAuth(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(url, None)
self.assertEquals(token, None)
class TestGetAccount(MockHttpTest):
def test_no_content(self):
c.http_connection = self.fake_http_connection(204)
value = c.get_account('http://www.test.com', 'asdf')[1]
self.assertEquals(value, [])
class TestHeadAccount(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.head_account('http://www.tests.com', 'asdf')
# TODO: Hmm. This doesn't really test too much as it uses a fake that
# always returns the same dict. I guess it "exercises" the code, so
# I'll leave it for now.
self.assertEquals(type(value), dict)
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_account,
'http://www.tests.com', 'asdf')
class TestGetContainer(MockHttpTest):
def test_no_content(self):
c.http_connection = self.fake_http_connection(204)
value = c.get_container('http://www.test.com', 'asdf', 'asdf')[1]
self.assertEquals(value, [])
class TestHeadContainer(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_container,
'http://www.test.com', 'asdf', 'asdf',
)
class TestPutContainer(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.put_container('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(value, None)
class TestDeleteContainer(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.delete_container('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(value, None)
class TestGetObject(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.get_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestHeadObject(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestPutObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
value = c.put_object(*args)
self.assertTrue(isinstance(value, basestring))
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
self.assertRaises(c.ClientException, c.put_object, *args)
class TestPostObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', {})
value = c.post_object(*args)
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.post_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf', {})
class TestDeleteObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.delete_object('http://www.test.com', 'asdf', 'asdf', 'asdf')
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.delete_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestConnection(MockHttpTest):
def test_instance(self):
conn = c.Connection('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(conn.retries, 5)
def test_retry(self):
c.http_connection = self.fake_http_connection(500)
def quick_sleep(*args):
pass
c.sleep = quick_sleep
conn = c.Connection('http://www.test.com', 'asdf', 'asdf')
self.assertRaises(c.ClientException, conn.head_account)
self.assertEquals(conn.attempts, conn.retries + 1)
def test_resp_read_on_server_error(self):
c.http_connection = self.fake_http_connection(500)
conn = c.Connection('http://www.test.com', 'asdf', 'asdf', retries=0)
def get_auth(*args, **kwargs):
return 'http://www.new.com', 'new'
conn.get_auth = get_auth
self.url, self.token = conn.get_auth()
method_signatures = (
(conn.head_account, []),
(conn.get_account, []),
(conn.head_container, ('asdf',)),
(conn.get_container, ('asdf',)),
(conn.put_container, ('asdf',)),
(conn.delete_container, ('asdf',)),
(conn.head_object, ('asdf', 'asdf')),
(conn.get_object, ('asdf', 'asdf')),
(conn.put_object, ('asdf', 'asdf', 'asdf')),
(conn.post_object, ('asdf', 'asdf', {})),
(conn.delete_object, ('asdf', 'asdf')),
)
for method, args in method_signatures:
self.assertRaises(c.ClientException, method, *args)
try:
self.assertTrue(conn.http_conn[1].has_been_read)
except AssertionError:
msg = '%s did not read resp on server error' % method.__name__
self.fail(msg)
except Exception, e:
raise e.__class__("%s - %s" % (method.__name__, e))
def test_reauth(self):
c.http_connection = self.fake_http_connection(401)
def get_auth(*args, **kwargs):
return 'http://www.new.com', 'new'
def swap_sleep(*args):
self.swap_sleep_called = True
c.get_auth = get_auth
c.http_connection = self.fake_http_connection(200)
c.sleep = swap_sleep
self.swap_sleep_called = False
conn = c.Connection('http://www.test.com', 'asdf', 'asdf',
preauthurl='http://www.old.com',
preauthtoken='old',
)
self.assertEquals(conn.attempts, 0)
self.assertEquals(conn.url, 'http://www.old.com')
self.assertEquals(conn.token, 'old')
value = conn.head_account()
self.assertTrue(self.swap_sleep_called)
self.assertEquals(conn.attempts, 2)
self.assertEquals(conn.url, 'http://www.new.com')
self.assertEquals(conn.token, 'new')
def test_reset_stream(self):
class LocalContents(object):
def __init__(self, tell_value=0):
self.already_read = False
self.seeks = []
self.tell_value = tell_value
def tell(self):
return self.tell_value
def seek(self, position):
self.seeks.append(position)
self.already_read = False
def read(self, size=-1):
if self.already_read:
return ''
else:
self.already_read = True
return 'abcdef'
class LocalConnection(object):
def putrequest(self, *args, **kwargs):
return
def putheader(self, *args, **kwargs):
return
def endheaders(self, *args, **kwargs):
return
def send(self, *args, **kwargs):
raise socket.error('oops')
def request(self, *args, **kwargs):
return
def getresponse(self, *args, **kwargs):
self.status = 200
return self
def getheader(self, *args, **kwargs):
return ''
def read(self, *args, **kwargs):
return ''
def local_http_connection(url, proxy=None):
parsed = urlparse(url)
return parsed, LocalConnection()
orig_conn = c.http_connection
try:
c.http_connection = local_http_connection
conn = c.Connection('http://www.example.com', 'asdf', 'asdf',
retries=1, starting_backoff=.0001)
contents = LocalContents()
exc = None
try:
conn.put_object('c', 'o', contents)
except socket.error, err:
exc = err
self.assertEquals(contents.seeks, [0])
self.assertEquals(str(exc), 'oops')
contents = LocalContents(tell_value=123)
exc = None
try:
conn.put_object('c', 'o', contents)
except socket.error, err:
exc = err
self.assertEquals(contents.seeks, [123])
self.assertEquals(str(exc), 'oops')
contents = LocalContents()
contents.tell = None
exc = None
try:
conn.put_object('c', 'o', contents)
except c.ClientException, err:
exc = err
self.assertEquals(contents.seeks, [])
self.assertEquals(str(exc), "put_object('c', 'o', ...) failure "
"and no ability to reset contents for reupload.")
finally:
c.http_connection = orig_conn
if __name__ == '__main__':
unittest.main()
|
Intel-bigdata/swift
|
test/unit/common/test_client.py
|
Python
|
apache-2.0
| 15,155
| 0.000264
|
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
from fabric.api import *
import argparse
import os
import sys
import time
from fabric.api import lcd
from fabric.contrib.files import exists
from fabvenv import virtualenv
from dlab.notebook_lib import *
from dlab.actions_lib import *
from dlab.fab import *
from dlab.common_lib import *
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_name', type=str, default='')
parser.add_argument('--dry_run', type=str, default='false')
parser.add_argument('--spark_version', type=str, default='')
parser.add_argument('--hadoop_version', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--spark_master', type=str, default='')
parser.add_argument('--region', type=str, default='')
parser.add_argument('--datalake_enabled', type=str, default='')
parser.add_argument('--r_enabled', type=str, default='')
args = parser.parse_args()
kernels_dir = '/home/' + args.os_user + '/.local/share/jupyter/kernels/'
cluster_dir = '/opt/' + args.cluster_name + '/'
local_jars_dir = '/opt/jars/'
spark_version = args.spark_version
hadoop_version = args.hadoop_version
scala_link = "http://www.scala-lang.org/files/archive/"
spark_link = "https://archive.apache.org/dist/spark/spark-" + spark_version + "/spark-" + spark_version + \
"-bin-hadoop" + hadoop_version + ".tgz"
def r_kernel(args):
spark_path = '/opt/{}/spark/'.format(args.cluster_name)
local('mkdir -p {}/r_{}/'.format(kernels_dir, args.cluster_name))
kernel_path = "{}/r_{}/kernel.json".format(kernels_dir, args.cluster_name)
template_file = "/tmp/{}/r_dataengine_template.json".format(args.cluster_name)
r_version = local("R --version | awk '/version / {print $3}'", capture=True)
with open(template_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('SPARK_PATH', spark_path)
text = text.replace('SPARK_VERSION', 'Spark-' + args.spark_version)
text = text.replace('R_KERNEL_VERSION', 'R-{}'.format(str(r_version)))
text = text.replace('SPARK_ACTION', 'init()')
text = text.replace('MASTER', args.spark_master)
with open(kernel_path, 'w') as f:
f.write(text)
def toree_kernel(args):
spark_path = '/opt/' + args.cluster_name + '/spark/'
scala_version = local('scala -e "println(scala.util.Properties.versionNumberString)"', capture=True)
local('mkdir -p ' + kernels_dir + 'toree_' + args.cluster_name + '/')
local('tar zxvf /tmp/{}/toree_kernel.tar.gz -C '.format(args.cluster_name) + kernels_dir + 'toree_' + args.cluster_name + '/')
kernel_path = kernels_dir + "toree_" + args.cluster_name + "/kernel.json"
template_file = "/tmp/{}/toree_dataengine_template.json".format(args.cluster_name)
with open(template_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('SPARK_VERSION', 'Spark-' + args.spark_version)
text = text.replace('SPARK_PATH', spark_path)
text = text.replace('OS_USER', args.os_user)
text = text.replace('MASTER', args.spark_master)
text = text.replace('SCALA_VERSION', scala_version)
with open(kernel_path, 'w') as f:
f.write(text)
local('touch /tmp/{}/kernel_var.json'.format(args.cluster_name))
local(
"PYJ=`find /opt/" + args.cluster_name +
"/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat " + kernel_path +
" | sed 's|PY4J|'$PYJ'|g' > /tmp/{}/kernel_var.json".format(args.cluster_name))
local('sudo mv /tmp/{}/kernel_var.json '.format(args.cluster_name) + kernel_path)
run_sh_path = kernels_dir + "toree_" + args.cluster_name + "/bin/run.sh"
template_sh_file = '/tmp/{}/run_template.sh'.format(args.cluster_name)
with open(template_sh_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('OS_USER', args.os_user)
with open(run_sh_path, 'w') as f:
f.write(text)
def pyspark_kernel(args):
spark_path = '/opt/' + args.cluster_name + '/spark/'
local('mkdir -p ' + kernels_dir + 'pyspark_' + args.cluster_name + '/')
kernel_path = kernels_dir + "pyspark_" + args.cluster_name + "/kernel.json"
template_file = "/tmp/{}/pyspark_dataengine_template.json".format(args.cluster_name)
with open(template_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('SPARK_VERSION', 'Spark-' + spark_version)
text = text.replace('SPARK_PATH', spark_path)
text = text.replace('PYTHON_SHORT_VERSION', '2.7')
text = text.replace('PYTHON_FULL_VERSION', '2.7')
text = text.replace('MASTER', args.spark_master)
text = text.replace('PYTHON_PATH', '/usr/bin/python2.7')
with open(kernel_path, 'w') as f:
f.write(text)
local('touch /tmp/{}/kernel_var.json'.format(args.cluster_name))
local(
"PYJ=`find /opt/{0}/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat {1} | sed 's|PY4J|'$PYJ'|g' | sed \'/PYTHONPATH\"\:/s|\(.*\)\"|\\1/home/{2}/caffe/python:/home/{2}/pytorch/build:\"|\' > /tmp/{0}/kernel_var.json".
format(args.cluster_name, kernel_path, args.os_user))
local('sudo mv /tmp/{}/kernel_var.json '.format(args.cluster_name) + kernel_path)
local('mkdir -p ' + kernels_dir + 'py3spark_' + args.cluster_name + '/')
kernel_path = kernels_dir + "py3spark_" + args.cluster_name + "/kernel.json"
template_file = "/tmp/{}/pyspark_dataengine_template.json".format(args.cluster_name)
with open(template_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('SPARK_VERSION', 'Spark-' + spark_version)
text = text.replace('SPARK_PATH', spark_path)
text = text.replace('MASTER', args.spark_master)
text = text.replace('PYTHON_SHORT_VERSION', '3.5')
text = text.replace('PYTHON_FULL_VERSION', '3.5')
text = text.replace('PYTHON_PATH', '/usr/bin/python3.5')
with open(kernel_path, 'w') as f:
f.write(text)
local('touch /tmp/{}/kernel_var.json'.format(args.cluster_name))
local(
"PYJ=`find /opt/{0}/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat {1} | sed 's|PY4J|'$PYJ'|g' | sed \'/PYTHONPATH\"\:/s|\(.*\)\"|\\1/home/{2}/caffe/python:/home/{2}/pytorch/build:\"|\' > /tmp/{0}/kernel_var.json".
format(args.cluster_name, kernel_path, args.os_user))
local('sudo mv /tmp/{}/kernel_var.json '.format(args.cluster_name) + kernel_path)
if __name__ == "__main__":
if args.dry_run == 'true':
parser.print_help()
else:
dataengine_dir_prepare('/opt/{}/'.format(args.cluster_name))
install_dataengine_spark(args.cluster_name, spark_link, spark_version, hadoop_version, cluster_dir, args.os_user,
args.datalake_enabled)
configure_dataengine_spark(args.cluster_name, local_jars_dir, cluster_dir, args.region, args.datalake_enabled)
pyspark_kernel(args)
toree_kernel(args)
if args.r_enabled == 'true':
r_kernel(args)
|
epam/DLab
|
infrastructure-provisioning/src/general/scripts/os/jupyter_dataengine_create_configs.py
|
Python
|
apache-2.0
| 7,919
| 0.003283
|
#!/usr/bin/env python
from nose.tools import ok_
from nose.tools import eq_
import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set
from networkx.algorithms.approximation import min_edge_dominating_set
class TestMinWeightDominatingSet:
def test_min_weighted_dominating_set(self):
graph = nx.Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 5)
graph.add_edge(2, 3)
graph.add_edge(2, 5)
graph.add_edge(3, 4)
graph.add_edge(3, 6)
graph.add_edge(5, 6)
vertices = set([1, 2, 3, 4, 5, 6])
# due to ties, this might be hard to test tight bounds
dom_set = min_weighted_dominating_set(graph)
for vertex in vertices - dom_set:
neighbors = set(graph.neighbors(vertex))
ok_(len(neighbors & dom_set) > 0, "Non dominating set found!")
def test_star_graph(self):
"""Tests that an approximate dominating set for the star graph,
even when the center node does not have the smallest integer
label, gives just the center node.
For more information, see #1527.
"""
# Create a star graph in which the center node has the highest
# label instead of the lowest.
G = nx.star_graph(10)
G = nx.relabel_nodes(G, {0: 9, 9: 0})
eq_(min_weighted_dominating_set(G), {9})
def test_min_edge_dominating_set(self):
graph = nx.path_graph(5)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges_iter():
if edge in dom_set:
continue
else:
u, v = edge
found = False
for dom_edge in dom_set:
found |= u == dom_edge[0] or u == dom_edge[1]
ok_(found, "Non adjacent edge found!")
graph = nx.complete_graph(10)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges_iter():
if edge in dom_set:
continue
else:
u, v = edge
found = False
for dom_edge in dom_set:
found |= u == dom_edge[0] or u == dom_edge[1]
ok_(found, "Non adjacent edge found!")
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py
|
Python
|
bsd-2-clause
| 2,410
| 0
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import (BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT)
from test_framework.util import (
assert_equal,
assert_raises,
assert_raises_jsonrpc,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [['-stopatheight=207']]
def run_test(self):
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
assert self.nodes[0].verifychain(4, 0)
def _test_getchaintxstats(self):
chaintxstats = self.nodes[0].getchaintxstats(1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 17000)
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0)
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
assert_equal(res['total_amount'], res3['total_amount'])
assert_equal(res['transactions'], res3['transactions'])
assert_equal(res['height'], res3['height'])
assert_equal(res['txouts'], res3['txouts'])
assert_equal(res['bogosize'], res3['bogosize'])
assert_equal(res['bestblock'], res3['bestblock'])
assert_equal(res['hash_serialized_2'], res3['hash_serialized_2'])
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_jsonrpc(-5, "Block not found",
node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generate(6)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generate(1)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].process.wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
self.nodes[0] = self.start_node(0, self.options.tmpdir)
assert_equal(self.nodes[0].getblockcount(), 207)
if __name__ == '__main__':
BlockchainTest().main()
|
kevcooper/bitcoin
|
test/functional/blockchain.py
|
Python
|
mit
| 5,880
| 0.00068
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import flags
FLAGS = flags.FLAGS
flags.DECLARE('iscsi_num_targets', 'cinder.volume.driver')
flags.DECLARE('policy_file', 'cinder.policy')
flags.DECLARE('volume_driver', 'cinder.volume.manager')
flags.DECLARE('xiv_proxy', 'cinder.volume.xiv')
def_vol_type = 'fake_vol_type'
def set_defaults(conf):
conf.set_default('default_volume_type', def_vol_type)
conf.set_default('volume_driver', 'cinder.volume.driver.FakeISCSIDriver')
conf.set_default('connection_type', 'fake')
conf.set_default('fake_rabbit', True)
conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake')
conf.set_default('iscsi_num_targets', 8)
conf.set_default('verbose', True)
conf.set_default('sql_connection', "sqlite://")
conf.set_default('sqlite_synchronous', False)
conf.set_default('policy_file', 'cinder/tests/policy.json')
conf.set_default('xiv_proxy', 'cinder.tests.test_xiv.XIVFakeProxyDriver')
|
NewpTone/stacklab-cinder
|
cinder/tests/fake_flags.py
|
Python
|
apache-2.0
| 1,721
| 0
|
# -*- coding: utf-8 -*-
"""Define the base module for server test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from influxdb.tests import using_pypy
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
from influxdb.client import InfluxDBClient
if not using_pypy:
from influxdb.dataframe_client import DataFrameClient
def _setup_influxdb_server(inst):
inst.influxd_inst = InfluxDbInstance(
inst.influxdb_template_conf,
udp_enabled=getattr(inst, 'influxdb_udp_enabled', False),
)
inst.cli = InfluxDBClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
if not using_pypy:
inst.cliDF = DataFrameClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
def _teardown_influxdb_server(inst):
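    # Keep the server's temporary directory around for inspection when the
    # test is tearing down because of an exception; remove it on a clean exit.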
remove_tree = sys.exc_info() == (None, None, None)
inst.influxd_inst.close(remove_tree=remove_tree)
class SingleTestCaseWithServerMixin(object):
"""Define the single testcase with server mixin.
A mixin for unittest.TestCase to start an influxdb server instance
in a temporary directory **for each test function/case**
"""
# 'influxdb_template_conf' attribute must be set
# on the TestCase class or instance.
setUp = _setup_influxdb_server
tearDown = _teardown_influxdb_server
class ManyTestCasesWithServerMixin(object):
"""Define the many testcase with server mixin.
    Same as SingleTestCaseWithServerMixin, but this mixin creates a
    single server instance for the whole class. Also pre-creates a fresh
database: 'db'.
"""
# 'influxdb_template_conf' attribute must be set on the class itself !
@classmethod
def setUpClass(cls):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
_setup_influxdb_server(cls)
def setUp(self):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
self.cli.create_database('db')
@classmethod
def tearDownClass(cls):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
_teardown_influxdb_server(cls)
def tearDown(self):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
self.cli.drop_database('db')
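# Hypothetical usage sketch (not part of the original module): a concrete
# test case combines one of the mixins with unittest.TestCase and points it
# at a template config file, e.g.
#   class MyServerTest(ManyTestCasesWithServerMixin, unittest.TestCase):
#       influxdb_template_conf = 'path/to/influxdb.conf.template'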
|
BenHewins/influxdb-python
|
influxdb/tests/server_tests/base.py
|
Python
|
mit
| 2,621
| 0
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# mockredis
#
# This module helps start and stop redis instances for unit-testing
# redis must be pre-installed for this to work
#
import os
import signal
import subprocess
import logging
import socket
import time
import redis
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
redis_ver = '2.6.13'
redis_bdir = '/tmp/cache/' + os.environ['USER'] + '/systemless_test'
redis_url = redis_bdir + '/redis-'+redis_ver+'.tar.gz'
redis_exe = redis_bdir + '/bin/redis-server'
def install_redis():
if not os.path.exists(redis_url):
process = subprocess.Popen(['wget', '-P', redis_bdir,
'https://redis.googlecode.com/files/redis-'\
+ redis_ver + '.tar.gz'],
cwd=redis_bdir)
process.wait()
        if process.returncode != 0:
raise SystemError('wget '+redis_url)
if not os.path.exists(redis_bdir + '/redis-'+redis_ver):
process = subprocess.Popen(['tar', 'xzvf', redis_url],
cwd=redis_bdir)
process.wait()
        if process.returncode != 0:
raise SystemError('untar '+redis_url)
if not os.path.exists(redis_exe):
process = subprocess.Popen(['make', 'PREFIX=' + redis_bdir, 'install'],
cwd=redis_bdir + '/redis-'+redis_ver)
process.wait()
        if process.returncode != 0:
raise SystemError('install '+redis_url)
def get_redis_path():
if not os.path.exists(redis_exe):
install_redis()
return redis_exe
def redis_version():
'''
Determine redis-server version
'''
return 2.6
'''
command = "redis-server --version"
logging.info('redis_version call 1')
process = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
logging.info('redis_version call 2')
output, _ = process.communicate()
if "v=2.6" in output[0]:
return 2.6
else:
return 2.4
'''
def start_redis(port, password=None):
'''
Client uses this function to start an instance of redis
Arguments:
cport : An unused TCP port for redis to use as the client port
'''
exe = get_redis_path()
version = redis_version()
if version == 2.6:
redis_conf = "redis.26.conf"
else:
redis_conf = "redis.24.conf"
conftemplate = os.path.dirname(os.path.abspath(__file__)) + "/" +\
redis_conf
redisbase = "/tmp/redis.%s.%d/" % (os.getenv('USER', 'None'), port)
output, _ = call_command_("rm -rf " + redisbase)
output, _ = call_command_("mkdir " + redisbase)
output, _ = call_command_("mkdir " + redisbase + "cache")
logging.info('Redis Port %d' % port)
output, _ = call_command_("cp " + conftemplate + " " + redisbase +
redis_conf)
replace_string_(redisbase + redis_conf,
[("/var/run/redis_6379.pid", redisbase + "pid"),
("port 6379", "port " + str(port)),
("/var/log/redis_6379.log", redisbase + "log"),
("/var/lib/redis/6379", redisbase + "cache")])
if password:
replace_string_(redisbase + redis_conf,[("# requirepass foobared","requirepass " + password)])
command = exe + " " + redisbase + redis_conf
subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
r = redis.StrictRedis(host='localhost', port=port, db=0, password=password)
done = False
    start_wait = int(os.getenv('CONTRIAL_ANALYTICS_TEST_MAX_START_WAIT_TIME', 15))
cnt = 0
while not done:
try:
r.ping()
        except Exception:
cnt += 1
if cnt > start_wait:
logging.info('Redis Failed. Logs below: ')
with open(redisbase + "log", 'r') as fin:
logging.info(fin.read())
return False
logging.info('Redis not ready')
time.sleep(1)
else:
done = True
logging.info('Redis ready')
return True
def stop_redis(port, password=None):
'''
Client uses this function to stop an instance of redis
This will only work for redis instances that were started by this module
Arguments:
cport : The Client Port for the instance of redis to be stopped
'''
r = redis.StrictRedis(host='localhost', port=port, db=0, password=password)
r.shutdown()
del r
redisbase = "/tmp/redis.%s.%d/" % (os.getenv('USER', 'None'), port)
output, _ = call_command_("rm -rf " + redisbase)
def replace_string_(filePath, findreplace):
"replaces all findStr by repStr in file filePath"
print filePath
tempName = filePath + '~~~'
input = open(filePath)
output = open(tempName, 'w')
s = input.read()
for couple in findreplace:
outtext = s.replace(couple[0], couple[1])
s = outtext
output.write(outtext)
output.close()
input.close()
os.rename(tempName, filePath)
def call_command_(command):
process = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process.communicate()
if __name__ == "__main__":
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
start_redis(cport)
|
facetothefate/contrail-controller
|
src/analytics/test/utils/mockredis/mockredis/mockredis.py
|
Python
|
apache-2.0
| 5,634
| 0.002662
|
import web
from gothonweb import map
urls = (
'/game', 'GameEngine',
'/', 'Index',
)
app = web.application(urls, globals())
#little hack so that debug mode works with sessions
if web.config.get('_session') is None:
store = web.session.DiskStore('sessions')
session = web.session.Session(app, store,
initializer={'room':None})
web.config._session = session
else:
session = web.config._session
render = web.template.render('templates/', base="layout")
class Index(object):
def GET(self):
# this is used to "setup" the session with starting values
session.room = map.START
web.seeother("/game")
class GameEngine(object):
def GET(self):
if session.room:
return render.show_room(room=session.room)
# else:
#        # why is this here? do you need it?
# return render.you_died()
def POST(self):
form = web.input(action=None)
if session.room:
session.room = session.room.go(form.action)
web.seeother("/game")
if __name__ == "__main__":
app.run()
|
githubfun/lphw
|
gothonweb/bin/gothon_app.py
|
Python
|
mit
| 1,131
| 0.005305
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a web interface for dumping graph data as JSON.
This is meant to be used with /load_from_prod in order to easily grab
data for a graph to a local server for testing.
"""
import base64
import json
from google.appengine.ext import ndb
from google.appengine.ext.ndb import model
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
_DEFAULT_MAX_POINTS = 500
# This is about the limit we want to return since we fetch many associated
# entities for each anomaly.
_DEFAULT_MAX_ANOMALIES = 30
class DumpGraphJsonHandler(request_handler.RequestHandler):
"""Handler for extracting entities from datastore."""
def get(self):
"""Handles dumping dashboard data."""
if self.request.get('sheriff'):
self._DumpAnomalyDataForSheriff()
elif self.request.get('test_path'):
self._DumpTestData()
else:
self.ReportError('No parameters specified.')
def _DumpTestData(self):
"""Dumps data for the requested test.
Request parameters:
test_path: A single full test path, including master/bot.
num_points: Max number of Row entities (optional).
end_rev: Ending revision number, inclusive (optional).
Outputs:
JSON array of encoded protobuf messages, which encode all of
the datastore entities relating to one test (including Master, Bot,
TestMetadata, Row, Anomaly and Sheriff entities).
"""
test_path = self.request.get('test_path')
num_points = int(self.request.get('num_points', _DEFAULT_MAX_POINTS))
end_rev = self.request.get('end_rev')
test_key = utils.TestKey(test_path)
if not test_key or test_key.kind() != 'TestMetadata':
# Bad test_path passed in.
self.response.out.write(json.dumps([]))
return
# List of datastore entities that will be dumped.
entities = []
entities.extend(self._GetTestAncestors([test_key]))
# Get the Row entities.
q = graph_data.Row.query()
q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
if end_rev:
q = q.filter(graph_data.Row.revision <= int(end_rev))
q = q.order(-graph_data.Row.revision)
entities += q.fetch(limit=num_points)
# Get the Anomaly and Sheriff entities.
alerts = anomaly.Anomaly.GetAlertsForTest(test_key)
sheriff_keys = {alert.sheriff for alert in alerts}
sheriffs = [sheriff.get() for sheriff in sheriff_keys]
entities += alerts
entities += sheriffs
# Convert the entities to protobuf message strings and output as JSON.
protobuf_strings = map(EntityToBinaryProtobuf, entities)
self.response.out.write(json.dumps(protobuf_strings))
def _DumpAnomalyDataForSheriff(self):
"""Dumps Anomaly data for all sheriffs.
Request parameters:
sheriff: Sheriff name.
num_points: Max number of Row entities (optional).
num_alerts: Max number of Anomaly entities (optional).
Outputs:
JSON array of encoded protobuf messages, which encode all of
the datastore entities relating to one test (including Master, Bot,
TestMetadata, Row, Anomaly and Sheriff entities).
"""
sheriff_name = self.request.get('sheriff')
num_points = int(self.request.get('num_points', _DEFAULT_MAX_POINTS))
num_anomalies = int(self.request.get('num_alerts', _DEFAULT_MAX_ANOMALIES))
sheriff = ndb.Key('Sheriff', sheriff_name).get()
if not sheriff:
self.ReportError('Unknown sheriff specified.')
return
anomalies = self._FetchAnomalies(sheriff, num_anomalies)
test_keys = [a.GetTestMetadataKey() for a in anomalies]
# List of datastore entities that will be dumped.
entities = []
entities.extend(self._GetTestAncestors(test_keys))
# Get the Row entities.
entities.extend(self._FetchRowsAsync(test_keys, num_points))
# Add the Anomaly and Sheriff entities.
entities += anomalies
entities.append(sheriff)
# Convert the entities to protobuf message strings and output as JSON.
protobuf_strings = map(EntityToBinaryProtobuf, entities)
self.response.out.write(json.dumps(protobuf_strings))
def _GetTestAncestors(self, test_keys):
"""Gets the TestMetadata, Bot, and Master entities preceding in path."""
entities = []
added_parents = set()
for test_key in test_keys:
if test_key.kind() != 'TestMetadata':
continue
parts = utils.TestPath(test_key).split('/')
      for index, _ in enumerate(parts):
test_path = '/'.join(parts[:index + 1])
if test_path in added_parents:
continue
added_parents.add(test_path)
if index == 0:
entities.append(ndb.Key('Master', parts[0]).get())
elif index == 1:
entities.append(ndb.Key('Master', parts[0], 'Bot', parts[1]).get())
else:
entities.append(ndb.Key('TestMetadata', test_path).get())
return [e for e in entities if e is not None]
def _FetchRowsAsync(self, test_keys, num_points):
"""Fetches recent Row asynchronously across all 'test_keys'."""
rows = []
futures = []
for test_key in test_keys:
q = graph_data.Row.query()
q = q.filter(
graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
q = q.order(-graph_data.Row.revision)
futures.append(q.fetch_async(limit=num_points))
ndb.Future.wait_all(futures)
for future in futures:
rows.extend(future.get_result())
return rows
def _FetchAnomalies(self, sheriff, num_anomalies):
"""Fetches recent anomalies for 'sheriff'."""
q = anomaly.Anomaly.query(
anomaly.Anomaly.sheriff == sheriff.key)
q = q.order(-anomaly.Anomaly.timestamp)
return q.fetch(limit=num_anomalies)
def EntityToBinaryProtobuf(entity):
"""Converts an ndb entity to a protobuf message in binary format."""
# Encode in binary representation of the protocol buffer.
message = ndb.ModelAdapter().entity_to_pb(entity).Encode()
# Base64 encode the data to text format for json.dumps.
return base64.b64encode(message)
def BinaryProtobufToEntity(pb_str):
"""Converts a protobuf message in binary format to an ndb entity.
Args:
pb_str: Binary encoded protocol buffer which is encoded as text.
Returns:
A ndb Entity.
"""
message = model.entity_pb.EntityProto(base64.b64decode(pb_str))
return ndb.ModelAdapter().pb_to_entity(message)
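# Round-trip note (added for clarity): the two helpers above are inverses, so
# BinaryProtobufToEntity(EntityToBinaryProtobuf(entity)) reconstructs an
# equivalent entity; the base64 step only makes the binary protobuf safe to
# embed in the JSON array returned by the handler.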
|
benschmaus/catapult
|
dashboard/dashboard/dump_graph_json.py
|
Python
|
bsd-3-clause
| 6,621
| 0.005437
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.sc.EntityItemType import EntityItemType
logger = logging.getLogger(__name__)
class EntityItemEncryptMethodType(EntityItemType):
MODEL_MAP = {
'elements': [
],
'attributes': {
},
}
def get_value_enum(self):
return [
'DES',
'BSDi',
'MD5',
'Blowfish',
'Sun MD5',
'SHA-256',
'SHA-512',
'',
]
|
cjaymes/pyscap
|
src/scap/model/oval_5/sc/unix/EntityItemEncryptMethodType.py
|
Python
|
gpl-3.0
| 1,167
| 0.001714
|
from setuptools import setup, find_packages
setup(
name='zeit.content.gallery',
version='2.9.2.dev0',
author='gocept, Zeit Online',
author_email='zon-backend@zeit.de',
url='http://www.zeit.de/',
description="vivi Content-Type Portraitbox",
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
license='BSD',
namespace_packages=['zeit', 'zeit.content'],
install_requires=[
'cssselect',
'Pillow',
'gocept.form',
'setuptools',
'zeit.cms >= 3.0.dev0',
'zeit.connector>=2.4.0.dev0',
'zeit.imp>=0.15.0.dev0',
'zeit.content.image',
'zeit.push>=1.21.0.dev0',
'zeit.wysiwyg',
'zope.app.appsetup',
'zope.app.testing',
'zope.component',
'zope.formlib',
'zope.interface',
'zope.publisher',
'zope.security',
'zope.testing',
],
entry_points={
'fanstatic.libraries': [
'zeit_content_gallery=zeit.content.gallery.browser.resources:lib',
],
},
)
|
ZeitOnline/zeit.content.gallery
|
setup.py
|
Python
|
bsd-3-clause
| 1,113
| 0
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015, 2016 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import arrow
import logging
import re
import requests
import json
from os.path import splitext
from io import BytesIO
from eve.io.base import DataLayer
from eve_elastic.elastic import ElasticCursor
from superdesk.upload import url_for_media
from superdesk.errors import SuperdeskApiError, ProviderError
from superdesk.media.media_operations import process_file_from_stream, decode_metadata
from superdesk.media.renditions import generate_renditions, delete_file_on_error, get_renditions_spec
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE
from superdesk.utc import utcnow, get_date, local_to_utc
import mimetypes
# scanpix preview size to use (if available) for superdesk rendition
# preview sizes are in order of preference, first found is used
REND2PREV = {
'thumbnail': ('generated_jpg', 'thumbnail', 'thumbnail_big'),
'viewImage': ('preview', 'thumbnail_big', 'thumbnail', 'preview_big'),
'baseImage': ('mp4_preview', 'mp4_thumbnail', 'preview_big', 'preview', 'thumbnail_big', 'thumbnail')}
logger = logging.getLogger('ntb:scanpix')
# Default timezone used to convert datetimes from scanpix api results to utc
SCANPIX_TZ = 'Europe/Oslo'
def extract_params(query, names):
if isinstance(names, str):
names = [names]
findall = re.findall(r'([\w]+):\(([-\w\s*]+)\)', query)
params = {name: value for (name, value) in findall if name in names}
for name, value in findall:
query = query.replace('%s:(%s)' % (name, value), '')
query = query.strip()
# escape dashes
for name, value in params.items():
params[name] = value.replace('-', r'\-')
if query:
params['q'] = query
return params
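# Worked example (added for illustration): with
#   query = 'headline:(storm) bw:(1) flooding'
# extract_params(query, ('headline', 'bw')) returns
#   {'headline': 'storm', 'bw': '1', 'q': 'flooding'}
# i.e. the named parenthesised terms are lifted out, the remaining free text
# is kept under 'q', and dashes inside values are escaped for the Scanpix API.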
class ScanpixDatalayer(DataLayer):
def set_credentials(self, user, password):
self._user = user
self._password = password
def init_app(self, app):
app.config.setdefault('SCANPIX_SEARCH_URL', 'http://api.scanpix.no/v2')
self._app = app
self._user = None
self._password = None
self._headers = {
'Content-Type': 'application/json',
}
def fetch_file(self, url):
"""Get file stream for given image url.
It will fetch the file using predefined auth token.
:param url: pa image api url
"""
raise NotImplementedError
def find(self, resource, req, lookup):
"""
Called to execute a search against the Scanpix API. It attempts to translate the search request
passed in req to a suitable form for a search request against the API. It parses the response into a
suitable ElasticCursor.
:param resource:
:param req:
:param lookup:
:return:
"""
url = self._app.config['SCANPIX_SEARCH_URL'] + '/search'
data = {
'mainGroup': 'any'
}
if 'query' in req['query']['filtered']:
query = req['query']['filtered']['query']['query_string']['query'] \
.replace('slugline:', 'keywords:') \
.replace('description:', 'caption:')
# Black & White
try:
bw = bool(int(extract_params(query, 'bw')['bw']))
except KeyError:
pass
else:
if bw:
data['saturation'] = {'max': 1}
# Clear Edge
try:
clear_edge = bool(int(extract_params(query, 'clear_edge')['clear_edge']))
except KeyError:
pass
else:
if clear_edge:
data['clearEdge'] = True
text_params = extract_params(query, ('headline', 'keywords', 'caption', 'text'))
# combine all possible text params to use the q field.
data['searchString'] = ' '.join(text_params.values())
try:
ids = extract_params(query, 'id')['id'].split()
except KeyError:
pass
else:
data['refPtrs'] = ids
# subscription
data['subscription'] = 'subscription' # this is requested as a default value
        # data['subscription'] is always 'subscription' here, but we keep the
        # test in case the behaviour changes again in the future.
if 'ntbtema' in resource and data['subscription'] == 'subscription':
# small hack for SDNTB-250
data['subscription'] = 'punchcard'
for criterion in req.get('post_filter', {}).get('and', {}):
if 'range' in criterion:
start = None
end = None
filter_data = criterion.get('range', {})
if 'firstcreated' in filter_data:
created = criterion['range']['firstcreated']
if 'gte' in created:
start = created['gte'][0:10]
if 'lte' in created:
end = created['lte'][0:10]
# if there is a special start and no end it's one of the date buttons
if start and not end:
if start == 'now-24H':
data['timeLimit'] = 'last24'
if start == 'now-1w':
data['timeLimit'] = 'lastweek'
if start == 'now-1M':
data['timeLimit'] = 'lastmonth'
elif start or end:
data['archived'] = {
'min': '',
'max': ''
}
if start:
data['archived']['min'] = start
if end:
data['archived']['max'] = end
if 'terms' in criterion:
if 'type' in criterion.get('terms', {}):
type_ = criterion['terms']['type']
if type_ == CONTENT_TYPE.VIDEO:
data['mainGroup'] = 'video'
offset, limit = int(req.get('from', '0')), max(10, int(req.get('size', '25')))
data['offset'] = offset
data['showNumResults'] = limit
r = self._request(url, data, resource)
hits = self._parse_hits(r.json())
return ElasticCursor(docs=hits['docs'], hits={'hits': hits})
def _request(self, url, data, resource):
"""Perform GET request to given url.
It adds predefined headers and auth token if available.
:param url
:param data
"""
r = requests.post(url, data=json.dumps(data), headers=self._headers, auth=(self._user, self._password))
if r.status_code < 200 or r.status_code >= 300:
logger.error('error fetching url=%s status=%s content=%s' % (url, r.status_code, r.content or ''))
raise ProviderError.externalProviderError("Scanpix request can't be performed", provider={'name': resource})
return r
def _parse_doc(self, doc):
new_doc = {}
new_doc['_id'] = doc['refPtr']
new_doc['guid'] = doc['refPtr']
try:
new_doc['description_text'] = doc['caption']
except KeyError:
pass
try:
new_doc['headline'] = doc['headline']
except KeyError:
pass
try:
new_doc['original_source'] = new_doc['source'] = doc['credit']
except KeyError:
pass
new_doc['versioncreated'] = new_doc['firstcreated'] = self._datetime(
local_to_utc(SCANPIX_TZ, get_date(doc['archivedTime']))
)
new_doc['pubstatus'] = 'usable'
# This must match the action
new_doc['_type'] = 'externalsource'
# entry that the client can use to identify the fetch endpoint
new_doc['fetch_endpoint'] = 'scanpix'
# mimetype is not directly found in Scanpix API
# so we use original filename to guess it
mimetype = mimetypes.guess_type("_{}".format(splitext(doc.get('originalFileName', ''))[1]))[0]
if mimetype is None:
# nothing found with filename, we try out luck with fileFormat
try:
format_ = doc['fileFormat'].split()[0]
except (KeyError, IndexError):
mimetype = None
else:
mimetype = mimetypes.guess_type('_.{}'.format(format_))[0]
if mimetype is not None:
new_doc['mimetype'] = mimetype
main_group = doc['mainGroup']
if main_group == 'video':
new_doc[ITEM_TYPE] = CONTENT_TYPE.VIDEO
elif main_group == 'graphic':
new_doc[ITEM_TYPE] = CONTENT_TYPE.GRAPHIC
new_doc['mimetype'] = 'image/jpeg'
else:
new_doc[ITEM_TYPE] = CONTENT_TYPE.PICTURE
try:
doc_previews = doc['previews']
except KeyError:
logger.warning('no preview found for item {}'.format(new_doc['_id']))
else:
# we look for best available scanpix preview
available_previews = [p['type'] for p in doc_previews]
renditions = new_doc['renditions'] = {}
for rend, previews in REND2PREV.items():
for prev in previews:
if prev in available_previews:
idx = available_previews.index(prev)
renditions[rend] = {"href": doc_previews[idx]['url']}
break
new_doc['byline'] = doc['byline']
doc.clear()
doc.update(new_doc)
def _parse_hits(self, hits):
hits['docs'] = hits.pop('data')
hits['total'] = hits.pop('numResults')
for doc in hits['docs']:
self._parse_doc(doc)
return hits
def _datetime(self, string):
try:
return arrow.get(string).datetime
except Exception:
return utcnow()
def find_all(self, resource, max_results=1000):
raise NotImplementedError
def find_one(self, resource, req, **lookup):
raise NotImplementedError
def find_one_raw(self, resource, _id):
# XXX: preview is used here instead of paid download
# see SDNTB-15
data = {}
url = self._app.config['SCANPIX_SEARCH_URL'] + '/search'
data['refPtrs'] = [_id]
r = self._request(url, data, resource)
doc = r.json()['data'][0]
self._parse_doc(doc)
url = doc['renditions']['baseImage']['href']
# if MIME type can't be guessed, we default to jpeg
mime_type = mimetypes.guess_type(url)[0] or 'image/jpeg'
r = self._request(url, data, resource)
out = BytesIO(r.content)
file_name, content_type, metadata = process_file_from_stream(out, mime_type)
logger.debug('Going to save media file with %s ' % file_name)
out.seek(0)
try:
file_id = self._app.media.put(out, filename=file_name, content_type=content_type, metadata=None)
except Exception as e:
logger.exception(e)
raise SuperdeskApiError.internalError('Media saving failed')
else:
try:
inserted = [file_id]
doc['mimetype'] = content_type
doc['filemeta'] = decode_metadata(metadata)
# set the version created to now to bring it to the top of the desk, images can be quite old
doc['versioncreated'] = utcnow()
file_type = content_type.split('/')[0]
rendition_spec = get_renditions_spec()
renditions = generate_renditions(out, file_id, inserted, file_type,
content_type, rendition_spec,
url_for_media, insert_metadata=False)
doc['renditions'] = renditions
except (IndexError, KeyError, json.JSONDecodeError) as e:
logger.exception("Internal error: {}".format(e))
delete_file_on_error(doc, file_id)
raise SuperdeskApiError.internalError('Generating renditions failed')
return doc
def find_list_of_ids(self, resource, ids, client_projection=None):
raise NotImplementedError
def insert(self, resource, docs, **kwargs):
raise NotImplementedError
def update(self, resource, id_, updates, original):
raise NotImplementedError
def update_all(self, resource, query, updates):
raise NotImplementedError
def replace(self, resource, id_, document, original):
raise NotImplementedError
def remove(self, resource, lookup=None):
raise NotImplementedError
def is_empty(self, resource):
raise NotImplementedError
|
ioanpocol/superdesk-ntb
|
server/ntb/scanpix/scanpix_datalayer.py
|
Python
|
agpl-3.0
| 13,055
| 0.001762
|
"""Code for CLI base"""
import logging
import pathlib
import click
import coloredlogs
import yaml
from flask.cli import FlaskGroup, with_appcontext
# General, logging
from scout import __version__
from scout.commands.convert import convert
from scout.commands.delete import delete
from scout.commands.download import download as download_command
from scout.commands.export import export
from scout.commands.index_command import index as index_command
# Commands
from scout.commands.load import load as load_command
from scout.commands.serve import serve
from scout.commands.setup import setup as setup_command
from scout.commands.update import update as update_command
from scout.commands.view import view as view_command
from scout.commands.wipe_database import wipe
from scout.server.app import create_app
LOG_LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
LOG = logging.getLogger(__name__)
@click.pass_context
def loglevel(ctx):
"""Set app cli log level"""
log_level = ctx.find_root().params.get("loglevel")
log_format = None
coloredlogs.install(level=log_level, fmt=log_format)
LOG.info("Running scout version %s", __version__)
LOG.debug("Debug logging enabled.")
@click.pass_context
def get_app(ctx=None):
"""Create an app with the correct config or with default app params"""
loglevel() # Set up log level even before creating the app object
    # store the provided params in an options variable
options = ctx.find_root()
cli_config = {}
    # if a .yaml config file was provided, use its params to initiate the app
if options.params.get("config"):
with open(options.params["config"], "r") as in_handle:
cli_config = yaml.load(in_handle, Loader=yaml.SafeLoader)
flask_conf = None
if options.params.get("flask_config"):
flask_conf = pathlib.Path(options.params["flask_config"]).absolute()
if options.params.get("demo"):
cli_config["demo"] = "scout-demo"
try:
app = create_app(
config=dict(
MONGO_DBNAME=options.params.get("mongodb")
or cli_config.get("demo")
or cli_config.get("mongodb")
or "scout",
MONGO_HOST=options.params.get("host") or cli_config.get("host"),
MONGO_PORT=options.params.get("port") or cli_config.get("port"),
MONGO_USERNAME=options.params.get("username") or cli_config.get("username"),
MONGO_PASSWORD=options.params.get("password") or cli_config.get("password"),
MONGO_URI=options.params.get("mongo_uri") or cli_config.get("mongo_uri"),
OMIM_API_KEY=cli_config.get("omim_api_key"),
),
config_file=flask_conf,
)
except SyntaxError as err:
LOG.error(err)
raise click.Abort
return app
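# Note (added for clarity): each connection setting resolves in the order
# CLI flag > YAML config > hard-coded default; for example, running with
# --demo and no -db makes MONGO_DBNAME resolve to "scout-demo".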
@click.version_option(__version__)
@click.group(
cls=FlaskGroup,
create_app=get_app,
invoke_without_command=True,
add_default_commands=False,
add_version_option=False,
)
@click.option(
"-c",
"--config",
type=click.Path(exists=True),
help="Path to a YAML config file with database info.",
)
@click.option(
"--loglevel",
default="DEBUG",
type=click.Choice(LOG_LEVELS),
help="Set the level of log output.",
show_default=True,
)
@click.option("--demo", is_flag=True, help="If the demo database should be used")
@click.option("-db", "--mongodb", help="Name of mongo database [scout]")
@click.option("-uri", "--mongo-uri", help="MongoDB connection string")
@click.option("-u", "--username")
@click.option("-p", "--password")
@click.option("-a", "--authdb", help="database to use for authentication")
@click.option("-port", "--port", help="Specify on what port to listen for the mongod")
@click.option("-h", "--host", help="Specify the host for the mongo database.")
@click.option(
"-f",
"--flask-config",
type=click.Path(exists=True),
help="Path to a PYTHON config file",
)
@with_appcontext
def cli(**_):
"""scout: manage interactions with a scout instance."""
cli.add_command(load_command)
cli.add_command(wipe)
cli.add_command(setup_command)
cli.add_command(delete)
cli.add_command(export)
cli.add_command(convert)
cli.add_command(index_command)
cli.add_command(view_command)
cli.add_command(update_command)
cli.add_command(download_command)
cli.add_command(serve)
|
Clinical-Genomics/scout
|
scout/commands/base.py
|
Python
|
bsd-3-clause
| 4,404
| 0.001589
|
from distutils.core import setup
setup(
name="kafka-python",
version="0.1-alpha",
author="David Arthur",
author_email="mumrah@gmail.com",
url="https://github.com/mumrah/kafka-python",
packages=["kafka"],
license="Copyright 2012, David Arthur under Apache License, v2.0",
description="Pure Python client for Apache Kafka",
long_description=open("README.md").read(),
)
|
enoex/kafka-python
|
setup.py
|
Python
|
apache-2.0
| 404
| 0
|
import logging
import signal
import socket
import configparser
import importlib.machinery
import serial
import copy
import zmq
class Dispatcher(object):
""" Superclass for all Dispatchers.
This is the part of the simulator that handles the connections.
"""
def __init__(self, dispatcher_type, dispatcher_id):
        self.name = dispatcher_type
self.dispatcher_id = dispatcher_id
self.call_backs = {}
        self.go_on = True
        self.timeout = 60000  # default poll timeout in ms
        self.system_socket = None
logger = logging.getLogger('{0}_simulator'
.format(dispatcher_type))
logger.setLevel(logging.INFO)
logfile = '/tmp/test.log'
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
self.logger = logger
self.context = zmq.Context(1)
    def control_c_handler(self, signum, frame):
        """Handle SIGINT: request a controlled shutdown so we can clean up."""
        self.go_on = False
def create_sockets(self, accept_socket):
# Open a socket to listen for commands from the scenario player
address = "tcp://*:{0}".format(self.command_listen_port)
self.logger.info("Command subscription at {0}".format(address))
command_socket = self.context.socket(zmq.SUB)
command_socket.bind(address)
command_socket.setsockopt(zmq.SUBSCRIBE, "")
# Add the sockets to the zmq poller.
self.poller = zmq.Poller()
self.poller.register(accept_socket, zmq.POLLIN)
self.poller.register(command_socket, zmq.POLLIN)
# Register the call backs.
self.call_backs[accept_socket.fileno()] = (accept_socket, self.accept)
self.call_backs[command_socket] = (command_socket,
self.process_player_command)
# Not part of the poller
# Message forwarding link to player
address = "tcp://*:{0}".format(self.message_forward_port)
self.logger.info("Publishing on " + address)
self.repeater_socket = self.context.socket(zmq.PUB)
self.repeater_socket.bind(address)
def accept(self, a_socket):
"""Accept a connection from the system.
"""
system_socket, address = a_socket.accept()
self.logger.info('Connection from ' + str(address))
# Register this socket too so we look for incoming data
self.poller.register(system_socket, zmq.POLLIN)
self.call_backs[system_socket.fileno()] = (
system_socket, self.process_message)
self.system_socket = system_socket
def process_player_command(self, a_socket):
""" Process a command from the scenario player.
"""
# receive the command
command = a_socket.recv_pyobj()
self.logger.info('received command from scenario player: {0}'
.format(type(command)))
self.system_socket.send(self.message.to_message(command))
def process_message(self, a_socket):
""" Receive and forward a message from the system """
self.logger.info( 'Data from the system' )
# We do not know beforehand how big the blob is.
        data = a_socket.recv(2048)
        if not data:
            # Connection was closed, so unregister and close the socket.
            self.poller.unregister(a_socket)
            del self.call_backs[a_socket.fileno()]
            a_socket.close()
            self.system_socket = None
        else:
            a_message = self.message.from_message(data)
self.logger.info('Copying data to player')
self.repeater_socket.send_pyobj(a_message)
    def run(self):
        # Catch any Control-C
        signal.signal(signal.SIGINT, self.control_c_handler)
        self.create_sockets()
        while self.go_on:
            # Note that poller uses fileno() as the key for non-zmq sockets.
            socks = dict(self.poller.poll(self.timeout))  # timeout in ms
            for socket_key in self.call_backs.copy():
                # Need a copy here because we might modify call_backs
                # while in the callback functions.
                if socket_key in socks and socks[socket_key] == zmq.POLLIN:
                    if socket_key in self.call_backs:
                        cbp = self.call_backs[socket_key]
                        function = cbp[1]
                        function(cbp[0])
            self.logger.info("Still alive")
            self.handle_poll(socks)
        self.logger.info("Stopping")
        self.context.term()
    def handle_poll(self, socks):
        """Per-iteration hook for subclasses; the base class does nothing."""
        pass
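    # Note on the callback table (added for clarity): zmq's Poller reports
    # plain file descriptors by their fileno() and zmq sockets by the socket
    # object itself, which is why call_backs is keyed by fileno() for system
    # sockets and by the socket object for zmq sockets.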
#------------------------------------------------------------------------------
class TCPDispatcher(Dispatcher):
""" Dispatcher subclass for TCP connections"""
def __init__(self, dispatcher_type, dispatcher_id):
        Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
            self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader(
                    'message', self.message_path)
                message_module = loader.load_module()
                self.message = message_module.Message()
# address and port to listen on for messages from the system
self.accept_address = entries['AcceptAddress']
self.listen_port = entries['ListenPort']
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid tcp section found in config file')
def create_sockets(self):
""" Create the TCP sockets between the system and the
Scenario player
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
# Open a tcp socket to listen for new connections
# from the system.
self.logger.info("Listening on address {0}"
.format(str(self.accept_address)))
self.logger.info("Listening on port {0}".format(str(self.listen_port)))
accept_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        accept_socket.bind((self.accept_address, int(self.listen_port)))
# Only handle one connection at a time.
accept_socket.listen(1)
# Let the superclass finish the creation of the rest of the
# sockets, because it is the same.
Dispatcher.create_sockets(self, accept_socket)
    def handle_poll(self, socks):
        # TCP dispatcher has no extra steps to add to the default loop.
        pass
#------------------------------------------------------------------------------
class SerialDispatcher(Dispatcher):
""" Dispatcher subclass for Serial connections"""
SERIAL_PARITY = {'none':serial.PARITY_NONE , 'even':serial.PARITY_EVEN ,
'odd':serial.PARITY_ODD , 'mark':serial.PARITY_MARK ,
'space':serial.PARITY_SPACE}
SERIAL_STOPBITS= {'one':serial.STOPBITS_ONE ,
'onePointFive': serial.STOPBITS_ONE_POINT_FIVE,
'two':serial.STOPBITS_TWO }
default_timeout = 60000
def __init__(self, dispatcher_type, dispatcher_id):
Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
self.repeater_socket = None
self.poller = None
        self.call_backs = {}
        self.serial_link = None
        self.timeout = self.default_timeout
        self.receiving = False
        self.blob = b""
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
            self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader(
                    'message', self.message_path)
                message_module = loader.load_module()
                self.message = message_module.Message()
# Settings for the serial link to the system.
self.serial_device = entries['Device']
self.serial_baudrate = int(entries['BaudRate'])
self.serial_bytesize = int(entries['ByteSize'])
            self.serial_parity = self.SERIAL_PARITY.get(entries['Parity'])
            self.serial_stopbits = self.SERIAL_STOPBITS.get(entries['StopBits'])
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid serial section '
'found in config file')
def create_sockets(self):
""" Create the socket to the scenario player and set up the
serial link to the system
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
# Setup a serial link to listen to the system
self.logger.info("Opening serial device {0} ".format(serial_device))
self.serial_link = serial.Serial(serial_device, serial_baudrate,
serial_parity, serial_bytesize,
serial_stopbits)
# Open a socket to listen for commands from the scenario player
address = "tcp://*:{0}".format(self.command_listen_port)
self.logger.info("Command subscription at {0}".format(address))
command_socket = self.context.socket(zmq.SUB)
command_socket.bind(address)
command_socket.setsockopt(zmq.SUBSCRIBE, "")
# Add the sockets to the zmq poller.
self.poller = zmq.Poller()
        if self.serial_link:
            self.poller.register(self.serial_link, zmq.POLLIN)
# Register callback
self.call_backs[self.serial_link.fileno()] = (self.serial_link,
self.read_message)
self.poller.register(command_socket, zmq.POLLIN)
# Register the call backs.
self.call_backs[command_socket] = (command_socket,
self.process_player_command)
# Not part of the poller
# Message forwarding link to player
address = "tcp://*:{0}".format(self.message_forward_port)
self.logger.info("Publishing on " + address)
self.repeater_socket = self.context.socket(zmq.PUB)
self.repeater_socket.bind(address)
def read_message(self, link):
"""Read one or more bytes from the system
"""
# We do not know beforehand how big the blob is and data might come in
# parts, mostly one character at a time, sometimes a few more.
        blob = link.read()
self.blob += blob
# Set timeout to a low value. We should receive a new byte within
# this period otherwise we assume it is the end of the message. If we
# make this too high, there will be a delay in processing the message.
# The baud rate is 57600 so a single character takes
# 8/57600 == 0.000138 seconds == 0.138 milliseconds
# So 10ms should be enough.
self.timeout = 10 # in ms
self.receiving = True
def process_message(self):
"""Receive and forward a message from the system.
"""
self.logger.info('Received a full message from the system')
self.logger.info(",".join(map(lambda x: hex(ord(x)), self.blob)))
a_message = self.message.from_message( self.blob )
self.logger.info('Copying data to player')
self.repeater_socket.send_pyobj(a_message)
self.blob = ""
def process_player_command(self, a_socket):
""" Process a command from the scenario player.
"""
# receive the command
command = a_socket.recv_pyobj()
self.logger.info('received command from scenario player: {0}'
.format(type(command)))
self.serial_link.write(self.message.to_message(command))
    def handle_poll(self, socks):
        if len(socks) == 0 and self.receiving:
            # We were in the process of receiving data from OBIS.
            # We did not receive any new bytes, so we assume it's
            # the end of the message.
            self.process_message()
            self.receiving = False
            # Set timeout back to a high value, so we do not waste CPU
            # cycles.
            self.timeout = self.default_timeout
            self.blob = b""  # Reset the message buffer
        elif len(socks) == 0 and self.timeout == self.default_timeout:
            self.logger.info("Nothing happened for a long time.")
#------------------------------------------------------------------------------
class UDPDispatcher(Dispatcher):
""" Dispatcher subclass for UDP connections"""
def __init__(self, dispatcher_type, dispatcher_id):
Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
            self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader(
                    'message', self.message_path)
                message_module = loader.load_module()
                self.message = message_module.Message()
# address and port to listen on for messages from the system
self.accept_address = entries['AcceptAddress']
self.listen_port = entries['ListenPort']
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid udp section found in config file')
def create_sockets(self):
""" Create the UDP sockets between the system and the
Scenario player
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
# Open an UDP socket to listen for new connections
# from the system.
self.logger.info("Listening on address {0}"
.format(str(self.accept_address)))
self.logger.info("Listening on port {0}".format(str(self.listen_port)))
address = "udp://{0}:{1}".format(accept_address, listen_port)
accept_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
accept_socket.bind((self.accept_address, self.listen_port))
# Let the superclass finish the creation of the rest of the
# sockets, because it is the same.
Dispatcher.create_sockets(self, accept_socket)
    def handle_poll(self, socks):
        # UDP dispatcher has no extra per-iteration work.
        pass
#------------------------------------------------------------------------------
class HttpDispatcher(Dispatcher):
""" Dispatcher subclass for Http connections"""
def __init__(self, dispatcher_type, dispatcher_id):
        Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
        self.http_request = None
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
            self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader(
                    'message', self.message_path)
                self.message_module = loader.load_module()
                self.message = self.message_module.Message()
# address and port to listen on for messages from the system
self.accept_address = entries['AcceptAddress']
self.listen_port = entries['ListenPort']
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid http section found in config file')
def create_sockets(self):
""" Create the UDP sockets between the system and the
Scenario player
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
        # Open a TCP socket to listen for new HTTP connections
        # from the system.
self.logger.info("Listening on address {0}"
.format(str(self.accept_address)))
self.logger.info("Listening on port {0}".format(str(self.listen_port)))
address = "{0}:{1}".format(accept_address, listen_port)
accept_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
accept_socket.bind((self.accept_address, self.listen_port))
# Let the superclass finish the creation of the rest of the
# sockets, because it is the same.
Dispatcher.create_sockets(self, accept_socket)
    def process_message(self, a_socket):
        """ Method to process a HTTP request from the system.
        :param a_socket: the socket on which the message arrives.
        :type a_socket: socket
        """
        self.logger.info('HTTP Request')
        if self.http_request is None:
            self.http_request = self.message_module.Message()
        self.logger.info('Parsing request')
        if self.http_request.from_message(a_socket):
            self.logger.info('Waiting for more data')
        else:
            # We received the full request.
            # Send a reply and close the connection.
            a_socket.send(b"HTTP/1.1 200 OK\n")
            self.logger.info('Complete request received.')
            self.poller.unregister(a_socket)
            del self.call_backs[a_socket.fileno()]
            a_socket.close()
            self.system_socket = None
            self.logger.info('Forwarding request to player')
            self.repeater_socket.send_pyobj(self.http_request)
            self.http_request = None
    def handle_poll(self, socks):
        # HTTP dispatcher has no extra per-iteration work.
        pass
    def close(self):
        # Clean-up of the sockets this dispatcher owns; the zmq context
        # itself is terminated at the end of Dispatcher.run().
        if getattr(self, 'repeater_socket', None) is not None:
            self.repeater_socket.close()
        if getattr(self, 'system_socket', None) is not None:
            self.system_socket.close()
|
InTraffic/TSTK
|
TSTK/dispatcher.py
|
Python
|
gpl-3.0
| 20,467
| 0.004886
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000045.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
biomodels/BIOMD0000000045
|
BIOMD0000000045/model.py
|
Python
|
cc0-1.0
| 427
| 0.009368
|
#-*- coding:utf-8 -*-
'''
Display the output of commands.
'''
import threading
from gi.repository import Gtk, Gdk, GObject, GLib, GtkSource, Pango
from VcEventPipe import *
class ViewLog:
    '''
    Display the log.
    1. When a new command arrives, decide whether to switch the log shown.
    2. When a command produces new log output and it has been displayed,
       decide whether to scroll.
    '''
    # Enumeration constants for the columns.
    (
     COLUMN_TAG_LINE_NO,  # line number
     COLUMN_TAG_NAME,     # tag name
     NUM_COLUMNS) = range(3)
def __init__(self, vc_cmd_grp):
        self.vc_cmd_grp = vc_cmd_grp  # the command group currently executing
        self.vc_cmd = None  # the command currently executing
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
editor = GtkSource.View()
editor.set_cursor_visible(True)
        editor.set_show_line_numbers(True)  # show line numbers
        editor.set_auto_indent(True)  # auto indent
        #editor.set_insert_spaces_instead_of_tabs(True)  # spaces instead of tabs
        editor.set_tab_width(4)  # tab width of 4
        editor.set_highlight_current_line(True)  # highlight the current line
        editor.set_editable(False)  # read-only
        self._ide_set_font(editor, "Ubuntu mono 12")  # set the font
src_buffer = self.create_buffer()
editor.set_buffer(src_buffer)
sw.add(editor)
self.view = sw
self.taglistview = editor
VcEventPipe.register_event(VcEventPipe.EVENT_LOG_COMMAND_START, self.sm_start_new_cmd)
VcEventPipe.register_event(VcEventPipe.EVENT_LOG_APPEND_TEXT, self.sm_append_log)
self.set_scrollable(True)
self.set_show_new_cmd_log(True)
def layout(self):
self.taglistview.show()
self.view.show()
def unregister(self):
VcEventPipe.unregister_event(VcEventPipe.EVENT_LOG_COMMAND_START, self.sm_start_new_cmd)
VcEventPipe.unregister_event(VcEventPipe.EVENT_LOG_APPEND_TEXT, self.sm_append_log)
def _ide_set_font(self, widget, str_font_desc):
        ''' Set the font of a widget.
            widget Gtk.Widget the widget
            str_font_desc String description of the font ("name size")
        '''
font_desc = Pango.FontDescription.from_string(str_font_desc)
widget.modify_font(font_desc)
def create_buffer(self):
        # TODO: find a syntax definition better suited to log output.
        # Supported languages:
# ada awk boo c c-sharp changelog chdr cpp css d def desktop diff
# docbook dot dpatch dtd eiffel erlang forth fortran gap gettext-translation
# gtk-doc gtkrc haddock haskell haskell-literate html idl ini java js latex
# libtool lua m4 makefile msil nemerle objc objective-caml ocl octave pascal
# perl php pkgconfig python r rpmspec ruby scheme sh sql tcl texinfo vala vbnet
# verilog vhdl xml xslt yacc
src_buffer = GtkSource.Buffer()
manager = GtkSource.LanguageManager()
language = manager.get_language("sh") # 设定语法的类型
src_buffer.set_language(language)
src_buffer.set_highlight_syntax(True) # 语法高亮
return src_buffer
def set_scrollable(self, is_scrollable):
        # Whether to keep scrolling when the log is updated.
        self.is_scrollable = is_scrollable
        if is_scrollable:  # scrolling wanted
            self._scroll_to_end()  # scroll to the end right away
        else:  # scrolling not wanted
            pass  # nothing to do
    def get_scrollable(self):
        # Query whether the newest log content is scrolled into view.
        return self.is_scrollable
def set_show_new_cmd_log(self, show):
self.is_show_new_cmd_log = show
        if show:
            # If we need to show the log of the most recently executed
            # command, bring the view up to date.
            latest_cmd = None
            for cmd in self.vc_cmd_grp.commands:
                if cmd.is_selected and cmd.process > 0:
                    latest_cmd = cmd
            if latest_cmd is not None:
                self.vc_cmd = latest_cmd
                self.set_log(latest_cmd)
        else:
            # If we no longer need to show the newest command log,
            # there is nothing to do.
            pass
def get_show_new_cmd_log(self):
return self.is_show_new_cmd_log
def sm_start_new_cmd(self, vc_cmd):
        # If the command is not in this command group, bail out.
        if vc_cmd not in self.vc_cmd_grp.commands:
            return
        # If it is not the current command and new commands should not be
        # shown, stop accepting the new command's output.
        if not self.is_show_new_cmd_log and self.vc_cmd != vc_cmd:
            return
self.vc_cmd = vc_cmd
Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self.clean_log)
def clean_log(self):
        ''' Clear the current text. '''
editor = self.taglistview
src_buf = editor.get_buffer()
src_buf.delete(src_buf.get_start_iter(), src_buf.get_end_iter())
def sm_append_log(self, vc_cmd, text):
        # If the command is not in this command group, bail out.
        if vc_cmd not in self.vc_cmd_grp.commands:
            return
        # If it is not the current command and new commands should not be
        # shown, stop accepting the new command's output.
        if not self.is_show_new_cmd_log and self.vc_cmd != vc_cmd:
            return
Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self.append_log, text)
def append_log(self, text):
        ''' Append one message to the log view. '''
editor = self.taglistview
src_buf = editor.get_buffer()
iter_ = src_buf.get_end_iter()
src_buf.insert(iter_, text)
if self.is_scrollable:
self._scroll_to_end()
def set_log(self, vc_cmd):
self.vc_cmd = vc_cmd
self.set_show_new_cmd_log(False)
self.clean_log()
self.append_log(vc_cmd.get_log())
def _scroll_to_end(self):
editor = self.taglistview
src_buf = editor.get_buffer()
iter_ = src_buf.get_end_iter()
        # Move to the end. (TODO: it does not actually reach the very end.)
editor.scroll_to_iter(iter_, 0.25, False, 0.0, 0.5)
|
luo2chun1lei2/AgileEditor
|
vc/src/ViewLog.py
|
Python
|
gpl-2.0
| 6,715
| 0.010425
|
from vkapp.bot.models import Blogger, News, AdminReview, Publication
from .usersDAO import get_or_create_blogger
from datetime import datetime, timedelta, time
def new_news(link, media, uid, pic):
blogger = get_or_create_blogger(uid)
news = News(link=link, blogger=blogger, media=media, pic=pic)
news.save()
return news
def get_news_proposed_today(uid):
today = datetime.now().date()
tomorrow = today + timedelta(1)
today_start = datetime.combine(today, time())
today_end = datetime.combine(tomorrow, time())
news = News.objects.filter(blogger__vk_user__vk_id=uid).filter(date_time__lte=today_end,
date_time__gte=today_start)
return news
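# Worked example (added for clarity): called on 2017-06-05, the filter above
# selects this blogger's news with
# 2017-06-05 00:00:00 <= date_time <= 2017-06-06 00:00:00.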
def news_by_blogger(uid):
blogger = get_or_create_blogger(uid)
news = News.objects.filter(blogger=blogger)
return news
def get_news_review_rating(news):
review = AdminReview.objects.filter(news=news)
    if len(review) == 0:
return 0
else:
return review[0].rating
def is_news_published(news):
published_info = Publication.objects.filter(news=news)
if len(published_info) == 0:
return False
else:
return True
|
ParuninPavel/lenta4_hack
|
vkapp/bot/dao/newsDAO.py
|
Python
|
mit
| 1,222
| 0.005728
|
from modes import *
# mode_traffic
field_rate_down = 'rate_down'
field_bw_down = 'bw_down'
field_rate_up = 'rate_up'
field_bw_up = 'bw_up'
# mode_temp
field_cpum = 'cpum'
field_cpub = 'cpub'
field_sw = 'sw'
field_hdd = 'hdd'
# mode_fan_speed
field_fan_speed = 'fan_speed'
# mode_xdsl
field_snr_down = 'snr_down'
field_snr_up = 'snr_up'
# mode_xdsl_errors
field_fec = 'fec'
field_crc = 'crc'
field_hec = 'hec'
field_es = 'es'
field_ses = 'ses'
# mode_switch1
field_rx1 = 'rx_1'
field_tx1 = 'tx_1'
# mode_switch2
field_rx2 = 'rx_2'
field_tx2 = 'tx_2'
# mode_switch3
field_rx3 = 'rx_3'
field_tx3 = 'tx_3'
# mode_switch4
field_rx4 = 'rx_4'
field_tx4 = 'tx_4'
# mode_transmission_tasks
field_nb_tasks_stopped = 'nb_tasks_stopped'
field_nb_tasks_checking = 'nb_tasks_checking'
field_nb_tasks_queued = 'nb_tasks_queued'
field_nb_tasks_extracting = 'nb_tasks_extracting'
field_nb_tasks_done = 'nb_tasks_done'
field_nb_tasks_repairing = 'nb_tasks_repairing'
field_nb_tasks_downloading = 'nb_tasks_downloading'
field_nb_tasks_error = 'nb_tasks_error'
field_nb_tasks_stopping = 'nb_tasks_stopping'
field_nb_tasks_seeding = 'nb_tasks_seeding'
# field_nb_tasks_active = 'nb_tasks_active' # Total active
# nb_tasks = 'nb_tasks' # Total
# mode_transmission_rate
field_rx_throttling = 'throttling_rate.rx_rate'
field_tx_throttling = 'throttling_rate.tx_rate'
field_rx_rate = 'rx_rate'
field_tx_rate = 'tx_rate'
# mode connection
field_bytes_up = 'bytes_up'
field_bytes_down = 'bytes_down'
# mode ftth
field_has_sfp = 'has_sfp'
field_link = 'link'
field_sfp_alim_ok = 'sfp_alim_ok'
field_sfp_has_signal = 'sfp_has_signal'
field_sfp_present = 'sfp_present'
# mode switch-bytes
field_rx_bytes = 'rx_good_bytes'
field_tx_bytes = 'tx_bytes'
# mode switch-packets
field_rx_packets = 'rx_good_packets'
field_tx_packets = 'tx_packets'
field_rx_unicast_packets = 'rx_unicast_packets'
field_tx_unicast_packets = 'tx_unicast_packets'
field_rx_broadcast_packets = 'rx_broadcast_packets'
field_tx_broadcast_packets = 'tx_broadcast_packets'
# mode wifi-stations
field_stations = 'stations'
# mode wifi-bytes
field_wifi_rx_bytes = 'rx_bytes'
field_wifi_tx_bytes = 'tx_bytes'
fields = {
mode_traffic: [
field_rate_down,
field_bw_down,
field_rate_up,
field_bw_up
],
mode_temp: [
field_cpum,
field_cpub,
field_sw,
field_hdd
],
mode_fan_speed: [
field_fan_speed
],
mode_xdsl: [
field_snr_down,
field_snr_up
],
mode_xdsl_errors: [
field_fec,
field_crc,
field_hec,
field_es,
field_ses
],
mode_switch1: [
field_rx1,
field_tx1
],
mode_switch2: [
field_rx2,
field_tx2
],
mode_switch3: [
field_rx3,
field_tx3
],
mode_switch4: [
field_rx4,
field_tx4
],
mode_switch_bytes: [
field_rx_bytes,
field_tx_bytes,
],
mode_switch_packets: [
field_rx_packets,
field_tx_packets,
field_rx_unicast_packets,
field_tx_unicast_packets,
field_rx_broadcast_packets,
field_tx_broadcast_packets,
],
mode_transmission_tasks: [
field_nb_tasks_stopped,
field_nb_tasks_checking,
field_nb_tasks_queued,
field_nb_tasks_extracting,
field_nb_tasks_done,
field_nb_tasks_repairing,
field_nb_tasks_downloading,
field_nb_tasks_error,
field_nb_tasks_stopping,
field_nb_tasks_seeding
],
mode_transmission_traffic: [
field_rx_throttling,
field_tx_throttling,
field_rx_rate,
field_tx_rate,
],
mode_connection: [
field_bytes_up,
field_bytes_down,
],
mode_connection_log: [
field_bytes_up,
field_bytes_down,
],
mode_ftth: [
field_has_sfp,
field_link,
field_sfp_alim_ok,
field_sfp_has_signal,
field_sfp_present,
],
mode_wifi_stations: [
field_stations,
],
mode_wifi_bytes: [
field_wifi_rx_bytes,
field_wifi_tx_bytes,
],
mode_wifi_bytes_log: [
field_wifi_rx_bytes,
field_wifi_tx_bytes,
],
}
xdsl_errors_fields_descriptions = {
    field_fec: 'FEC (Forward Error Correction)',
field_crc: 'CRC (Cyclic Redundancy Check)',
field_hec: 'HEC (Header Error Control)',
field_es: 'ES (Errored Seconds)',
field_ses: 'SES (Severely Errored Seconds)'
}
def get_fields(mode):
    if mode not in fields:
        raise ValueError('Unknown mode {}'.format(mode))
    return fields[mode]
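# Usage sketch (illustrative; mode_traffic is defined in modes.py, imported above):
#   get_fields(mode_traffic)  ->  ['rate_down', 'bw_down', 'rate_up', 'bw_up']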
| chteuchteu/Freebox-OS-munin | fields.py | Python | gpl-2.0 | 4,648 | 0.000215 |
#!/usr/bin/
"""
"""
from __future__ import print_function
from __future__ import division
import networkx as nx
import sys
from networkx.algorithms import bipartite
from operator import itemgetter
import matplotlib.pyplot as plt
import argparse
import pickle as cp
import math
import numpy as np
from numpy import percentile
import collections
import Bio
from Bio.UniProt import GOA
from Bio.Seq import Seq
from dateutil import parser
import os
import xlsxwriter
DATADIR = "data/"
# Some filenames
FILE_ALTERNATE_ID_TO_ID_MAPPING = DATADIR+"alt_to_id.graph"
FILE_CAFA_ID_TO_UNIPROT_ID_MAP = DATADIR+"CAFAIDTOUniprotIDMap.txt"
FILE_MFO_ONTOLOGY_GRAPH = DATADIR+"mf.graph"
FILE_BPO_ONTOLOGY_GRAPH = DATADIR+ "bp.graph"
FILE_CCO_ONTOLOGY_GRAPH = DATADIR+"cc.graph"
FILE_MFO_ONTOLOGY_ANCESTORS_GRAPH = DATADIR+"mf_ancestors.map"
FILE_BPO_ONTOLOGY_ANCESTORS_GRAPH = DATADIR+"bp_ancestors.map"
FILE_CCO_ONTOLOGY_ANCESTORS_GRAPH = DATADIR+ "cc_ancestors.map"
verbose=0
options=""
report=0
GAF21FIELDS = [
'DB',
'DB_Object_ID',
'DB_Object_Symbol',
'Qualifier',
'GO_ID',
'DB:Reference',
'Evidence',
'With',
'Aspect',
'DB_Object_Name',
'Synonym',
'DB_Object_Type',
'Taxon_ID',
'Date',
'Assigned_By',
'Annotation_Extension',
'Gene_Product_Form_ID'
]
EXPEC = [
"EXP",
"IDA",
"IPI",
"IMP",
"IGI",
"IEP"
]
COMPEC = [
"ISS",
"ISO",
"ISA",
"ISM",
"IGC",
"IBA",
"IBD",
"IKR",
"IRD",
"RCA"
]
AUTHEC = [
"TAS",
"NAS"
]
CUREC = [
"IC",
"ND"
]
IEA = ["IEA"]
def column( matrix, i ):
f = itemgetter( i )
return map( f, matrix )
def chooseProteinsBasedOnPublications( data, cutoff_prot, cutoff_attn ):
"""
This function will read the set of proteins and will choose only those proteins which have been probed in those publications which
deal with less than -d <number> of proteins
This program creates a bipartite graph with one set as the GO terms and the other set as the references and cross links them with the
GO_TERMS as weights to the edges.
This function can be used to select a cut off based on number of annotations by a particular reference or even number of proteins
annotated by a reference. It is recommended that the protein cut-off, i.e. -cprot, be used instead of the annotations cutoff. Since
it is relevant for a reference to provide more annotations to fewer proteins than to work with a lot of proteins.
"""
mapping = []
for attnid in data:
per_annotation = data[attnid]
go = per_annotation['GO_ID'] # Extracting the Gene Ontology
protein = per_annotation['DB'] + "_" + per_annotation['DB_Object_ID']
ref = per_annotation['DB:Reference'] # The reference
mapping.append( [protein, ref, go, attnid] ) # Appending the annotation id for later identification
g = nx.MultiGraph()
g.add_nodes_from( column( mapping, 0 ), bipartite = 0 )
g.add_nodes_from( column( mapping, 1 ), bipartite = 1 )
for triplet in mapping:
g.add_edge( triplet[0], triplet[1], weight = triplet[2] + "_" + triplet[3] )
simple_g = nx.Graph( g ) # Converting the multi graph to a simple graph without parallel edges
no_of_prot_annotations_by_each_ref=[]
for ref in list( set( column( mapping, 1 ) ) ):
no_of_prot_annotations_by_each_ref.append(simple_g.degree(ref))
if cutoff_attn==None:
graph=simple_g
threshold=int(cutoff_prot)
elif cutoff_prot==None:
graph=g
threshold=int(cutoff_attn)
list_of_chosen_attn = []
# Looping through each GO term and selecting those for which there is at least one reference which probes fewer proteins than threshold
for protein in list( set( column( mapping, 0 ) ) ):
references = g.neighbors( protein )
for ref in references:
# Condition for inclusion
if graph.degree( ref ) <= threshold:
for key in g.get_edge_data( protein, ref ):
weight = g.get_edge_data( protein, ref )[key]
list_of_chosen_attn.append( weight['weight'].split( "_" )[1] )
new_data = dict()
for attid in list_of_chosen_attn:
new_data[attid] = data[attid]
return new_data
#
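# A minimal illustration, not part of the pipeline, of the degree-based cut
# used above: references whose degree in the protein-reference graph exceeds
# the threshold are treated as high-throughput and their annotations dropped.
#
#   g = nx.Graph([('P1', 'PMID:1'), ('P2', 'PMID:1'), ('P3', 'PMID:2')])
#   threshold = 1
#   kept = [ref for ref in ('PMID:1', 'PMID:2') if g.degree(ref) <= threshold]
#   # kept == ['PMID:2']  (only the low-throughput reference survives)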
def convertToDictionary( filename ):
"""
This function reads from the input gaf file and converts it to a dictionary. This function is deprecated and will be removed in further releases.
Instead of using this function the program now makes use of the gaf iterator function from biopython.
"""
alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
fhr = open( filename, "r" )
data = dict()
counter = 1
for line in fhr:
if "!" not in line:
line = line.split( "\t" )
id = "anntn" + str( counter )
per_annotation = dict()
for f_no, field in enumerate( GAF21FIELDS ):
if field=="GO_ID":
if line[f_no].strip() in alt_id_to_id_map:
#print(line[f_no].strip())
line[f_no]=alt_id_to_id_map[line[f_no].strip()]
per_annotation[field] = line[f_no]
data[id] = per_annotation
counter += 1
"""if(len(data)==10):
break"""
fhr.close()
return data
def convertFromGAFToRequiredFormat(gaf):
"""
This function takes the data input which is created by gaf iterator and then makes few changes
in the annotations which is relevant to this program.
"""
alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
counter=1
data=dict()
for annotation in gaf:
id="anntn" + str( counter )
if annotation['GO_ID'] in alt_id_to_id_map:
annotation['GO_ID']=alt_id_to_id_map[annotation['GO_ID']]
annotation['DB:Reference']=annotation['DB:Reference'][0]
annotation['Date']=parser.parse(annotation['Date']).date()
#annotation['Qualifier']='|'.join(annotation['Qualifier'])
#print(annotation['Evidence'])
data[id]=annotation
counter += 1
return data
def writeToFile( data, filename ,input_filename):
"""
This function will write the content of the data structure 'data' to the output file.
It requires the input file to read the header. Inclusion of the header is mandatory.
"""
vprint("Writing to file ",filename)
#print(filename)
filepath="/".join(filename.split("/")[:-1] )
try:
if os.path.isdir(filepath)==False:
os.makedirs(filepath)
except OSError:
print("You do not have sufficient Permissions to create the folder. Please alter the permissions or provide a different path.")
sys.exit()
fhr = open(input_filename,"r")
header=""
for line in fhr:
if line[0]=='!':
header+=line
fhr.close()
fhw = open( filename+".gaf", "w" )
fhw.write(header)
for key in data:
per_annotation = data[key]
per_annotation['Qualifier']='|'.join(per_annotation['Qualifier'])
per_annotation['With']='|'.join(per_annotation['With'])
per_annotation['Synonym']='|'.join(per_annotation['Synonym'])
per_annotation['Taxon_ID']='|'.join(per_annotation['Taxon_ID'])
per_annotation['Date']=''.join(str(per_annotation['Date']).split("-"))
# vprint(per_annotation)
string = ""
for field in GAF21FIELDS:
try:
string += per_annotation[field] + "\t"
except TypeError:
print("Exception has occurred in function writeToFile")
print(per_annotation)
print(field)
print(per_annotation[field])
exit()
string += '\n'
fhw.write( string )
fhw.close()
def checkEvidenceCodeForCorrectness( codes ):
"""
This function checks whether the Evidence Codes provided by the user.
It will return false if any incorrect evidence code is provided.
"""
for evidence in codes:
if( evidence != "COMPEC" and evidence != "EXPEC" and evidence != "AUTHEC" and evidence != "CUREC" and evidence != "IEA" ):
evidence = [evidence]
if True not in set( [set( evidence ).issubset( set( COMPEC ) ), set( evidence ).issubset( set( EXPEC ) ), set( evidence ).issubset( set( CUREC ) ), set( evidence ).issubset( set( IEA ) )] ):
return False
return True
def chooseProteinsBasedOnEvidenceCodes( data, evidence_list, evidence_inverse_list ):
"""
This function will select only those annotations which have been annotated by the provided Evidence Codes
"""
# Checking whether the provided Evidence codes are correct or not
if evidence_list is not None:
if checkEvidenceCodeForCorrectness( evidence_list ) == False:
vprint( "Invalid arguments for Evidence Codes provided please check http://geneontology.org/page/guide-go-evidence-codes" )
sys.exit()
else:
if checkEvidenceCodeForCorrectness( evidence_inverse_list ) == False:
vprint( "Invalid arguments for Evidence Codes provided please check http://geneontology.org/page/guide-go-evidence-codes" )
sys.exit()
select_these_EC = []
if( evidence_list is not None ):
for evidence in evidence_list:
if( evidence != "COMPEC" and evidence != "EXPEC" and evidence != "AUTHEC" and evidence != "CUREC" and evidence != "IEA" ):
select_these_EC.append( evidence )
else:
EC_set = ""
if( evidence == "COMPEC" ):
EC_set = COMPEC
elif( evidence == "EXPEC" ):
EC_set = EXPEC
elif( evidence == "AUTHEC" ):
EC_set = AUTHEC
elif( evidence == "CUREC" ):
EC_set = CUREC
elif( evidence == "IEA" ):
EC_set = IEA
for ele in EC_set:
select_these_EC.append( ele )
else:
select_these_EC.extend( COMPEC )
select_these_EC.extend( EXPEC )
select_these_EC.extend( AUTHEC )
select_these_EC.extend( CUREC )
select_these_EC.extend( IEA )
for evidence in evidence_inverse_list:
if( evidence != "COMPEC" and evidence != "EXPEC" and evidence != "AUTHEC" and evidence != "CUREC" and evidence != "IEA" ):
select_these_EC.remove( evidence )
else:
EC_set = ""
if( evidence == "COMPEC" ):
EC_set = COMPEC
elif( evidence == "EXPEC" ):
EC_set = EXPEC
elif( evidence == "AUTHEC" ):
EC_set = AUTHEC
elif( evidence == "CUREC" ):
EC_set = CUREC
elif( evidence == "IEA" ):
EC_set = IEA
for ele in EC_set:
select_these_EC.remove( ele )
new_data = dict()
vprint(select_these_EC)
for attnid in data:
per_annotation = data[attnid]
if per_annotation['Evidence'] in select_these_EC:
new_data[attnid] = per_annotation
return new_data
# This function reads the data entered via command line and returns a dictionary with all relevant options
def parseCommandLineArguments( ):
parser = argparse.ArgumentParser( prog = "debias.py" )
mutex_parser_evidence = parser.add_mutually_exclusive_group()
mutex_parser_assigned_by=parser.add_mutually_exclusive_group()
mutex_parser_IC_THRESH=parser.add_mutually_exclusive_group()
# mutex_parser_PL_THRESH=parser.add_mutually_exclusive_group()
mutex_parser_select_references=parser.add_mutually_exclusive_group()
requiredArguments = parser.add_argument_group( "Required arguments" )
parser.add_argument( '--prefix','-pref',help="Add a prefix to the name of your output files." )
parser.add_argument( '--cutoff_prot', '-cprot', help = "The threshold level for deciding to eliminate annotations which come from references that annotate more than the given 'threshold' number of PROTEINS" )
parser.add_argument( '--cutoff_attn', '-cattn', help = "The threshold level for deciding to eliminate annotations which come from references that annotate more than the given 'threshold' number of ANNOTATIONS" )
parser.add_argument( '--output', '-odir',help = "Writes the final outputs to the directory in this path." )
mutex_parser_evidence.add_argument( '--evidence', '-e', nargs = "+", help = "Accepts Standard Evidence Codes outlined in http://geneontology.org/page/guide-go-evidence-codes. All 3 letter code for each standard evidence is acceptable. In addition to that EXPEC is accepted which will pull out all annotations which are made experimentally. COMPEC will extract all annotations which have been done computationally. Similarly, AUTHEC and CUREC are also accepted. Cannot be provided if -einv is provided " )
mutex_parser_evidence.add_argument( '--evidence_inverse', '-einv', nargs = "+", help = "Leaves out the provided Evidence Codes. Cannot be provided if -e is provided" )
requiredArguments.add_argument( '--input', '-i', nargs = "+", help = "The input file path. Please remember the name of the file must start with goa in front of it, with the name of the species following separated by an underscore", required = True )
parser.add_argument( '--aspect', '-a', nargs = "+", help = "Enter P, C or F for Biological Process, Cellular Component or Molecular Function respectively" )
mutex_parser_assigned_by.add_argument('--assigned_by','-assgn',nargs = "+",help="Choose only those annotations which have been annotated by the provided list of databases. Cannot be provided if -assgninv is provided")
mutex_parser_assigned_by.add_argument('--assigned_by_inverse','-assgninv',nargs = "+",help="Choose only those annotations which have NOT been annotated by the provided list of databases. Cannot be provided if -assgn is provided")
parser.add_argument('--recalculate','-recal',help="Set this to 1 if you wish to enforce the recalculation of the Information Accretion for every GO term. Calculation of the information accretion is time consuming. Therefore keep it to zero if you are performing rerun on old data. The program will then read the information accretion values from a file which it wrote to in the previous run of the program",default=0)
mutex_parser_IC_THRESH.add_argument('--info_threshold_Wyatt_Clark_percentile','-WCTHRESHp',help="Provide the percentile p. All annotations having information content below p will be discarded")
mutex_parser_IC_THRESH.add_argument('--info_threshold_Wyatt_Clark','-WCTHRESH',help="Provide a threshold value t. All annotations having information content below t will be discarded")
mutex_parser_IC_THRESH.add_argument('--info_threshold_Phillip_Lord_percentile','-PLTHRESHp',help="Provide the percentile p. All annotations having information content below p will be discarded")
mutex_parser_IC_THRESH.add_argument('--info_threshold_Phillip_Lord','-PLTHRESH',help="Provide a threshold value t. All annotations having information content below t will be discarded")
parser.add_argument('--verbose','-v',help="Set this argument to 1 if you wish to view the outcome of each operation on console",default=0)
parser.add_argument('--date_before','-dbfr',help="The date entered here will be parsed by the parser from dateutil package. For more information on acceptable date formats please visit https://github.com/dateutil/dateutil/. All annotations made prior to this date will be picked up")
parser.add_argument('--date_after','-daftr',help="The date entered here will be parsed by the parser from dateutil package. For more information on acceptable date formats please visit https://github.com/dateutil/dateutil/. All annotations made after this date will be picked up")
parser.add_argument('--single_file','-single',default=0,help="Set to 1 in order to output the results of each individual species in a single file.")
    mutex_parser_select_references.add_argument('--select_references','-selref',nargs='+',help='Provide the paths to files which contain references you wish to select. It is possible to include references in case you wish to select annotations made by a few references. This will prompt the program to interpret strings which have the keywords \'GO_REF\',\'PMID\' and \'Reactome\' as a GO reference. Strings which do not contain those keywords will be interpreted as file paths which the program will expect to contain a list of GO references. The program will accept a mixture of GO_REF and file names. It is also possible to choose all references of a particular category and a handful of references from another. For example if you wish to choose all PMID references, just put PMID. The program will then select all PMID references. Currently the program can accept PMID, GO_REF and Reactome')
mutex_parser_select_references.add_argument('--select_references_inverse','-selrefinv',nargs='+',help='Works like -selref but does not select the references which have been provided as input')
parser.add_argument('--report','-r',help="Provide the path where the report file will be stored. If you are providing a path please make sure your path ends with a '/'. Otherwise the program will assume the last string after the final '/' as the name of the report file. A single report file will be generated. Information for each species will be put into individual worksheets.")
parser.add_argument('--histogram','-hist',help="Set this option to 1 if you wish to view the histogram of GO_TERM frequency before and after debiasing is performed with respect to cutoffs based on number of proteins or annotations. If you wish to save the file then please enter a filepath. If you are providing a path please make sure your path ends with a '/'. Otherwise the program will assume the last string after the final '/' as the name of the image file. Separate histograms will be generated for each species.")
args = parser.parse_args()
return args
def createProteinToGOMapping( data ):
"""
This function creates a dictionary where key is a protein. Each protein refers to a list where the list consists of GO_TERMS.
"""
prot_to_go = dict()
all_GO = []
alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
for attnid in data:
annotation = data[attnid]
prot_id = annotation['DB'] + '_' + annotation['DB_Object_ID']
GO_term = annotation['GO_ID']
if GO_term in alt_id_to_id_map:
GO_term = alt_id_to_id_map[GO_term]
all_GO.append( GO_term )
if prot_id not in prot_to_go:
prot_to_go[prot_id] = []
if [GO_term, annotation['Aspect']] not in prot_to_go[prot_id]:
prot_to_go[prot_id].append( [GO_term, annotation['Aspect']] )
else:
if [GO_term, annotation['Aspect']] not in prot_to_go[prot_id]:
prot_to_go[prot_id].append( [GO_term, annotation['Aspect']] )
# vprint(prot_to_go[prot_id])
return prot_to_go, list( set( all_GO ) )
def propagateOntologies( Prot_to_GO_Map ):
"""
This function takes in each annotation and constructs the ancestors of that term from their respective Aspect
"""
mf_g = cp.load( open( FILE_MFO_ONTOLOGY_GRAPH, "rb" ) )
bp_g = cp.load( open( FILE_BPO_ONTOLOGY_GRAPH, "rb" ) )
cc_g = cp.load( open( FILE_CCO_ONTOLOGY_GRAPH, "rb" ) )
alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
# vprint(alt_id_to_id_map)
Prot_to_GO_Map_new = dict()
mf_ancestors=cp.load(open(FILE_MFO_ONTOLOGY_ANCESTORS_GRAPH,"rb"))
bp_ancestors=cp.load(open(FILE_BPO_ONTOLOGY_ANCESTORS_GRAPH,"rb"))
cc_ancestors=cp.load(open(FILE_CCO_ONTOLOGY_ANCESTORS_GRAPH,"rb"))
for eachprotein in Prot_to_GO_Map:
ancestors = []
annotations = Prot_to_GO_Map[eachprotein]
for annotation in annotations:
aspect = annotation[1]
GO_term = annotation[0]
if aspect == 'F':
ancestors.extend(mf_ancestors[GO_term])
if aspect == 'P':
ancestors.extend(bp_ancestors[GO_term])
if aspect == 'C':
ancestors.extend(cc_ancestors[GO_term])
ancestors = list( set( ancestors ) )
Prot_to_GO_Map_new[eachprotein] = ancestors
return Prot_to_GO_Map_new
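# Illustrative note: after propagation a protein annotated with a leaf term is
# also credited with every ancestor of that term within its aspect, so the
# frequencies computed below count implicit (inherited) annotations as well.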
def findFrequency( annotations, Prot_to_GO_Map ):
count = 0
if annotations == None:
return 0
for prot in Prot_to_GO_Map:
if set( annotations ).issubset( set( Prot_to_GO_Map[prot] ) ):
count += 1
return count
def assignProbabilitiesToOntologyTree( g, Prot_to_GO_Map, all_GO_Terms, ontology_to_ia_map,aspect ):
for node_num, node in enumerate( g.nodes() ):
if( node not in all_GO_Terms ):
ontology_to_ia_map[node] = [0, 0]
continue
if node_num % 100 == 0:
vprint( node_num , " proteins processed for ",aspect )
        predecessor = list( g.successors( node ) )  # materialize: networkx 2.x returns an iterator and it is consumed twice below
# vprint(node,predecessor)
predecessor_with_node = []
predecessor_with_node.extend( predecessor )
predecessor_with_node.append( node )
denom = findFrequency( predecessor, Prot_to_GO_Map )
num = findFrequency( predecessor_with_node, Prot_to_GO_Map )
# vprint(node,g.successors(node))
"""vprint(predecessor_with_node,num)
vprint(predecessor,denom)"""
        if( denom == 0 or num == 0 ):
            # guard against a math domain error on log(0); treat unobserved terms as zero information accretion
            ontology_to_ia_map[node] = [0, 0]
        else:
            prob = num / denom
            ontology_to_ia_map[node] = [prob, -math.log( prob, 2 )]
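# Worked mini-example (illustrative): if 8 proteins carry all the parents of a
# term and 2 of those also carry the term itself, then prob = 2/8 = 0.25 and
# the information accretion is -log2(0.25) = 2 bits.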
def assignProbabilitiesToOntologyGraphs( Prot_to_GO_Map, all_GO_Terms,aspects ):
mf_g = cp.load( open( FILE_MFO_ONTOLOGY_GRAPH, "rb" ) )
bp_g = cp.load( open( FILE_BPO_ONTOLOGY_GRAPH, "rb" ) )
cc_g = cp.load( open( FILE_CCO_ONTOLOGY_GRAPH, "rb" ) )
ontology_to_ia_map = dict()
assignProbabilitiesToOntologyTree( mf_g, Prot_to_GO_Map, all_GO_Terms, ontology_to_ia_map, 'MFO' )
assignProbabilitiesToOntologyTree( bp_g, Prot_to_GO_Map, all_GO_Terms, ontology_to_ia_map, 'BPO' )
assignProbabilitiesToOntologyTree( cc_g, Prot_to_GO_Map, all_GO_Terms, ontology_to_ia_map, 'CCO' )
"""for GO in ontology_to_ia_map:
vprint(ontology_to_ia_map[GO])"""
return ontology_to_ia_map
def calculateInformationAccretionForEachProtein( Prot_to_GO_Map, ontology_to_ia_map ):
vprint( "Starting calculation of ia" )
infoAccr = dict()
alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
for prot in Prot_to_GO_Map:
annotations = Prot_to_GO_Map[prot]
ia = 0
for annotation in annotations:
if len( annotation ) == 2:
GO_term = annotation[0]
else:
GO_term = annotation
if GO_term not in ontology_to_ia_map:
GO_term = alt_id_to_id_map[GO_term]
# vprint(prot,annotation[0])
ia += ontology_to_ia_map[GO_term][1]
infoAccr[prot] = ia
return infoAccr
def chooseGOBasedOnAspect( data, aspect ):
new_data = dict()
#vprint( aspect )
for attnid in data:
if data[attnid]['Aspect'] in aspect:
new_data[attnid] = data[attnid]
return new_data
def chooseGOBasedOnAssignedBy( data, assigned_by,assigned_by_inverse):
new_data=dict()
for attnid in data:
if(assigned_by!=None):
if data[attnid]['Assigned_By'] in assigned_by:
new_data[attnid]=data[attnid]
else:
if data[attnid]['Assigned_By'] not in assigned_by_inverse:
new_data[attnid]=data[attnid]
return new_data
def calculatePhillipLordInformationContent(data,crisp,percentile_val):
go_terms=[]
"""Prot_to_GO_Map, all_GO_Terms_in_corpus = createProteinToGOMapping( data )
Prot_to_GO_Map_propagated = propagateOntologies( Prot_to_GO_Map )"""
#alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
"""for eachprot in Prot_to_GO_Map:
go_terms.extend([annotation[0] for annotation in Prot_to_GO_Map[eachprot]])"""
"""for eachprot in Prot_to_GO_Map_propagated:
go_terms.extend(Prot_to_GO_Map_propagated[eachprot])"""
for attnid in data:
go_terms.append(data[attnid]["GO_ID"])
GO_term_to_PL_info=collections.Counter(go_terms)
ic=[]
for term in GO_term_to_PL_info:
#vprint(term,x[term],x[term]/len(go_terms))
GO_term_to_PL_info[term]=-math.log(GO_term_to_PL_info[term]/len(go_terms),2)
ic.append(GO_term_to_PL_info[term])
if crisp==None:
        threshold=((max(ic)-min(ic))*float(percentile_val)/100)+min(ic)  # note: linear interpolation across the ic range, not a true statistical percentile
else:
threshold=float(crisp)
num_bins = 10
# the histogram of the data
#n, bins, patches = plt.hist(ic, num_bins, facecolor='green', alpha=0.9)
#plt.show()
vprint("The maximum value of information content is ",max(ic))
vprint("The minimum value of information content is ",min(ic))
vprint("The chosen threshold is ",threshold)
new_data=dict()
for attnid in data:
annotation=data[attnid]
if GO_term_to_PL_info[annotation["GO_ID"]]>=threshold:
new_data[attnid]=data[attnid]
return new_data
#vprint(collections.Counter(go_terms))
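# Worked mini-example (illustrative): with go_terms = ['GO:A','GO:A','GO:B','GO:C']
# the Phillip Lord information content is -log2(count/total):
#   GO:A -> -log2(2/4) = 1.0,   GO:B -> -log2(1/4) = 2.0,   GO:C -> -log2(1/4) = 2.0
# so a threshold of 1.5 would keep only the annotations to GO:B and GO:C.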
def calculateWyattClarkInformationContent(data,recal,crisp,percentile_val,aspects,outputfiles,input_num):
"""
This function will display some essential statistics when the value of threshold
is crisp and a percentile is not provided.
"""
#vprint(outputfiles[0].split("_"))
ontology_to_ia_map_filename="ontology_to_ia_map_"+"_".join(outputfiles[input_num].split("/")[-1].split("_")[:-2])+".txt"
#vprint(ontology_to_ia_map_filename)
#exit()
Prot_to_GO_Map, all_GO_Terms_in_corpus = createProteinToGOMapping( data )
Prot_to_GO_Map_propagated = propagateOntologies( Prot_to_GO_Map )
if(recal==1):
vprint("Recalculating Information Accretion for Wyatt Clark Information Content. This may take a long time depending on the size of input")
ontology_to_ia_map=assignProbabilitiesToOntologyGraphs(Prot_to_GO_Map_propagated,all_GO_Terms_in_corpus,aspects)
if os.path.isdir("data/temp/")==False:
os.makedirs("data/temp/")
cp.dump(ontology_to_ia_map,open("data/temp/"+ontology_to_ia_map_filename,"wb"))
else:
vprint("Skipping recalculation of Information Accretion for Wyatt Clark")
try:
ontology_to_ia_map = cp.load( open( "data/temp/"+ontology_to_ia_map_filename, "rb" ) )
except IOError as e:
print("File for GO_Term to ia NOT FOUND. Please rerun the program with the argument -recal 1")
exit()
#protInfoAccretion = calculateInformationAccretion( Prot_to_GO_Map_propagated, ontology_to_ia_map )
ia=[]
for mapping in ontology_to_ia_map:
if ontology_to_ia_map[mapping][0]!=0:
ia.append(ontology_to_ia_map[mapping][1])
#vprint(sorted(ia))
vprint(len(ia))
# Doing Some statistical analysis with the distribution of information content
if crisp==None:
        threshold=(max(ia)-min(ia))*float(percentile_val)/100+min(ia)  # note: linear interpolation across the ia range, not a true statistical percentile
else:
threshold=float(crisp)
#vprint("Wyatt Clark Threshold",threshold,min(ia),max(ia))
new_data=dict()
if crisp is not None:
num_bins = 10
# the histogram of the data
n, bins, patches = plt.hist(ia, num_bins, facecolor='green', alpha=0.9)
plt.show()
for attnid in data:
annotation=data[attnid]
#vprint(ontology_to_ia_map[annotation["GO_ID"]])
if ontology_to_ia_map[annotation["GO_ID"]][1]>=threshold:
new_data[attnid]=data[attnid]
#vprint(threshold)
return new_data
def chooseProteinsBasedOnReferences(data,select,inverse_select):
group=[]
references=[]
if select is not None:
ptr=select
else:
ptr=inverse_select
for item in ptr:
vprint(item)
if item in ("GO_REF","PMID","Reactome"):
group.append(item)
elif "GO_REF" in item or "PMID" in item or "Reactome" in item:
references.append(item)
else:
for line in open(item,"r"):
references.append(line.strip())
new_data=dict()
vprint(group)
vprint(references)
for attnid in data:
for item in group:
if item in data[attnid]['DB:Reference']:
new_data[attnid]=data[attnid]
if data[attnid]['DB:Reference'] in references:
new_data[attnid]=data[attnid]
if inverse_select is not None:
newer_data=dict()
for key in set(data.keys())-set(new_data.keys()):
newer_data[key]=data[key]
#vprint(key)
#vprint(newer_data[key])
new_data=newer_data
return new_data
def vprint(*s):
global verbose
#print(s,verbose)
if verbose==1:
for string in s:
print(string,end="")
print()
def printDetailsAboutData(data):
print("Total number of annotations in the provided Database ",len(data))
prots=[]
ref=[]
for attnid in data:
annotation=data[attnid]
prots.append(annotation['DB']+"_"+annotation['DB_Object_ID'])
ref.append(annotation['DB:Reference'])
print("Total number of unique proteins in the provided Database ",len(set(prots)))
print("Total number of unique references in the provided Database ",len(set(ref)))
def chooseAnnotationsBasedOnDate(data,before,after):
if before!=None:
before=parser.parse(before).date()
if after!=None:
after=parser.parse(after).date()
new_data=dict()
for attnid in data:
annotation=data[attnid]
if before!=None and after!=None:
if annotation['Date'] <= before and annotation['Date'] >= after:
new_data[attnid]=annotation
elif before!=None:
if annotation['Date'] <= before:
new_data[attnid]=annotation
elif after!=None:
if annotation['Date'] >= after:
new_data[attnid]=annotation
return new_data
def changeNameofOutputFiles(options):
longAspect={'P':'BPO','C':'CCO','F':'MFO'}
if options.prefix is not None:
prefix=options.prefix
else:
prefix=""
if options.output==None:
options.output=[]
path="./"+prefix
else:
path=options.output+"/"+prefix
options.output=[]
#vprint("Output Options ",options.output)
for num,inputfile in enumerate(options.input):
final_outputfilename=""
#vprint(inputfile)
vprint(options.output)
file=inputfile.split("/")[-1]
species=file.split(".gaf")[0].split("_")[1]
vprint("Species: "+species)
final_outputfilename=path+species
aspect=""
if options.aspect:
for i in range(len(options.aspect)):
aspect+=longAspect[options.aspect[i]]+"_"
final_outputfilename+="_"+aspect[:-1]
if options.cutoff_prot:
final_outputfilename+='_REF_'+options.cutoff_prot
if options.evidence:
final_outputfilename+="_"+"_".join(options.evidence)
if options.info_threshold_Phillip_Lord:
final_outputfilename+='_PL_'+options.info_threshold_Phillip_Lord
elif options.info_threshold_Phillip_Lord_percentile:
final_outputfilename+='_PLP_'+options.info_threshold_Phillip_Lord_percentile
if options.info_threshold_Wyatt_Clark:
final_outputfilename+='_WC_'+options.info_threshold_Wyatt_Clark
elif options.info_threshold_Wyatt_Clark_percentile:
final_outputfilename+='_WCP_'+options.info_threshold_Wyatt_Clark_percentile
options.output.append(final_outputfilename)
vprint(options.output[num])
vprint()
#exit()
#vprint(options.output)
return options.output
def combineOutputFiles(outputfiles,options):
#print(outputfiles)
path="/".join(outputfiles[0].split("/")[:-1])
file=outputfiles[0].split("/")[-1]
"""if ("./" in outputfiles[0]):
finaloutputfilename="all_"+"_".join(file.split("_")[1:])
else:"""
finaloutputfilename="all_"+"_".join(outputfiles[0].split("/")[-1].split("_")[1:])
if options.prefix!=None:
finaloutputfilename=options.prefix+finaloutputfilename
finaloutputfilename=path+"/"+finaloutputfilename
print(finaloutputfilename)
#Combine the gaf files
header=""
for line in open(outputfiles[0]+".gaf","r"):
if "!" in line:
header+=line
d=""
for filename in outputfiles:
for line in open(filename+".gaf","r"):
if "!" not in line:
d+=line
open(finaloutputfilename+".gaf","w").write(header+d)
def deleteTemporaryFiles(options):
"""
This function deletes all the files for each organism
"""
print("Inside delete temporary files")
for filename in options.output:
os.remove(filename+".gaf")
def createReportFile(filepath,outputfilename):
#print("Outputfilename ",outputfilename)
#print("Filepath ",filepath)
repfilepath=""
if(filepath[-1]=='/'):
if os.path.isdir(filepath) == False:
os.makedirs(filepath)
filepath+="report_"+"_".join(outputfilename.split("/")[-1].split("_")[1:])+".xlsx"
elif (filepath=="."):
filepath="report_"+"_".join(outputfilename.split("/")[-1].split("_")[1:])+".xlsx"
else:
if "/" in filepath and os.path.isdir('/'.join(filepath.split("/")[:-1])) == False:
os.makedirs('/'.join(filepath.split("/")[:-1]))
if ("." in filepath.split("/")[-1]):
filepath=".".join(filepath.split(".")[:-1])+".xlsx"
else:
filepath+=".xlsx"
#print("Report Filepath ",filepath)
return filepath
def countProteins(data):
allprots=[]
for annotation in data:
allprots.append(data[annotation]['DB']+"_"+data[annotation]['DB_Object_ID'])
return len(set(allprots))
def writeReport(filename,report):
#fhw=open(filename,"w")
all_filenames=[]
if("/" not in filename):
for species in report:
all_filenames.append(species+"_"+filename[:-4]+"tsv")
else:
for species in report:
all_filenames.append("/".join(filename.split("/")[:-1])+"/"+species+"_"+filename.split("/")[1][:-4]+"tsv")
#print(all_filenames)
#print(report)
for sp_num,species in enumerate(report):
fhw=open(all_filenames[sp_num],"w")
for element in report[species]:
#print(element)
#fhw.write("\t".join(element)+"\n")
for col,ele in enumerate(element):
#print(species,element,ele)
fhw.write(str(ele)+"\t")
fhw.write("\n")
#print(chr(column),chr(column+1))
"""workbook = xlsxwriter.Workbook(filename)
bold = workbook.add_format({'bold': True,'text_wrap':True,'font_name':'monaco'})
font = workbook.add_format({'font_name':'monaco'})
for species in report:
worksheet = workbook.add_worksheet(species.split("/")[-1])
column=0
row=0
worksheet.write(row,column,"Operation",bold)
worksheet.set_column(column,column, 30)
worksheet.write(row,column+1,"Number of proteins before",bold)
worksheet.write(row,column+2,"Number of proteins after",bold)
worksheet.write(row,column+3,"Number of annotations before",bold)
worksheet.write(row,column+4,"Number of annotations after",bold)
row+=1
#print(report[species])
for element in report[species]:
for col,ele in enumerate(element):
if col==0:
worksheet.write(row,column+col,ele,bold)
else:
worksheet.write(row,column+col,ele,font)
row+=1
#print(chr(column),chr(column+1))
workbook.close()"""
def generateHistogram(options,data,species,prev,lat,msg):
filepath=options.histogram
outputfilename=options.output[0]
#print(prev)
if(filepath[-1]=='/'):
if os.path.isdir(filepath) == False:
os.makedirs(filepath)
filepath+=species.split(".gaf")[0].split("_")[-1]+"_"+"histogram_"+"_".join(outputfilename.split("/")[-1].split("_")[1:])+".png"
elif (filepath=="."):
#print("I am here")
filepath=species.split(".gaf")[0].split("_")[-1]+"_"+"histogram_"+"_".join(outputfilename.split("/")[-1].split("_")[1:])+".png"
#print(filepath)
else:
if "/" in filepath and os.path.isdir('/'.join(filepath.split("/")[:-1])) == False:
os.makedirs('/'.join(filepath.split("/")[:-1]))
temp=species.split(".gaf")[0].split("_")[-1]+"_"
if ("." not in filepath and "/" not in filepath):
filepath=species.split(".gaf")[0].split("_")[-1]+"_"+"histogram_"+"_".join(outputfilename.split("/")[-1].split("_")[1:])+".png"
elif ("." in filepath.split("/")[-1]):
filepath="/".join((".".join(filepath.split(".")[:-1])+".png").split("/")[:-1])+"/"+temp+(".".join(filepath.split(".")[:-1])+".png").split("/")[-1]
else:
filepath="/".join(filepath.split("/")[:-1])+"/"+temp+filepath.split("/")[-1]+".png"
#print("IMG filepath",filepath)
prev_val=[prev[key] for key in prev]
#print(sum(prev_val),np.mean(prev_val))
new_prev_val=[-(val/sum(prev_val))*math.log(val/sum(prev_val),2) for val in prev_val]
#prev_val=new_prev_val
lat_val=[lat[key] for key in lat]
#print(sum(lat_val),np.mean(lat_val))
new_lat_val=[-(val/sum(lat_val))*math.log(val/sum(lat_val),2) for val in lat_val]
#lat_val=new_lat_val
"""prev_val=[]
for key in prev:
prev_val.append(prev[key])"""
binsize=100
plt.hist(new_prev_val, bins=binsize,color='r',label="Before Debiasing")
plt.hist(new_lat_val,bins=binsize,color='b',label="After Debiasing")
#plt.xlim((0,max(max(new_prev_val)+0.1,max(new_lat_val)+0.1)))
plt.xlabel("Information Content")
plt.ylabel("Frequency")
plt.title("Histogram for "+species.split(".gaf")[0].split("_")[-1]+"\n"+msg)
if options.histogram == "1":
plt.legend(bbox_to_anchor=(0.4, 1))
plt.show()
else:
#plt.figure(figsize=(70,70))
plt.legend(bbox_to_anchor=(0.45, 1))
plt.savefig(filepath,dpi=900)
        fhw=open(filepath[:-3]+"txt","w")
fhw.write(str(new_prev_val)+"\n"+str(new_lat_val))
plt.close()
def freqGO_TERM(data):
go_to_freq=dict()
for annotation in data:
if data[annotation]['GO_ID'] in go_to_freq:
go_to_freq[data[annotation]['GO_ID']]+=1
else:
go_to_freq[data[annotation]['GO_ID']]=1
return go_to_freq
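# Equivalent one-liner (illustrative): the loop above builds the same mapping as
#   collections.Counter(data[attnid]['GO_ID'] for attnid in data)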
def main():
global verbose,options,report
commandLineArg = sys.argv
if len(commandLineArg)==1:
print("Please use the --help option to get usage information")
# Parse command line arguments
options = parseCommandLineArguments( )
if options.recalculate!=0 and (options.info_threshold_Wyatt_Clark==None and options.info_threshold_Wyatt_Clark_percentile==None):
print("Error in arguments. You must provide Wyatt Clark in order to recalculate")
#print(options.recalculate,options.info_threshold_Wyatt_Clark,options.info_threshold_Wyatt_Clark_percentile )
exit()
if(options.aspect==None):
options.aspect=["F","P","C"]
if options.verbose!=0:
verbose=1
#vprint( options )
options.output=changeNameofOutputFiles(options)
if options.report!=None:
report=1
options.report=createReportFile(options.report,options.output[0])
reportdict=dict()
for input in options.input:
reportdict[input.split(".gaf")[0].split("_")[1]]=[]
#exit()
data=dict()
#data = convertToDictionary( options.input )
for file_num,eachinputfile in enumerate(options.input):
species=eachinputfile.split(".gaf")[0].split("_")[1]
gafoutput=GOA._gaf20iterator(open(eachinputfile,"r"))
data=convertFromGAFToRequiredFormat(gafoutput)
#data.update(temp_data)
#vprint("Processing file ",eachInputFile,file_num)
if options.verbose!=0:
printDetailsAboutData(data)
vprint()
if(options.select_references!=None or options.select_references_inverse!=None):
vprint("Number of annotations before choosing proteins based on references ",len(data))
prev_len=len(data)
if(report==1):
report_row=[]
report_row.append("References")
report_row.append(countProteins(data))
data=chooseProteinsBasedOnReferences(data,options.select_references,options.select_references_inverse)
if(report==1):
report_row.append(countProteins(data))
report_row.append(prev_len)
report_row.append(len(data))
                reportdict[species].append(report_row)
vprint("Number of annotations before choosing proteins based on references ",len(data))
vprint( "Data discarded ", prev_len - len( data ) )
vprint()
if( options.evidence != None or options.evidence_inverse != None ):
vprint( "Number of annotations before choosing proteins based on Evidence Codes ", len( data ) )
prev_len = len( data )
if(report==1):
report_row=[]
report_row.append("Evidence")
report_row.append(countProteins(data))
data = chooseProteinsBasedOnEvidenceCodes( data, evidence_list = options.evidence, evidence_inverse_list = options.evidence_inverse )
vprint( "Number of annotations after choosing proteins based on Evidence Codes ", len( data ) )
vprint( "Data discarded ", prev_len - len( data ) )
if(report==1):
report_row.append(countProteins(data))
report_row.append(prev_len)
report_row.append(len(data))
                reportdict[species].append(report_row)
vprint()
if( options.aspect != None ):
vprint( "Number of annotations before choosing proteins based on aspect ", len( data ) )
prev_len = len( data )
if(report==1):
report_row=[]
report_row.append("Aspect")
report_row.append(countProteins(data))
data = chooseGOBasedOnAspect( data, aspect = options.aspect )
vprint( "Number of annotations after choosing proteins based on aspect ", len( data ) )
vprint( "Data discarded ", prev_len - len( data ) )
if(report==1):
report_row.append(countProteins(data))
report_row.append(prev_len)
report_row.append(len(data))
reportdict[species].append(report_row)
vprint()
if(options.assigned_by!=None or options.assigned_by_inverse!=None):
vprint("Number of annotations before choosing proteins based on assigned by",len(data))
prev_len=len(data)
if(report==1):
report_row=[]
report_row.append("Assigned By")
report_row.append(countProteins(data))
data=chooseGOBasedOnAssignedBy(data,options.assigned_by,options.assigned_by_inverse)
vprint( "Number of annotations after choosing proteins based on assigned_by ", len( data ) )
vprint( "Data discarded ", prev_len - len( data ) )
if(report==1):
report_row.append(countProteins(data))
report_row.append(prev_len)
report_row.append(len(data))
                reportdict[species].append(report_row)
vprint()
if(options.date_before!=None or options.date_after!=None):
vprint("Number of annotations before choosing proteins based on provided range of dates ",len(data))
prev_len=len(data)
if(report==1):
report_row=[]
report_row.append("Date")
report_row.append(countProteins(data))
data=chooseAnnotationsBasedOnDate(data,options.date_before,options.date_after)
vprint("Number of annotations after choosing proteins based on provided range of dates ",len(data))
vprint( "Data discarded ", prev_len - len( data ) )
if(report==1):
report_row.append(countProteins(data))
report_row.append(prev_len)
report_row.append(len(data))
                reportdict[species].append(report_row)
vprint()
if( options.cutoff_prot != None or options.cutoff_attn !=None):
vprint( "Number of annotations before choosing proteins based on Publications ", len( data ) )
prev_len = len( data )
if(report==1):
report_row=[]
if (options.cutoff_prot!=None):
report_row.append("Protein Cut off")
else:
report_row.append("Annotation Cut off")
report_row.append(countProteins(data))
if options.histogram!=None:
prev_go_term_freq=freqGO_TERM(data)
data = chooseProteinsBasedOnPublications( data, options.cutoff_prot,options.cutoff_attn)
if options.histogram!=None:
later_go_term_freq=freqGO_TERM(data)
generateHistogram(options,data,eachinputfile,prev_go_term_freq,later_go_term_freq,"Removal of high Throughput Papers")
vprint( "Number of annotations after choosing proteins based on Publications ", len( data ) )
vprint( "Data discarded ", prev_len - len( data ) )
if(report==1):
report_row.append(countProteins(data))
report_row.append(prev_len)
report_row.append(len(data))
reportdict[species].append(report_row)
vprint()
if(options.info_threshold_Phillip_Lord!=None or options.info_threshold_Phillip_Lord_percentile!=None):
vprint("Number of annotations before choosing proteins based on Phillip Lord Threshold ",len(data))
prev_len=len(data)
if(report==1):
report_row=[]
report_row.append("Phillip Lord Threshold")
report_row.append(countProteins(data))
data=calculatePhillipLordInformationContent(data,options.info_threshold_Phillip_Lord,options.info_threshold_Phillip_Lord_percentile)
vprint( "Number of annotations after choosing proteins based on Phillip Lord Threshold ", len( data ) )
vprint( "Data discarded ", prev_len - len( data ) )
if(report==1):
report_row.append(countProteins(data))
report_row.append(prev_len)
report_row.append(len(data))
                reportdict[species].append(report_row)
vprint()
        if(options.info_threshold_Wyatt_Clark!=None or options.info_threshold_Wyatt_Clark_percentile!=None):
vprint("Number of annotations before choosing proteins based on Wyatt Clark Threshold ",len(data))
prev_len=len(data)
if(report==1):
report_row=[]
report_row.append("Wyatt Clark Threshold")
report_row.append(countProteins(data))
data=calculateWyattClarkInformationContent(data,int(options.recalculate),options.info_threshold_Wyatt_Clark,options.info_threshold_Wyatt_Clark_percentile,options.aspect,options.output,file_num)
vprint( "Number of annotations after choosing proteins based on Wyatt Clark Threshold ", len( data ) )
vprint( "Data discarded ", prev_len - len( data ) )
if(report==1):
report_row.append(countProteins(data))
report_row.append(prev_len)
report_row.append(len(data))
                reportdict[species].append(report_row)
vprint()
writeToFile( data, options.output[file_num],options.input[file_num] )
if len(options.input)>1:
combineOutputFiles(options.output,options)
if(options.single_file!=0):
deleteTemporaryFiles(options)
if report==1:
writeReport(options.report,reportdict)
if __name__ == "__main__":
main()
| Rinoahu/debias | lib/debias.py | Python | gpl-3.0 | 49,496 | 0.022204 |
import argparse
from models import Service
from models import Base
import helpers
import traceback
import sys
import os
import importlib
import shutil
@helpers.handle_dbsession()
def prepare_service_db(sqlsession, name, desc, models, uses_blueprint):
s = sqlsession.query(Service).filter_by(name=name).first()
if s:
        print('Service %s already exists. Aborting.' % name)
return False
    if models:
        Base.metadata.create_all(helpers.engine, [m.__table__ for m in models])
s = Service(name=name, uses_blueprint=uses_blueprint)
sqlsession.add(s)
sqlsession.commit()
    print('Successfully prepared DB for new service %s: %s' % (name, desc))
if models:
print('%s contains the following fields:' % name)
for model in models:
print(str(model.__name__))
else:
print('%s contains no fields.' % name)
return True
def validate_service(path):
if os.path.isdir(path):
# servicename = os.path.basename(path)
if not os.path.isfile(os.path.join(path, '__init__.py')):
print('Service contains no __init__.py.')
return False
# m = importlib.import_module('%s' % servicename, '')
# if m.__uses_blueprint__:
# blueprint = os.path.join(path, 'blueprint')
# if not os.path.isdir(blueprint):
# print('Service contains no blueprint. Please place it in the blueprint dir.')
# return False
# if not os.path.isfile(os.path.join(blueprint, '__init__.py')):
# print('Service blueprint contains no __init__.py.')
# return False
# templates = os.path.join(blueprint, 'templates')
# if not os.path.isdir(templates):
# print('Warning: Service blueprint contains no template dir.')
# elif not os.listdir(templates):
# print('Warning: Service blueprint template dir is empty.')
return True
else:
print('%s is not a directory. Please check your input' % path)
return False
def register_service(path):
print('Importing service from %s.' % path)
if validate_service(path):
servicename = os.path.basename(path)
if os.path.isdir(os.path.join('services/', servicename)):
            print('Service could not be imported because a service with the same name already exists.')
return False
else:
destination = os.path.join('services/', servicename)
try:
shutil.copytree(path, destination)
except Exception as e:
print(e)
traceback.print_tb(sys.exc_info()[2])
shutil.rmtree(destination)
return False
else:
print('Service is faulty, please consult the errors.')
return False
print('Preparing the DB for service %s' % servicename)
try:
m = importlib.import_module('.%s' % servicename, 'services')
if prepare_service_db(m.__service_name__, m.__description__, m.__models__, m.__uses_blueprint__):
print('Successfully prepared DB for service %s' % servicename)
else:
            print('Failed to prepare the DB for service %s' % servicename)
return False
except Exception as e:
print(e)
traceback.print_tb(sys.exc_info()[2])
print('Failed to load service %s due to a faulty module' % servicename)
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Service importer')
parser.add_argument('--path',
metavar='url',
type=str,
nargs='+',
help='Path to the service to import')
args = parser.parse_args()
if not args.path or len(args.path) < 1:
print('Please specify at least one service to import')
else:
for p in args.path:
if register_service(p):
print('Successfully registered new service %s' % p)
else:
print('Failed to register service %s' % p)
# prepare_service_db('basics', 'Basic services and commands', (
# ('text', 'txt', Type.text, '.', (('2345', 'adsd'), ('2345', 'adsd'), ('2345', 'adsd'))),
# ('int', 'd', Type.int, '', ()),
# ('bool', 'truefalse', Type.bool, '', ())
# ))
| Yatekii/backdoor | register_service.py | Python | agpl-3.0 | 4,477 | 0.00134 |
import sys
def suite(n,s):
    # Run-length encode s: emit "<count><digit>" for each run of equal digits.
    p = -1
    fin = ''
    c = 0
    for i in range(0,n+1):
        if i == n:
            # end of input: flush the final run (p always equals s[n-1] here)
            fin = fin+str(c)+str(p)
            break
        if p == -1:
            p = s[i]
            c = 1
        elif s[i]==p:
            c = c+1
        else:
            fin = fin+str(c)+str(p)
            p = s[i]
            c = 1
    print fin
    return
if __name__ == '__main__':
n = int(raw_input())
s = raw_input()
suite(n,s)
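# Example (illustrative): n=4, s='1223' prints '112213'
# (one '1', two '2's, one '3').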
| Hugoo/Prologin | 2008 - Machine/suite.py | Python | mit | 678 | 0.00885 |
from django import forms
class PutForm(forms.Form):
body = forms.CharField(widget=forms.Textarea())
tube = forms.CharField(initial='default')
priority = forms.IntegerField(initial=2147483648)
delay = forms.IntegerField(initial=0)
ttr = forms.IntegerField(initial=120)
| andreisavu/django-jack | jack/beanstalk/forms.py | Python | apache-2.0 | 291 | 0.006873 |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import requests
import subprocess
import sys
import time
if len(sys.argv) != 5:
sys.stderr.write('usage: program <github-username> <upstream-remote> <previous-release-branch> <current-release-branch>\n')
sys.stderr.write(" e.g., program myusername upstream 0.13.0-incubating 0.14.0-incubating")
sys.stderr.write(" It is also necessary to set a GIT_TOKEN environment variable containing a personal access token.")
sys.exit(1)
github_username = sys.argv[1]
upstream_remote = sys.argv[2]
previous_branch = sys.argv[3]
release_branch = sys.argv[4]
master_branch = "master"
upstream_master = "{}/{}".format(upstream_remote, master_branch)
upstream_previous = "{}/{}".format(upstream_remote, previous_branch)
upstream_release = "{}/{}".format(upstream_remote, release_branch)
command = "git log {}..{} --oneline | tail -1".format(upstream_master, upstream_previous)
# Find the commit where the previous release branch was cut from master
previous_branch_first_commit = subprocess.check_output(command, shell=True).decode('UTF-8')
match_result = re.match(r"(\w+) .*", previous_branch_first_commit)
previous_branch_first_commit = match_result.group(1)
print("Previous branch: {}, first commit: {}".format(upstream_previous, previous_branch_first_commit))
# Find all commits between that commit and the current release branch
command = "git rev-list {}..{}".format(previous_branch_first_commit, upstream_release)
all_release_commits = subprocess.check_output(command, shell=True).decode('UTF-8')
for commit_id in all_release_commits.splitlines():
try:
# wait 3 seconds between calls to avoid hitting the rate limit
time.sleep(3)
search_url = "https://api.github.com/search/issues?q=type:pr+is:merged+is:closed+repo:apache/incubator-druid+SHA:{}"
resp = requests.get(search_url.format(commit_id), auth=(github_username, os.environ["GIT_TOKEN"]))
resp_json = resp.json()
milestone_found = False
closed_pr_nums = []
if (resp_json.get("items") is None):
print("Could not get PRs for commit ID {}, resp: {}".format(commit_id, resp_json))
continue
for pr in resp_json["items"]:
closed_pr_nums.append(pr["number"])
milestone = pr["milestone"]
if milestone is not None:
milestone_found = True
print("COMMIT: {}, PR#: {}, MILESTONE: {}".format(commit_id, pr["number"], pr["milestone"]["url"]))
if not milestone_found:
print("NO MILESTONE FOUND FOR COMMIT: {}, CLOSED PRs: {}".format(commit_id, closed_pr_nums))
except Exception as e:
print("Got exception for commitID: {} ex: {}".format(commit_id, e))
continue
| knoguchi/druid | docs/_bin/get-milestone-prs.py | Python | apache-2.0 | 3,551 | 0.005069 |
# ActivitySim
# See full license in LICENSE.txt.
import sys
import os
import logging
import yaml
import numpy as np
import pandas as pd
from activitysim.abm.models.util import tour_frequency as tf
from activitysim.core.util import reindex
from activitysim.abm.models.util import canonical_ids as cid
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
logger.addHandler(ch)
CONSTANTS = {}
SURVEY_TOUR_ID = 'survey_tour_id'
SURVEY_PARENT_TOUR_ID = 'survey_parent_tour_id'
SURVEY_PARTICIPANT_ID = 'survey_participant_id'
SURVEY_TRIP_ID = 'survey_trip_id'
ASIM_TOUR_ID = 'tour_id'
ASIM_PARENT_TOUR_ID = 'parent_tour_id'
ASIM_TRIP_ID = 'trip_id'
ASIM_PARTICIPANT_ID = 'participant_id'
survey_tables = {
'households': {
'file_name': 'survey_households.csv',
'index': 'household_id'
},
'persons': {
'file_name': 'survey_persons.csv',
'index': 'person_id'
},
'tours': {
'file_name': 'survey_tours.csv'
},
'joint_tour_participants': {
'file_name': 'survey_joint_tour_participants.csv'
},
'trips': {
'file_name': 'survey_trips.csv'
},
}
outputs = {
'households': 'override_households.csv',
'persons': 'override_persons.csv',
'tours': 'override_tours.csv',
'joint_tour_participants': 'override_joint_tour_participants.csv',
'trips': 'override_trips.csv',
}
control_tables = {
'households': {
'file_name': 'final_households.csv',
'index': 'household_id'
},
'persons': {
'file_name': 'final_persons.csv',
'index': 'person_id'
},
'tours': {
'file_name': 'final_tours.csv'
},
'joint_tour_participants': {
'file_name': 'final_joint_tour_participants.csv'
},
'trips': {
'file_name': 'final_trips.csv'
},
}
apply_controls = True
skip_controls = not apply_controls
def mangle_ids(ids):
return ids * 10
def unmangle_ids(ids):
return ids // 10
def infer_cdap_activity(persons, tours, joint_tour_participants):
mandatory_tour_types = ['work', 'school']
non_mandatory_tour_types = ['escort', 'shopping', 'othmaint', 'othdiscr', 'eatout', 'social']
num_mandatory_tours = \
tours[tours.tour_type.isin(mandatory_tour_types)].\
groupby('person_id').size().\
reindex(persons.index).fillna(0).astype(np.int8)
num_non_mandatory_tours = \
tours[tours.tour_type.isin(non_mandatory_tour_types)].\
groupby('person_id').size().\
reindex(persons.index).fillna(0).astype(np.int8)
num_joint_tours = \
joint_tour_participants.\
groupby('person_id').size().\
reindex(persons.index).fillna(0).astype(np.int8)
num_non_mandatory_tours += num_joint_tours
cdap_activity = pd.Series('H', index=persons.index)
cdap_activity = cdap_activity.where(num_mandatory_tours == 0, 'M')
cdap_activity = cdap_activity.where((cdap_activity == 'M') | (num_non_mandatory_tours == 0), 'N')
return cdap_activity
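# Illustrative: a person with any work/school tour gets 'M' (mandatory); with no
# mandatory tours but at least one non-mandatory or joint tour, 'N'; otherwise
# the default 'H' (home) is kept.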
def infer_mandatory_tour_frequency(persons, tours):
num_work_tours = \
tours[tours.tour_type == 'work'].\
groupby('person_id').size().reindex(persons.index).fillna(0).astype(np.int8)
num_school_tours = \
tours[tours.tour_type == 'school'].\
groupby('person_id').size().reindex(persons.index).fillna(0).astype(np.int8)
mtf = {
0: '',
1: 'work1',
2: 'work2',
10: 'school1',
20: 'school2',
11: 'work_and_school'
}
mandatory_tour_frequency = (num_work_tours + num_school_tours*10).map(mtf)
return mandatory_tour_frequency
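# Illustrative: the code is num_work_tours + 10 * num_school_tours, so one work
# tour plus one school tour gives 11 -> 'work_and_school'; counts outside the
# mtf map (e.g. three work tours) would map to NaN.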
def infer_non_mandatory_tour_frequency(configs_dir, persons, tours):
def read_alts():
# escort,shopping,othmaint,othdiscr,eatout,social
# 0,0,0,0,0,0
# 0,0,0,1,0,0, ...
alts = \
pd.read_csv(os.path.join(configs_dir, 'non_mandatory_tour_frequency_alternatives.csv'),
comment='#')
alts = alts.astype(np.int8) # - NARROW
return alts
tours = tours[tours.tour_category == 'non_mandatory']
alts = read_alts()
tour_types = list(alts.columns.values)
# tour_frequency is index in alts table
alts['alt_id'] = alts.index
# actual tour counts (may exceed counts envisioned by alts)
unconstrained_tour_counts = pd.DataFrame(index=persons.index)
for tour_type in tour_types:
unconstrained_tour_counts[tour_type] = \
tours[tours.tour_type == tour_type].\
groupby('person_id').size().reindex(persons.index).fillna(0).astype(np.int8)
    # unextend tour counts:
    # activitysim extends tour counts based on a probability table;
    # counts can only be extended if the original count is between 1 and 4,
    # and tours can only be extended if their count is at the maximum possible
max_tour_counts = alts[tour_types].max(axis=0)
constrained_tour_counts = pd.DataFrame(index=persons.index)
for tour_type in tour_types:
constrained_tour_counts[tour_type] = unconstrained_tour_counts[tour_type].clip(upper=max_tour_counts[tour_type])
    # persons whose tours were constrained and who aren't eligible for extension because they have > 4 constrained tours
    has_constrained_tours = (unconstrained_tour_counts != constrained_tour_counts).any(axis=1)
    print("%s persons with constrained tours" % (has_constrained_tours.sum()))
    too_many_tours = has_constrained_tours & (constrained_tour_counts.sum(axis=1) > 4)  # comparison parenthesized: & binds tighter than >
if too_many_tours.any():
print("%s persons with too many tours" % (too_many_tours.sum()))
print(constrained_tour_counts[too_many_tours])
        # not sure what to do about this - throw out some tours? let them through?
        raise RuntimeError("infer_non_mandatory_tour_frequency: persons with too many constrained tours")
# determine alt id corresponding to constrained_tour_counts
# need to do index waltz because pd.merge doesn't preserve index in this case
alt_id = \
pd.merge(constrained_tour_counts.reset_index(), alts,
left_on=tour_types, right_on=tour_types, how='left').set_index(persons.index.name).alt_id
# did we end up with any tour frequencies not in alts?
if alt_id.isna().any():
bad_tour_frequencies = alt_id.isna()
logger.warning("WARNING Bad joint tour frequencies\n\n")
logger.warning("\nWARNING Bad non_mandatory tour frequencies: num_tours\n%s" %
constrained_tour_counts[bad_tour_frequencies])
logger.warning("\nWARNING Bad non_mandatory tour frequencies: num_tours\n%s" %
tours[tours.person_id.isin(persons.index[bad_tour_frequencies])].sort_values('person_id'))
bug
tf = unconstrained_tour_counts.rename(columns={tour_type: '_%s' % tour_type for tour_type in tour_types})
tf['non_mandatory_tour_frequency'] = alt_id
return tf
def infer_joint_tour_frequency(configs_dir, households, tours):
def read_alts():
# right now this file just contains the start and end hour
alts = \
pd.read_csv(os.path.join(configs_dir, 'joint_tour_frequency_alternatives.csv'),
comment='#', index_col='alt')
alts = alts.astype(np.int8) # - NARROW
return alts
alts = read_alts()
tour_types = list(alts.columns.values)
assert(len(alts.index[(alts == 0).all(axis=1)]) == 1) # should be one zero_tours alt
zero_tours_alt = alts.index[(alts == 0).all(axis=1)].values[0]
alts['joint_tour_frequency'] = alts.index
joint_tours = tours[tours.tour_category == 'joint']
num_tours = pd.DataFrame(index=households.index)
for tour_type in tour_types:
joint_tour_is_tour_type = (joint_tours.tour_type == tour_type)
if joint_tour_is_tour_type.any():
num_tours[tour_type] = \
joint_tours[joint_tour_is_tour_type].\
groupby('household_id').size().\
reindex(households.index).fillna(0)
else:
logger.warning("WARNING infer_joint_tour_frequency - no tours of type '%s'" % tour_type)
num_tours[tour_type] = 0
num_tours = num_tours.fillna(0).astype(np.int64)
# need to do index waltz because pd.merge doesn't preserve index in this case
jtf = pd.merge(num_tours.reset_index(), alts, left_on=tour_types, right_on=tour_types, how='left').\
set_index(households.index.name)
if jtf.joint_tour_frequency.isna().any():
bad_tour_frequencies = jtf.joint_tour_frequency.isna()
logger.warning("WARNING Bad joint tour frequencies\n\n")
logger.warning("\nWARNING Bad joint tour frequencies: num_tours\n%s" %
num_tours[bad_tour_frequencies])
logger.warning("\nWARNING Bad joint tour frequencies: num_tours\n%s" %
joint_tours[joint_tours.household_id.isin(households.index[bad_tour_frequencies])])
bug
logger.info("infer_joint_tour_frequency: %s households with joint tours",
(jtf.joint_tour_frequency != zero_tours_alt).sum())
return jtf.joint_tour_frequency
def infer_joint_tour_composition(persons, tours, joint_tour_participants):
"""
assign joint_tours a 'composition' column ('adults', 'children', or 'mixed')
depending on the composition of the joint_tour_participants
"""
joint_tours = tours[tours.tour_category == 'joint'].copy()
joint_tour_participants = \
pd.merge(joint_tour_participants, persons,
left_on='person_id', right_index=True, how='left')
    # FIXME - computed by asim annotate_persons - not needed if embedded in asim and called just-in-time
if 'adult' not in joint_tour_participants:
joint_tour_participants['adult'] = (joint_tour_participants.age >= 18)
tour_has_adults = \
joint_tour_participants[joint_tour_participants.adult]\
.groupby(SURVEY_TOUR_ID).size()\
.reindex(joint_tours[SURVEY_TOUR_ID]).fillna(0) > 0
tour_has_children = \
joint_tour_participants[~joint_tour_participants.adult]\
.groupby([SURVEY_TOUR_ID]).size()\
.reindex(joint_tours[SURVEY_TOUR_ID]).fillna(0) > 0
assert (tour_has_adults | tour_has_children).all()
joint_tours['composition'] = np.where(tour_has_adults, np.where(tour_has_children, 'mixed', 'adults'), 'children')
return joint_tours.composition.reindex(tours.index).fillna('').astype(str)
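# truth table implied by the nested np.where above:
#   adults only          -> 'adults'
#   children only        -> 'children'
#   adults and children  -> 'mixed'
# non-joint tours fall out of the reindex and get an empty-string composition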
def infer_tour_scheduling(configs_dir, tours):
# given start and end periods, infer tdd
def read_tdd_alts():
# right now this file just contains the start and end hour
tdd_alts = pd.read_csv(os.path.join(configs_dir, 'tour_departure_and_duration_alternatives.csv'))
tdd_alts['duration'] = tdd_alts.end - tdd_alts.start
tdd_alts = tdd_alts.astype(np.int8) # - NARROW
tdd_alts['tdd'] = tdd_alts.index
return tdd_alts
tdd_alts = read_tdd_alts()
if not tours.start.isin(tdd_alts.start).all():
print(tours[~tours.start.isin(tdd_alts.start)])
assert tours.start.isin(tdd_alts.start).all(), "not all tour starts in tdd_alts"
    assert tours.end.isin(tdd_alts.end).all(), "not all tour ends in tdd_alts"
tdds = pd.merge(tours[['start', 'end']], tdd_alts, left_on=['start', 'end'], right_on=['start', 'end'], how='left')
if tdds.tdd.isna().any():
bad_tdds = tours[tdds.tdd.isna()]
print("Bad tour start/end times:")
print(bad_tdds)
        raise RuntimeError("infer_tour_scheduling: tour start/end times not found in tdd_alts")
# print("tdd_alts\n%s" %tdd_alts, "\n")
# print("tours\n%s" %tours[['start', 'end']])
# print("tdds\n%s" %tdds)
return tdds.tdd
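# sketch of the tdd lookup above (hypothetical alternatives file): a row with
# start=8, end=17 gets duration 9 and a tdd equal to its row index, so every
# tour with start=8 and end=17 merges onto that single alternative id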
def patch_tour_ids(persons, tours, joint_tour_participants):
def set_tour_index(tours, parent_tour_num_col, is_joint):
group_cols = ['person_id', 'tour_category', 'tour_type']
if 'parent_tour_num' in tours:
group_cols += ['parent_tour_num']
tours['tour_type_num'] = \
tours.sort_values(by=group_cols).groupby(group_cols).cumcount() + 1
return cid.set_tour_index(tours, parent_tour_num_col=parent_tour_num_col, is_joint=is_joint)
assert 'mandatory_tour_frequency' in persons
# replace survey_tour ids with asim standard tour_ids (which are based on person_id and tour_type)
#####################
# mandatory tours
#####################
mandatory_tours = \
set_tour_index(tours[tours.tour_category == 'mandatory'], parent_tour_num_col=None, is_joint=False)
assert mandatory_tours.index.name == 'tour_id'
#####################
# joint tours
#####################
# joint tours tour_id was assigned based on person_id of the first person in household (PNUM == 1)
    # because the actual point person for the tour is only identified later (in joint_tour_participants)
temp_point_persons = persons.loc[persons.PNUM == 1, ['household_id']]
temp_point_persons['person_id'] = temp_point_persons.index
temp_point_persons.set_index('household_id', inplace=True)
# patch person_id with value of temp_point_person_id and use it to set_tour_index
    # copy the slice so the person_id patching below doesn't trip pandas SettingWithCopy warnings
    joint_tours = tours[tours.tour_category == 'joint'].copy()
joint_tours['cache_point_person_id'] = joint_tours['person_id']
joint_tours['person_id'] = reindex(temp_point_persons.person_id, joint_tours.household_id)
joint_tours = set_tour_index(joint_tours, parent_tour_num_col=None, is_joint=True)
joint_tours['person_id'] = joint_tours['cache_point_person_id']
del joint_tours['cache_point_person_id']
# patch tour_id column in patched_joint_tour_participants
patched_joint_tour_participants = joint_tour_participants.copy()
asim_tour_id = pd.Series(joint_tours.index, index=joint_tours[SURVEY_TOUR_ID])
patched_joint_tour_participants[ASIM_TOUR_ID] = \
reindex(asim_tour_id, patched_joint_tour_participants[SURVEY_TOUR_ID])
    # participant_id is formed by combining tour_id and the participant person's PNUM
    # (pathological knowledge, but awkward to conflate with joint_tour_participation.py logic)
participant_pnum = reindex(persons.PNUM, patched_joint_tour_participants['person_id'])
patched_joint_tour_participants[ASIM_PARTICIPANT_ID] = \
(patched_joint_tour_participants[ASIM_TOUR_ID] * cid.MAX_PARTICIPANT_PNUM) + participant_pnum
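    # sketch of the arithmetic above, assuming cid.MAX_PARTICIPANT_PNUM == 100
    # (the actual constant lives in the canonical id module):
    #   tour_id 1234, PNUM 2  ->  participant_id 1234 * 100 + 2 == 123402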
#####################
# non_mandatory tours
#####################
non_mandatory_tours = \
set_tour_index(tours[tours.tour_category == 'non_mandatory'], parent_tour_num_col=None, is_joint=False)
#####################
# atwork tours
#####################
    atwork_tours = tours[tours.tour_category == 'atwork'].copy()
# patch atwork tours parent_tour_id before assigning their tour_id
# tours for workers with both work and school trips should have lower tour_num for work,
# tours for students with both work and school trips should have lower tour_num for school
    # tours are already sorted, but school comes before work (which is alphabetical, not the alternative id order),
# so work_and_school tour_nums are correct for students (school=1, work=2) but workers need to be flipped
mandatory_tour_frequency = \
reindex(persons.mandatory_tour_frequency, mandatory_tours.person_id)
is_worker = \
reindex(persons.pemploy, mandatory_tours.person_id).\
isin([CONSTANTS['PEMPLOY_FULL'], CONSTANTS['PEMPLOY_PART']])
work_and_school_and_worker = (mandatory_tour_frequency == 'work_and_school') & is_worker
# calculate tour_num for work tours (required to set_tour_index for atwork subtours)
parent_tours = mandatory_tours[[SURVEY_TOUR_ID]]
parent_tours['tour_num'] = \
mandatory_tours.\
sort_values(by=['person_id', 'tour_category', 'tour_type']).\
groupby(['person_id', 'tour_category']).cumcount() + 1
parent_tours.tour_num = parent_tours.tour_num.where(~work_and_school_and_worker, 3 - parent_tours.tour_num)
parent_tours = parent_tours.set_index(SURVEY_TOUR_ID, drop=True)
# temporarily add parent_tour_num column to atwork tours, call set_tour_index, and then delete it
atwork_tours['parent_tour_num'] = reindex(parent_tours.tour_num, atwork_tours[SURVEY_PARENT_TOUR_ID])
atwork_tours = set_tour_index(atwork_tours, parent_tour_num_col='parent_tour_num', is_joint=False)
del atwork_tours['parent_tour_num']
# tours['household_id'] = reindex(persons.household_id, tours.person_id)
asim_tour_id = pd.Series(mandatory_tours.index, index=mandatory_tours[SURVEY_TOUR_ID])
atwork_tours[ASIM_PARENT_TOUR_ID] = reindex(asim_tour_id, atwork_tours[SURVEY_PARENT_TOUR_ID])
#####################
# concat tours
#####################
# only true for fake data
assert (mandatory_tours.index == unmangle_ids(mandatory_tours[SURVEY_TOUR_ID])).all()
assert (joint_tours.index == unmangle_ids(joint_tours[SURVEY_TOUR_ID])).all()
assert (non_mandatory_tours.index == unmangle_ids(non_mandatory_tours[SURVEY_TOUR_ID])).all()
patched_tours = pd.concat([mandatory_tours, joint_tours, non_mandatory_tours, atwork_tours])
assert patched_tours.index.name == ASIM_TOUR_ID
patched_tours = patched_tours.reset_index()
del patched_tours['tour_type_num']
assert ASIM_TOUR_ID in patched_tours
assert ASIM_PARENT_TOUR_ID in patched_tours
return patched_tours, patched_joint_tour_participants
def infer_atwork_subtour_frequency(configs_dir, tours):
# first column is 'atwork_subtour_frequency' nickname, remaining columns are trip type counts
alts = pd.read_csv(os.path.join(configs_dir, 'atwork_subtour_frequency_alternatives.csv'), comment='#')
tour_types = list(alts.drop(columns=alts.columns[0]).columns) # get trip_types, ignoring first column
alts['alt_id'] = alts.index
# alt eat business maint alt_id
# 0 no_subtours 0 0 0 0
# 1 eat 1 0 0 1
# 2 business1 0 1 0 2
# 3 maint 0 0 1 3
# 4 business2 0 2 0 4
# 5 eat_business 1 1 0 5
work_tours = tours[tours.tour_type == 'work']
work_tours = work_tours[[ASIM_TOUR_ID]]
subtours = tours[tours.tour_category == 'atwork']
subtours = subtours[['tour_id', 'tour_type', 'parent_tour_id']]
# actual tour counts (may exceed counts envisioned by alts)
tour_counts = pd.DataFrame(index=work_tours[ASIM_TOUR_ID])
for tour_type in tour_types:
# count subtours of this type by parent_tour_id
tour_type_count = subtours[subtours.tour_type == tour_type].groupby('parent_tour_id').size()
# backfill with 0 count
tour_counts[tour_type] = tour_type_count.reindex(tour_counts.index).fillna(0).astype(np.int8)
# determine alt id corresponding to constrained_tour_counts
# need to do index waltz because pd.merge doesn't preserve index in this case
tour_counts = \
pd.merge(tour_counts.reset_index(), alts,
left_on=tour_types, right_on=tour_types, how='left').set_index(tour_counts.index.name)
atwork_subtour_frequency = tour_counts.alt
# did we end up with any tour frequencies not in alts?
if atwork_subtour_frequency.isna().any():
bad_tour_frequencies = atwork_subtour_frequency.isna()
logger.warning("WARNING Bad atwork subtour frequencies for %s work tours" % bad_tour_frequencies.sum())
logger.warning("WARNING Bad atwork subtour frequencies: num_tours\n%s" %
tour_counts[bad_tour_frequencies])
logger.warning("WARNING Bad atwork subtour frequencies: num_tours\n%s" %
subtours[subtours.parent_tour_id.isin(tour_counts[bad_tour_frequencies].index)].
sort_values('parent_tour_id'))
bug
atwork_subtour_frequency = reindex(atwork_subtour_frequency, tours[ASIM_TOUR_ID]).fillna('')
return atwork_subtour_frequency
def patch_trip_ids(tours, trips):
"""
replace survey trip_ids with asim standard trip_id
replace survey tour_id foreign key with asim standard tour_id
"""
# tour_id is a column, not index
assert ASIM_TOUR_ID in tours
# patch tour_id foreign key
# tours['household_id'] = reindex(persons.household_id, tours.person_id)
asim_tour_id = pd.Series(tours[ASIM_TOUR_ID].values, index=tours[SURVEY_TOUR_ID].values)
trips[ASIM_TOUR_ID] = reindex(asim_tour_id, trips[SURVEY_TOUR_ID])
# person_is_university = persons.pstudent == constants.PSTUDENT_UNIVERSITY
# tour_is_university = reindex(person_is_university, tours.person_id)
# tour_primary_purpose = tours.tour_type.where((tours.tour_type != 'school') | ~tour_is_university, 'univ')
# tour_primary_purpose = tour_primary_purpose.where(tours.tour_category!='atwork', 'atwork')
#
# trips['primary_purpose'] = reindex(tour_primary_purpose, trips.tour_id)
    # order is ambiguous if trips depart in the same time slot - break ties by SURVEY_TRIP_ID, hoping it increases with time
if 'trip_num' not in trips:
trips['trip_num'] = \
trips.sort_values(by=['tour_id', 'outbound', 'depart', SURVEY_TRIP_ID]).\
groupby(['tour_id', 'outbound']).\
cumcount() + 1
cid.set_trip_index(trips)
assert trips.index.name == ASIM_TRIP_ID
trips = trips.reset_index().rename(columns={'trip_id': ASIM_TRIP_ID})
return trips
def infer_stop_frequency(configs_dir, tours, trips):
# alt,out,in
# 0out_0in,0,0
# 0out_1in,0,1
# ...
alts = pd.read_csv(os.path.join(configs_dir, 'stop_frequency_alternatives.csv'), comment='#')
assert 'alt' in alts
assert 'in' in alts
assert 'out' in alts
freq = pd.DataFrame(index=tours[SURVEY_TOUR_ID])
    # number of stops on each leg is one less than the number of trips
freq['out'] = trips[trips.outbound].groupby(SURVEY_TOUR_ID).trip_num.max() - 1
freq['in'] = trips[~trips.outbound].groupby(SURVEY_TOUR_ID).trip_num.max() - 1
freq = pd.merge(freq.reset_index(), alts, on=['out', 'in'], how='left')
assert (freq[SURVEY_TOUR_ID] == tours[SURVEY_TOUR_ID]).all()
return freq.alt
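# e.g. a tour whose outbound leg has trips numbered 1..3 and inbound leg trips 1..2
# has out = 3 - 1 = 2 stops and in = 2 - 1 = 1 stop, which merges onto the
# '2out_1in' alternative (following the alt naming convention shown above)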
def read_tables(input_dir, tables):
    for table_name, info in tables.items():
        table = pd.read_csv(os.path.join(input_dir, info['file_name']), index_col=info.get('index'))
# coerce missing data in string columns to empty strings, not NaNs
for c in table.columns:
# read_csv converts empty string to NaN, even if all non-empty values are strings
if table[c].dtype == 'object':
print("##### converting", c, table[c].dtype)
table[c] = table[c].fillna('').astype(str)
info['table'] = table
households = tables['households'].get('table')
persons = tables['persons'].get('table')
tours = tables['tours'].get('table')
joint_tour_participants = tables['joint_tour_participants'].get('table')
trips = tables['trips'].get('table')
return households, persons, tours, joint_tour_participants, trips
def check_controls(table_name, column_name):
table = survey_tables[table_name].get('table')
c_table = control_tables[table_name].get('table')
if column_name == 'index':
dont_match = (table.index != c_table.index)
else:
dont_match = (table[column_name] != c_table[column_name])
if dont_match.any():
print("check_controls %s.%s: %s out of %s do not match" %
(table_name, column_name, dont_match.sum(), len(table)))
print("control\n%s" % c_table[dont_match][[column_name]])
print("survey\n%s" % table[dont_match][[column_name]])
print("control\n%s" % c_table[dont_match][table.columns])
print("survey\n%s" % table[dont_match][table.columns])
return False
return True
def infer(configs_dir, input_dir, output_dir):
households, persons, tours, joint_tour_participants, trips = read_tables(input_dir, survey_tables)
# be explicit about all tour_ids to avoid confusion between asim and survey ids
tours = tours.rename(columns={'tour_id': SURVEY_TOUR_ID, 'parent_tour_id': SURVEY_PARENT_TOUR_ID})
joint_tour_participants = \
joint_tour_participants.rename(columns={'tour_id': SURVEY_TOUR_ID, 'participant_id': SURVEY_PARTICIPANT_ID})
trips = trips.rename(columns={'trip_id': SURVEY_TRIP_ID, 'tour_id': SURVEY_TOUR_ID})
# mangle survey tour ids to keep us honest
tours[SURVEY_TOUR_ID] = mangle_ids(tours[SURVEY_TOUR_ID])
tours[SURVEY_PARENT_TOUR_ID] = mangle_ids(tours[SURVEY_PARENT_TOUR_ID])
joint_tour_participants[SURVEY_TOUR_ID] = mangle_ids(joint_tour_participants[SURVEY_TOUR_ID])
joint_tour_participants[SURVEY_PARTICIPANT_ID] = mangle_ids(joint_tour_participants[SURVEY_PARTICIPANT_ID])
trips[SURVEY_TRIP_ID] = mangle_ids(trips[SURVEY_TRIP_ID])
trips[SURVEY_TOUR_ID] = mangle_ids(trips[SURVEY_TOUR_ID])
# persons.cdap_activity
persons['cdap_activity'] = infer_cdap_activity(persons, tours, joint_tour_participants)
# check but don't assert as this is not deterministic
skip_controls or check_controls('persons', 'cdap_activity')
# persons.mandatory_tour_frequency
persons['mandatory_tour_frequency'] = infer_mandatory_tour_frequency(persons, tours)
assert skip_controls or check_controls('persons', 'mandatory_tour_frequency')
# persons.non_mandatory_tour_frequency
tour_frequency = infer_non_mandatory_tour_frequency(configs_dir, persons, tours)
for c in tour_frequency.columns:
print("assigning persons", c)
persons[c] = tour_frequency[c]
assert skip_controls or check_controls('persons', 'non_mandatory_tour_frequency')
# patch_tour_ids
tours, joint_tour_participants = patch_tour_ids(persons, tours, joint_tour_participants)
survey_tables['tours']['table'] = tours
survey_tables['joint_tour_participants']['table'] = joint_tour_participants
assert skip_controls or check_controls('tours', 'index')
assert skip_controls or check_controls('joint_tour_participants', 'index')
# patch_tour_ids
trips = patch_trip_ids(tours, trips)
survey_tables['trips']['table'] = trips # so we can check_controls
assert skip_controls or check_controls('trips', 'index')
# households.joint_tour_frequency
households['joint_tour_frequency'] = infer_joint_tour_frequency(configs_dir, households, tours)
assert skip_controls or check_controls('households', 'joint_tour_frequency')
# tours.composition
tours['composition'] = infer_joint_tour_composition(persons, tours, joint_tour_participants)
assert skip_controls or check_controls('tours', 'composition')
# tours.tdd
tours['tdd'] = infer_tour_scheduling(configs_dir, tours)
assert skip_controls or check_controls('tours', 'tdd')
tours['atwork_subtour_frequency'] = infer_atwork_subtour_frequency(configs_dir, tours)
assert skip_controls or check_controls('tours', 'atwork_subtour_frequency')
tours['stop_frequency'] = infer_stop_frequency(configs_dir, tours, trips)
assert skip_controls or check_controls('tours', 'stop_frequency')
# write output files
households.to_csv(os.path.join(output_dir, outputs['households']), index=True)
persons.to_csv(os.path.join(output_dir, outputs['persons']), index=True)
tours.to_csv(os.path.join(output_dir, outputs['tours']), index=False)
joint_tour_participants.to_csv(os.path.join(output_dir, outputs['joint_tour_participants']), index=False)
trips.to_csv(os.path.join(output_dir, outputs['trips']), index=False)
# python infer.py <data_dir> <configs_dir>
args = sys.argv[1:]
assert len(args) == 2, "usage: python infer.py <data_dir> <configs_dir>"
data_dir = args[0]
configs_dir = args[1]
with open(os.path.join(configs_dir, 'constants.yaml')) as stream:
CONSTANTS = yaml.load(stream, Loader=yaml.SafeLoader)
input_dir = os.path.join(data_dir, 'survey_data/')
output_dir = input_dir
if apply_controls:
read_tables(input_dir, control_tables)
infer(configs_dir, input_dir, output_dir)
|
synthicity/activitysim
|
activitysim/examples/example_estimation/scripts/infer.py
|
Python
|
agpl-3.0
| 28,331
| 0.003565
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
You can run this example like this:
.. code:: console
$ rm -rf '/tmp/bar'
$ luigi --module examples.foo examples.Foo --workers 2 --local-scheduler
"""
from __future__ import print_function
import time
import luigi
class Foo(luigi.WrapperTask):
task_namespace = 'examples'
def run(self):
print("Running Foo")
def requires(self):
for i in range(10):
yield Bar(i)
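# Foo is a WrapperTask: it produces no output of its own and counts as complete
# once every Bar(i) yielded by requires() is complete, so --workers 2 lets two
# Bar tasks sleep in parallel. After a successful run the target directory holds
# one empty marker file per task:
#
#   /tmp/bar/0 ... /tmp/bar/9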
class Bar(luigi.Task):
task_namespace = 'examples'
num = luigi.IntParameter()
def run(self):
time.sleep(1)
self.output().open('w').close()
def output(self):
"""
Returns the target output for this task.
:return: the target output for this task.
:rtype: object (:py:class:`~luigi.target.Target`)
"""
time.sleep(1)
return luigi.LocalTarget('/tmp/bar/%d' % self.num)
|
samuell/luigi
|
examples/foo.py
|
Python
|
apache-2.0
| 1,501
| 0.000666
|
from __future__ import absolute_import
from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy import Request
import sys
from Schoogle.items import O_Item
from sys import getsizeof
from datetime import datetime
import time
import re
import string
def reduce(text):
return "".join([c for c in text if c in string.letters or c in (" ",)])
#return re.sub('\s+',' ', re.sub(r'([^\s\w]|_)+', '', text))
# @params:
# @html_list: a list of html text chunks; we strip out all of those annoying
# \t and \n's in the clunky html and return a string with the page's entire
# text contents; this object will later be used by postgresql for a full text search.
def prune(html_list):
toreturn = []
for i in html_list:
t = i.encode('ascii','ignore')
t = reduce(t)
        if t not in ('', ' '):
toreturn.append(t)
return " ".join(toreturn)
class O_Spider(Spider):
name = 'O_Spider'
allowed_domains = ['owu.edu']
start_urls = ['http://www.owu.edu']
# @params
# @response: this is a Scrapy.Response object containing much of the website information
    # attributes of this object will be used to flesh out our O_Item object
# @yield(1): this returns a single object each time next( this object ) is called
# first parse yields all items
# @yield(2): this is completed only after we have yielded an object from this webpage, it will
# recursively call parse on all links in a web page
def parse(self,response):
        # here we use scrapy's Request object to catch all invalid links when parsing our document
try:
links = response.xpath('//@href').extract()
for link in links:
try:
req = Request(link,callback = self.parse)
except ValueError:
pass # might want to log these eventually
except AttributeError:
pass # log these eventually
# fill up item with statistics
current_item = O_Item()
current_item['url'] = response.url
try:
current_item['title'] = response.xpath('//title/text()').extract()
current_item['timestamp'] = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
current_item['page_size'] = getsizeof(response.body)
current_item['full_html'] = response.body_as_unicode() # not sure if we really want this..
            # prune() already returns a joined string; wrapping it in " ".join() would
            # interleave a space between every character
            current_item['full_text'] = prune(response.xpath('//text()').extract())
current_item['secure'] = 'https' in str(response.request)
current_item['links'] = links
yield current_item
except Exception as e:
print "______________________________________________________________"
print " ERROR THROW ON ITEM YIELD"
print e
pass
        # recursive page search is below; this must happen after the item is pipelined to postgresql.
        # this is where we yield a Request object with parse as the callback, and the real recursion kicks in
try:
for link in response.xpath('//@href').extract():
try:
req = Request(link,callback = self.parse)
yield req
except ValueError:
pass # might want to log these eventually
except AttributeError:
pass # log these eventually
|
dwt2c/Schoogle
|
Schoogle/Schoogle/spiders/O_Spider.py
|
Python
|
gpl-2.0
| 3,037
| 0.029964
|
import math
curr = 0
goal = 1000000
potential_nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
output_num = []
if __name__ == '__main__':
for i in xrange(10, 0, -1):
print (curr, i, "outer loop")
for j in xrange(i + 1):
print (curr, j, "inner loop")
temp = math.factorial(i - 1) * j + curr
if temp >= goal:
print (temp)
curr += (math.factorial(i - 1) * (j-1))
print (curr, goal, i, j)
output_num.append(potential_nums[j-1])
potential_nums.remove(potential_nums[j-1])
break
print output_num
# SOLVED : 2783915460
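# the loop above walks the factorial number system (a Lehmer code): at each position
# it counts how many (i-1)!-sized blocks of permutations lie below the goal and picks
# the corresponding remaining digit, e.g. the first digit is potential_nums[2] == 2
# because 2 * 9! = 725760 < 1000000 <= 3 * 9! = 1088640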
|
kylebegovich/ProjectEuler
|
Python/Solved/Page1/Problem24.py
|
Python
|
gpl-3.0
| 663
| 0.006033
|
import collections
import difflib
import inspect
import logging
import os.path
import warnings
import os
import importlib
import cherrypy
import yaml
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
json = None
for pkg in ['ujson', 'yajl', 'simplejson', 'cjson', 'json']:
try:
json = importlib.import_module(pkg)
    except ImportError:
pass
else:
break
from blueberrypy.email import Mailer
from blueberrypy.exc import (BlueberryPyNotConfiguredError,
BlueberryPyConfigurationError)
logger = logging.getLogger(__name__)
class BlueberryPyConfiguration(object):
class _YAMLLoader(Loader):
"""YAML loader supporting additional tags."""
def __init__(self, *args, **kwargs):
super(BlueberryPyConfiguration._YAMLLoader, self).__init__(*args, **kwargs)
self._setup_loader()
def register_tag(self, tag, callback):
yaml.add_constructor(tag, callback, Loader=self.__class__)
def _tag_env_var(self, loader, node):
env_var_name = loader.construct_scalar(node)
return os.getenv(env_var_name)
def _tag_first_of(self, loader, node):
seq = loader.construct_sequence(node)
for v in seq:
if v is not None:
return v
raise yaml.YAMLError('At least one of values passed to !FirstOf tag must be not None')
def _setup_loader(self):
self.register_tag('!EnvVar', self._tag_env_var)
self.register_tag('!FirstOf', self._tag_first_of)
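        # sketch of YAML exercising these tags (hypothetical app.yml snippet):
        #
        #   global:
        #     secret_key: !EnvVar APP_SECRET
        #     db_url: !FirstOf [!EnvVar DATABASE_URL, 'sqlite:///dev.db']
        #
        # !EnvVar resolves to the named environment variable (or None) and
        # !FirstOf returns the first non-None value in its sequence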
def __init__(self, config_dir=None, app_config=None, logging_config=None,
webassets_env=None, environment=None,
env_var_name='BLUEBERRYPY_CONFIG'):
"""Loads BlueberryPy configuration from `config_dir` if supplied.
If `app_config` or `logging_config` or `webassets_env` are given, they
will be used instead of the configuration files found from `config_dir`.
If `environment` is given, it must be an existing CherryPy environment.
If `environment` is `production`, and `config_dir` is given, the `prod`
subdirectory will be searched for configuration files, otherwise the
        `dev` subdirectory will be searched.
If `env_var_name` is given, it must be an existing environment
variable, it will override values from YAML config.
Upon initialization of this configuration object, all the configuration
will be validated for sanity and either BlueberryPyConfigurationError or
BlueberryPyNotConfiguredError will be thrown if insane. For less severe
configuration insanity cases, a warning will be emitted instead.
:arg config_dir: a path, str
:arg app_config: a CherryPy config, dict
:arg logging_config: a logging config, dict
:arg webassets_env: a webassets environment, webassets.Environment
:arg environment: a CherryPy configuration environment, str
:arg env_var_name: an environment variable name for configuration, str
"""
ENV_CONFIG = self.__class__._load_env_var(env_var_name)
CWD = os.getcwdu() if getattr(os, "getcwdu", None) else os.getcwd()
if ENV_CONFIG.get('global', {}).get('CWD') and \
os.path.isdir(
os.path.join(ENV_CONFIG['global']['CWD'], 'src')):
CWD = ENV_CONFIG['global']['CWD']
if config_dir is None:
self.config_dir = config_dir = os.path.join(CWD, "config")
else:
self.config_dir = config_dir = os.path.abspath(config_dir)
if environment == "production":
self.config_dir = config_dir = os.path.join(config_dir, "prod")
elif environment == "test_suite" and os.path.exists(os.path.join(config_dir, "test")):
self.config_dir = config_dir = os.path.join(config_dir, "test")
else:
self.config_dir = config_dir = os.path.join(config_dir, "dev")
config_file_paths = {}
app_yml_path = os.path.join(config_dir, "app.yml")
logging_yml_path = os.path.join(config_dir, "logging.yml")
bundles_yml_path = os.path.join(config_dir, "bundles.yml")
# A local-only config, which overrides the app.yml values
app_override_yml_path = os.path.join(config_dir, "app.override.yml")
if os.path.exists(app_yml_path):
config_file_paths["app_yml"] = app_yml_path
if os.path.exists(logging_yml_path):
config_file_paths["logging_yml"] = logging_yml_path
if os.path.exists(bundles_yml_path):
config_file_paths["bundles_yml"] = bundles_yml_path
if os.path.exists(app_override_yml_path):
config_file_paths["app_override_yml"] = app_override_yml_path
self._config_file_paths = config_file_paths
if "app_yml" in config_file_paths and not app_config:
with open(config_file_paths["app_yml"]) as app_yml:
self._app_config = load(app_yml, self._YAMLLoader)
# If the overrides file exists, override the app config values
# with ones from app.override.yml
if "app_override_yml" in config_file_paths:
app_override_config = {}
with open(config_file_paths["app_override_yml"]) as app_override_yml:
app_override_config = load(app_override_yml, self._YAMLLoader)
self._app_config = self.__class__.merge_dicts(
self._app_config,
app_override_config
)
if "logging_yml" in config_file_paths and not logging_config:
with open(config_file_paths["logging_yml"]) as logging_yml:
self._logging_config = load(logging_yml, self._YAMLLoader)
if "bundles_yml" in config_file_paths and not webassets_env:
from webassets.loaders import YAMLLoader
self._webassets_env = YAMLLoader(config_file_paths["bundles_yml"]).load_environment()
if app_config:
self._app_config = dict(app_config)
try:
# Merge JSON from environment variable
self._app_config = self.__class__.merge_dicts(self._app_config, ENV_CONFIG)
except AttributeError:
if ENV_CONFIG: # not an empty dict
self._app_config = ENV_CONFIG
# Don't re-raise exception, self.validate() will do this later
if logging_config:
self._logging_config = dict(logging_config)
if webassets_env is not None:
self._webassets_env = webassets_env
self.validate() # Checks that all attributes are pre-populated
# Convert relative paths to absolute where needed
# self.validate() will fail if there's no app_config['controllers']
for _ in self._app_config['controllers']:
section = self._app_config['controllers'][_]
for r in section:
if isinstance(section[r], dict):
for __ in ['tools.staticdir.root',
'tools.staticfile.root']:
pth = section[r].get(__)
if pth is not None and not pth.startswith('/'):
self._app_config['controllers'][_][r][__] = \
os.path.join(CWD, pth)
# Convert relative paths of logs in handlers
# self.validate() will fail if there's no self._logging_config
for handler_name, handler_config in (getattr(self, '_logging_config', {}) or {}).get('handlers', {}).viewitems():
pth = handler_config.get('filename')
if pth is not None and not pth.startswith('/'):
self._logging_config['handlers'][handler_name]['filename'] = \
os.path.join(CWD, pth)
if environment == "backlash":
self.setup_backlash_environment()
@property
def config_file_paths(self):
if self._config_file_paths:
sorted_kv_pairs = tuple(((k, self._config_file_paths[k])
for k in sorted(self._config_file_paths.viewkeys())))
paths = collections.namedtuple("config_file_paths", [e[0] for e in sorted_kv_pairs])
return paths(*[e[1] for e in sorted_kv_pairs])
@property
def project_metadata(self):
return self.app_config["project_metadata"]
@property
def use_logging(self):
return self.app_config.get("global", {}).get("engine.logging.on", False)
@property
def use_redis(self):
if self.controllers_config:
for _, controller_config in self.controllers_config.viewitems():
controller_config = controller_config.copy()
controller_config.pop("controller")
for path_config in controller_config.viewvalues():
if path_config.get("tools.sessions.storage_type") == "redis":
return True
return False
@property
def use_sqlalchemy(self):
return self.app_config.get("global", {}).get("engine.sqlalchemy.on", False)
@property
def use_jinja2(self):
return "jinja2" in self.app_config
@property
def use_webassets(self):
return self.use_jinja2 and self.app_config["jinja2"].get("use_webassets", False)
@property
def use_email(self):
return "email" in self.app_config
@property
def controllers_config(self):
return self.app_config.get("controllers")
@property
def app_config(self):
return self._app_config
@property
def logging_config(self):
return getattr(self, "_logging_config", None)
@property
def webassets_env(self):
return getattr(self, "_webassets_env", None)
@property
def jinja2_config(self):
if self.use_jinja2:
conf = self.app_config["jinja2"].copy()
conf.pop("use_webassets", None)
return conf
@property
def sqlalchemy_config(self):
if self.use_sqlalchemy:
if "sqlalchemy_engine" in self.app_config:
saconf = self.app_config["sqlalchemy_engine"].copy()
return {"sqlalchemy_engine": saconf}
else:
return dict([(k, v) for k, v in self.app_config.viewitems()
if k.startswith("sqlalchemy_engine")])
@property
def email_config(self):
return self.app_config.get("email")
def setup_backlash_environment(self):
"""
        Configures this configuration object to run under the backlash
        debugger environment and ensures that environment is registered
        with cherrypy's config object.
"""
try:
from backlash import DebuggedApplication
except ImportError:
warnings.warn("backlash not installed")
return
cherrypy._cpconfig.environments["backlash"] = {
"log.wsgi": True,
"request.throw_errors": True,
"log.screen": False,
"engine.autoreload_on": False
}
def remove_error_options(section):
section.pop("request.handler_error", None)
section.pop("request.error_response", None)
section.pop("tools.err_redirect.on", None)
section.pop("tools.log_headers.on", None)
section.pop("tools.log_tracebacks.on", None)
for k in section.copy().viewkeys():
if k.startswith("error_page.") or \
k.startswith("request.error_page."):
section.pop(k)
for section_name, section in self.app_config.viewitems():
if section_name.startswith("/") or section_name == "global":
remove_error_options(section)
wsgi_pipeline = []
if "/" in self.app_config:
wsgi_pipeline = self.app_config["/"].get("wsgi.pipeline", [])
else:
self.app_config["/"] = {}
wsgi_pipeline.insert(0, ("backlash", DebuggedApplication))
self.app_config["/"]["wsgi.pipeline"] = wsgi_pipeline
def validate(self):
# no need to check for cp config, which will be checked on startup
if not hasattr(self, "_app_config") or not self.app_config:
raise BlueberryPyNotConfiguredError("BlueberryPy application configuration not found.")
if self.use_sqlalchemy and not self.sqlalchemy_config:
raise BlueberryPyNotConfiguredError("SQLAlchemy configuration not found.")
if self.use_webassets:
if self.webassets_env is None:
raise BlueberryPyNotConfiguredError("Webassets configuration not found.")
elif len(self.webassets_env) == 0:
raise BlueberryPyNotConfiguredError("No bundles found in webassets env.")
if self.use_jinja2 and not self.jinja2_config:
raise BlueberryPyNotConfiguredError("Jinja2 configuration not found.")
if self.use_logging and not self.logging_config:
warnings.warn("BlueberryPy application-specific logging "
"configuration not found. Continuing without "
"BlueberryPy's logging plugin.")
if self.use_email:
if not self.email_config:
warnings.warn("BlueberryPy email configuration is empty.")
else:
try:
                    signature = inspect.signature(Mailer.__init__)
                    # .keys() is a view in Python 3 and cannot be sliced directly
                    argnames = frozenset(list(signature.parameters)[1:])
except AttributeError:
mailer_ctor_argspec = inspect.getargspec(Mailer.__init__)
argnames = frozenset(mailer_ctor_argspec.args[1:])
for key in self.email_config.viewkeys():
if key not in argnames:
closest_match = difflib.get_close_matches(key, argnames, 1)
closest_match = ((closest_match and " Did you mean %r?" % closest_match[0])
or "")
warnings.warn(("Unknown key %r found for [email]." % key) + closest_match)
if not self.controllers_config:
raise BlueberryPyConfigurationError("You must declare at least one controller.")
else:
for script_name, section in self.controllers_config.viewitems():
controller = section.get("controller")
if controller is None:
raise BlueberryPyConfigurationError("You must define a controller in the "
"[controllers][%s] section." % script_name)
elif isinstance(controller, cherrypy.dispatch.RoutesDispatcher):
if not controller.controllers:
warnings.warn("Controller %r has no connected routes." % script_name)
else:
for member_name, member_obj in inspect.getmembers(controller):
if member_name == "exposed" and member_obj:
break
elif (hasattr(member_obj, "exposed") and
member_obj.exposed is True):
break
else:
warnings.warn("Controller %r has no exposed method." % script_name)
@classmethod
def _load_env_var(cls, env_var_name):
env_conf = {}
try:
env_conf = json.loads(os.getenv(env_var_name),
object_hook=cls._callable_json_loader)
except ValueError:
# Don't use simplejson.JSONDecodeError, since it only exists in
# simplejson implementation and is a subclass of ValueError
# See: https://github.com/Yelp/mrjob/issues/544
logger.error('${} is not a valid JSON string!'
.format(env_var_name))
except TypeError:
logger.warning('${} environment variable is not set!'
.format(env_var_name))
except:
logger.exception('Could not parse ${} environment variable for an '
'unknown reason!'.format(env_var_name))
return env_conf
@staticmethod
def get_callable_from_str(s):
python_module, python_name = s.rsplit('.', 1)
return getattr(importlib.import_module(python_module), python_name)
@classmethod
def _callable_json_loader(cls, obj):
if isinstance(obj, str):
if obj.startswith('!!python/name:'):
cllbl = cls.get_callable_from_str(obj.split(':', 1)[-1])
return cllbl if callable(cllbl) else obj
if isinstance(obj, dict):
keys = tuple(filter(lambda _: _.startswith('!!python/object:'),
obj.keys()))
for k in keys:
cllbl = cls.get_callable_from_str(k.split(':', 1)[-1])
return cllbl(**obj[k]) if callable(cllbl) else obj
return obj
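        # sketch of the loader's behaviour (hypothetical payloads):
        #   '!!python/name:os.path.join'                             -> the os.path.join callable
        #   {'!!python/object:datetime.timedelta': {'seconds': 30}}  -> timedelta(seconds=30)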
@classmethod
def merge_dicts(cls, base, overrides):
'''Recursive helper for merging of two dicts'''
for k in overrides.keys():
if k in base:
if isinstance(base[k], dict) and isinstance(overrides[k], dict):
base[k] = cls.merge_dicts(base[k], overrides[k])
elif isinstance(overrides[k], list) and \
not isinstance(base[k], list):
base[k] = [base[k]] + overrides[k]
elif isinstance(base[k], list) and \
not isinstance(overrides[k], list):
base[k] = base[k] + [overrides[k]]
elif not isinstance(base[k], dict):
base[k] = overrides[k]
else:
base[k].update(overrides[k])
else:
base[k] = overrides[k]
return base
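        # minimal illustration of the merge semantics above (hypothetical values):
        #
        #   base      = {'global': {'a': 1}, 'x': [1]}
        #   overrides = {'global': {'b': 2}, 'x': 3}
        #   merge_dicts(base, overrides)
        #   # -> {'global': {'a': 1, 'b': 2}, 'x': [1, 3]}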
|
open-craft-guild/blueberrypy
|
src/blueberrypy/config.py
|
Python
|
bsd-3-clause
| 18,173
| 0.002091
|
import argparse
import sys
import os
from annotated_set import loadData
from data_structures import CanonicalDerivation
from canonical_parser import CanonicalParser
from derivation_tree import DerivationTree
from conversion.ghkm2tib import ghkm2tib
#from lib.amr.dag import Dag
class ExtractorCanSem:
def __init__(self):
pass
@classmethod
def help(self):
"""
Returns CanSem help message.
"""
return ExtractorCanSem.main(ExtractorCanSem(),"--help")
def main(self, *args):
parser = argparse.ArgumentParser(description='CanSem Extraction Algorithm for SHRG',
fromfile_prefix_chars='@',
prog='%s extract-cansem'%sys.argv[0])
parser.add_argument('nl_file', type=str, help="Natural Language File")
parser.add_argument('mr_file', type=str, help="Meaning Representation File")
parser.add_argument('alignment_file', type=str, help="Alignment File")
parser.add_argument('--ghkmDir', nargs='?', default='/home/kmh/Files/Tools/stanford-ghkm-2010-03-08', help="GHKM directory")
parser.add_argument('--tiburonLoc', nargs='?', default='/home/kmh/Files/Tools/newtib/tiburon', help="Tiburon executable file")
parser.add_argument('--prefix', nargs='?', default=False, help="Suffix for temporary and output files")
args = parser.parse_args(args=args)
        if args.prefix is False:
args.prefix = "test"
args.parse_path = "%s.ptb"%args.prefix
args.align_path = "%s.a"%args.prefix
args.text_path = "%s.f"%args.prefix
args.ghkm_path = "%s.ghkm"%args.prefix
args.tib_path = "%s.tib"%args.prefix
# load input data into AnnotatedSet
data = loadData(args.nl_file,args.mr_file,args.alignment_file)
derivations = []
for sentence in data:
# Extraction
parser = CanonicalParser(sentence)
if len(parser.derivs_done) > 0:
derivations.append((sentence,parser.derivs_done[0]))
print len(derivations)
self.genGHKMfiles(args,derivations)
def genGHKMfiles(self,args,derivations):
parse_file = open(args.parse_path,'w')
align_file = open(args.align_path,'w')
text_file = open(args.text_path,'w')
for s,d in derivations:
x = DerivationTree.fromDerivation(d)
parse,align = x.getGHKMtriple_Java()
text = s["nl"].strip(' \t\n\r')
parse_file.write("%s\n"%parse)
align_file.write("%s\n"%align)
text_file.write("%s\n"%text)
parse_file.close()
align_file.close()
text_file.close()
print "Running GHKM Java rule extraction"
mem = "2g"
ghkm_opts = "-fCorpus %s -eParsedCorpus %s -align %s -joshuaFormat false -maxLHS 200 -maxRHS 15 -MaxUnalignedRHS 15" % (args.text_path,args.parse_path,args.align_path)
java_opts="-Xmx%s -Xms%s -cp %s/ghkm.jar:%s/lib/fastutil.jar -XX:+UseCompressedOops"%(mem,mem,args.ghkmDir,args.ghkmDir)
os.system("java %s edu.stanford.nlp.mt.syntax.ghkm.RuleExtractor %s > %s" % (java_opts,ghkm_opts,args.ghkm_path))
print "Converting GHKM rules to Tiburon format"
ghkm2tib(args.ghkm_path,args.tib_path)
|
GullyAPCBurns/bolinas
|
extractor_cansem/extractor_cansem.py
|
Python
|
mit
| 3,342
| 0.012567
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from calibre.gui2.complete2 import LineEdit
from calibre.gui2.widgets import history
class HistoryLineEdit2(LineEdit):
max_history_items = None
def __init__(self, parent=None, completer_widget=None, sort_func=lambda x:None):
LineEdit.__init__(self, parent=parent, completer_widget=completer_widget, sort_func=sort_func)
@property
def store_name(self):
return 'lineedit_history_'+self._name
def initialize(self, name):
self._name = name
self.history = history.get(self.store_name, [])
self.set_separator(None)
self.update_items_cache(self.history)
self.setText('')
self.editingFinished.connect(self.save_history)
def save_history(self):
ct = unicode(self.text())
if len(ct) > 2:
try:
self.history.remove(ct)
except ValueError:
pass
self.history.insert(0, ct)
if self.max_history_items is not None:
del self.history[self.max_history_items:]
history.set(self.store_name, self.history)
self.update_items_cache(self.history)
def clear_history(self):
self.history = []
history.set(self.store_name, self.history)
self.update_items_cache(self.history)
|
insomnia-lab/calibre
|
src/calibre/gui2/widgets2.py
|
Python
|
gpl-3.0
| 1,547
| 0.003232
|
import re
import traceback
from urllib.parse import quote
from requests.utils import dict_from_cookiejar
from sickchill import logger
from sickchill.helper.common import convert_size, try_int
from sickchill.oldbeard import tvcache
from sickchill.oldbeard.bs4_parser import BS4Parser
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class Provider(TorrentProvider):
def __init__(self):
super().__init__("Pretome")
self.username = None
self.password = None
self.pin = None
self.minseed = 0
self.minleech = 0
self.urls = {
"base_url": "https://pretome.info",
"login": "https://pretome.info/takelogin.php",
"detail": "https://pretome.info/details.php?id=%s",
"search": "https://pretome.info/browse.php?search=%s%s",
"download": "https://pretome.info/download.php/%s/%s.torrent",
}
self.url = self.urls["base_url"]
self.categories = "&st=1&cat%5B%5D=7"
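        # pre-encoded query fragment: 'cat%5B%5D=7' is 'cat[]=7' URL-encoded
        # (presumably this tracker's TV category); it is appended verbatim to
        # the search URL built in search() below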
self.proper_strings = ["PROPER", "REPACK"]
self.cache = tvcache.TVCache(self)
def _check_auth(self):
if not self.username or not self.password or not self.pin:
logger.warning("Invalid username or password or pin. Check your settings")
return True
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {"username": self.username, "password": self.password, "login_pin": self.pin}
response = self.get_url(self.urls["login"], post_data=login_params, returns="text")
if not response:
logger.warning("Unable to connect to provider")
return False
if re.search("Username or password incorrect", response):
logger.warning("Invalid username or password. Check your settings")
return False
return True
def search(self, search_params, age=0, ep_obj=None):
results = []
if not self.login():
return results
for mode in search_params:
items = []
logger.debug(_("Search Mode: {mode}".format(mode=mode)))
for search_string in search_params[mode]:
if mode != "RSS":
logger.debug(_("Search String: {search_string}".format(search_string=search_string)))
search_url = self.urls["search"] % (quote(search_string), self.categories)
data = self.get_url(search_url, returns="text")
if not data:
continue
try:
with BS4Parser(data, "html5lib") as html:
# Continue only if one Release is found
empty = html.find("h2", text="No .torrents fit this filter criteria")
if empty:
logger.debug("Data returned from provider does not contain any torrents")
continue
torrent_table = html.find("table", style="border: none; width: 100%;")
if not torrent_table:
logger.exception("Could not find table of torrents")
continue
torrent_rows = torrent_table("tr", class_="browse")
for result in torrent_rows:
cells = result("td")
size = None
link = cells[1].find("a", style="font-size: 1.25em; font-weight: bold;")
torrent_id = link["href"].replace("details.php?id=", "")
try:
if link.get("title", ""):
title = link["title"]
else:
title = link.contents[0]
download_url = self.urls["download"] % (torrent_id, link.contents[0])
seeders = int(cells[9].contents[0])
leechers = int(cells[10].contents[0])
# Need size for failed downloads handling
if size is None:
torrent_size = cells[7].text
size = convert_size(torrent_size) or -1
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.debug(
"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
title, seeders, leechers
)
)
continue
item = {"title": title, "link": download_url, "size": size, "seeders": seeders, "leechers": leechers, "hash": ""}
if mode != "RSS":
logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers))
items.append(item)
except Exception:
logger.exception("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()))
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
results += items
return results
|
h3llrais3r/SickRage
|
sickchill/oldbeard/providers/pretome.py
|
Python
|
gpl-3.0
| 5,958
| 0.003021
|
"""Undocumented Module"""
__all__ = ['Transitions']
from panda3d.core import *
from direct.gui.DirectGui import *
from direct.interval.LerpInterval import LerpColorScaleInterval, LerpColorInterval, LerpScaleInterval, LerpPosInterval
from direct.interval.MetaInterval import Sequence, Parallel
from direct.interval.FunctionInterval import Func
# render sort index used when reparenting the fade/iris frames to aspect2d;
# referenced throughout this class (value assumed here so the snippet is self-contained)
FADE_SORT_INDEX = 1000
class Transitions:
# These may be reassigned before the fade or iris transitions are
# actually invoked to change the models that will be used.
IrisModelName = "models/misc/iris"
FadeModelName = "models/misc/fade"
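    # sketch of typical usage (from ShowBase-style code; names assumed):
    #
    #   transitions = Transitions(loader)
    #   transitions.fadeOut(t=1.0)     # lerp the screen to opaque black
    #   transitions.fadeIn(t=1.0)      # and back to transparent
    #   transitions.letterboxOn(0.25)  # slide the black bars in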
def __init__(self, loader,
model=None,
scale=3.0,
pos=Vec3(0, 0, 0)):
self.transitionIval = None
self.letterboxIval = None
self.iris = None
self.fade = None
self.letterbox = None
self.fadeModel = model
self.imagePos = pos
if model:
self.alphaOff = Vec4(1, 1, 1, 0)
self.alphaOn = Vec4(1, 1, 1, 1)
model.setTransparency(1)
self.lerpFunc = LerpColorScaleInterval
else:
self.alphaOff = Vec4(0, 0, 0, 0)
self.alphaOn = Vec4(0, 0, 0, 1)
self.lerpFunc = LerpColorInterval
self.irisTaskName = "irisTask"
self.fadeTaskName = "fadeTask"
self.letterboxTaskName = "letterboxTask"
def __del__(self):
if self.fadeModel:
self.fadeModel.removeNode()
self.fadeModel = None
##################################################
# Fade
##################################################
# We can set a custom model for the fade before using it for the first time
def setFadeModel(self, model, scale=1.0):
self.fadeModel = model
# We have to change some default parameters for a custom fadeModel
self.alphaOn = Vec4(1, 1, 1, 1)
# Reload fade if its already been created
if self.fade:
self.fade.destroy()
self.fade = None
self.loadFade()
def loadFade(self):
if self.fade is None:
# We create a DirectFrame for the fade polygon, instead of
# simply loading the polygon model and using it directly,
# so that it will also obscure mouse events for objects
# positioned behind it.
self.fade = DirectFrame(
parent = hidden,
guiId = 'fade',
relief = None,
image = self.fadeModel,
image_scale = (4, 2, 2),
state = DGG.NORMAL,
)
if not self.fadeModel:
# No fade model was given, so we make this the fade model.
self.fade["relief"] = DGG.FLAT
self.fade["frameSize"] = (-2, 2, -1, 1)
self.fade["frameColor"] = (0, 0, 0, 1)
self.fade.setTransparency(TransparencyAttrib.MAlpha)
self.fade.setBin('unsorted', 0)
self.fade.setColor(0,0,0,0)
def getFadeInIval(self, t=0.5, finishIval=None):
"""
Returns an interval without starting it. This is particularly useful in
cutscenes, so when the cutsceneIval is escaped out of we can finish the fade immediately
"""
#self.noTransitions() masad: this creates a one frame pop, is it necessary?
self.loadFade()
transitionIval = Sequence(Func(self.fade.reparentTo, aspect2d, FADE_SORT_INDEX),
Func(self.fade.showThrough), # in case aspect2d is hidden for some reason
self.lerpFunc(self.fade, t,
self.alphaOff,
# self.alphaOn,
),
Func(self.fade.detachNode),
name = self.fadeTaskName,
)
if finishIval:
transitionIval.append(finishIval)
return transitionIval
def getFadeOutIval(self, t=0.5, finishIval=None):
"""
Create a sequence that lerps the color out, then
parents the fade to hidden
"""
self.noTransitions()
self.loadFade()
transitionIval = Sequence(Func(self.fade.reparentTo,aspect2d,FADE_SORT_INDEX),
Func(self.fade.showThrough), # in case aspect2d is hidden for some reason
self.lerpFunc(self.fade, t,
self.alphaOn,
# self.alphaOff,
),
name = self.fadeTaskName,
)
if finishIval:
transitionIval.append(finishIval)
return transitionIval
def fadeIn(self, t=0.5, finishIval=None):
"""
Play a fade in transition over t seconds.
Places a polygon on the aspect2d plane then lerps the color
from black to transparent. When the color lerp is finished, it
parents the fade polygon to hidden.
"""
gsg = base.win.getGsg()
if gsg:
# If we're about to fade in from black, go ahead and
# preload all the textures etc.
base.graphicsEngine.renderFrame()
render.prepareScene(gsg)
render2d.prepareScene(gsg)
if (t == 0):
# Fade in immediately with no lerp
#print "transitiosn: fadeIn 0.0"
self.noTransitions()
self.loadFade()
self.fade.detachNode()
else:
# Create a sequence that lerps the color out, then
# parents the fade to hidden
self.transitionIval = self.getFadeInIval(t, finishIval)
self.transitionIval.start()
def fadeOut(self, t=0.5, finishIval=None):
"""
Play a fade out transition over t seconds.
Places a polygon on the aspect2d plane then lerps the color
from transparent to full black. When the color lerp is finished,
it leaves the fade polygon covering the aspect2d plane until you
fadeIn or call noFade.
"""
if (t == 0):
# Fade out immediately with no lerp
self.noTransitions()
self.loadFade()
self.fade.reparentTo(aspect2d, FADE_SORT_INDEX)
self.fade.setColor(self.alphaOn)
elif base.config.GetBool('no-loading-screen',0):
if finishIval:
self.transitionIval = finishIval
self.transitionIval.start()
else:
# Create a sequence that lerps the color out, then
# parents the fade to hidden
self.transitionIval = self.getFadeOutIval(t,finishIval)
self.transitionIval.start()
def fadeOutActive(self):
return self.fade and self.fade.getColor()[3] > 0
def fadeScreen(self, alpha=0.5):
"""
Put a semitransparent screen over the camera plane
to darken out the world. Useful for drawing attention to
a dialog box for instance
"""
#print "transitiosn: fadeScreen"
self.noTransitions()
self.loadFade()
self.fade.reparentTo(aspect2d, FADE_SORT_INDEX)
self.fade.setColor(self.alphaOn[0],
self.alphaOn[1],
self.alphaOn[2],
alpha)
def fadeScreenColor(self, color):
"""
Put a semitransparent screen over the camera plane
to darken out the world. Useful for drawing attention to
a dialog box for instance
"""
#print "transitiosn: fadeScreenColor"
self.noTransitions()
self.loadFade()
self.fade.reparentTo(aspect2d, FADE_SORT_INDEX)
self.fade.setColor(color)
def noFade(self):
"""
Removes any current fade tasks and parents the fade polygon away
"""
#print "transitiosn: noFade"
if self.transitionIval:
self.transitionIval.pause()
self.transitionIval = None
if self.fade:
# Make sure to reset the color, since fadeOutActive() is looking at it
self.fade.setColor(self.alphaOff)
self.fade.detachNode()
def setFadeColor(self, r, g, b):
self.alphaOn.set(r, g, b, 1)
self.alphaOff.set(r, g, b, 0)
##################################################
# Iris
##################################################
def loadIris(self):
        if self.iris is None:
self.iris = loader.loadModel(self.IrisModelName)
self.iris.setPos(0, 0, 0)
def irisIn(self, t=0.5, finishIval=None):
"""
Play an iris in transition over t seconds.
Places a polygon on the aspect2d plane then lerps the scale
of the iris polygon up so it looks like we iris in. When the
scale lerp is finished, it parents the iris polygon to hidden.
"""
self.noTransitions()
self.loadIris()
if (t == 0):
self.iris.detachNode()
else:
self.iris.reparentTo(aspect2d, FADE_SORT_INDEX)
self.transitionIval = Sequence(LerpScaleInterval(self.iris, t,
scale = 0.18,
startScale = 0.01),
Func(self.iris.detachNode),
name = self.irisTaskName,
)
if finishIval:
self.transitionIval.append(finishIval)
self.transitionIval.start()
def irisOut(self, t=0.5, finishIval=None):
"""
Play an iris out transition over t seconds.
Places a polygon on the aspect2d plane then lerps the scale
of the iris down so it looks like we iris out. When the scale
lerp is finished, it leaves the iris polygon covering the
aspect2d plane until you irisIn or call noIris.
"""
self.noTransitions()
self.loadIris()
self.loadFade() # we need this to cover up the hole.
if (t == 0):
self.iris.detachNode()
self.fadeOut(0)
else:
self.iris.reparentTo(aspect2d, FADE_SORT_INDEX)
self.transitionIval = Sequence(LerpScaleInterval(self.iris, t,
scale = 0.01,
startScale = 0.18),
Func(self.iris.detachNode),
# Use the fade to cover up the hole that the iris would leave
Func(self.fadeOut, 0),
name = self.irisTaskName,
)
if finishIval:
self.transitionIval.append(finishIval)
self.transitionIval.start()
def noIris(self):
"""
Removes any current iris tasks and parents the iris polygon away
"""
if self.transitionIval:
self.transitionIval.pause()
self.transitionIval = None
        if self.iris is not None:
self.iris.detachNode()
# Actually we need to remove the fade too,
# because the iris effect uses it.
self.noFade()
def noTransitions(self):
"""
This call should immediately remove any and all transitions running
"""
self.noFade()
self.noIris()
# Letterbox is not really a transition, it is a screen overlay
# self.noLetterbox()
##################################################
# Letterbox
##################################################
def loadLetterbox(self):
if not self.letterbox:
# We create a DirectFrame for the fade polygon, instead of
# simply loading the polygon model and using it directly,
# so that it will also obscure mouse events for objects
# positioned behind it.
self.letterbox = NodePath("letterbox")
# Allow fade in and out of the bars
self.letterbox.setTransparency(1)
# Allow DirectLabels to be parented to the letterbox sensibly
self.letterbox.setBin('unsorted', 0)
# Allow a custom look to the letterbox graphic.
# TODO: This model isn't available everywhere. We should
# pass it in as a parameter.
button = loader.loadModel('models/gui/toplevel_gui',
okMissing = True)
barImage = None
if button:
barImage = button.find('**/generic_button')
self.letterboxTop = DirectFrame(
parent = self.letterbox,
guiId = 'letterboxTop',
relief = DGG.FLAT,
state = DGG.NORMAL,
frameColor = (0, 0, 0, 1),
borderWidth = (0, 0),
frameSize = (-1, 1, 0, 0.2),
pos = (0, 0, 0.8),
image = barImage,
image_scale = (2.25,1,.5),
image_pos = (0,0,.1),
image_color = (0.3,0.3,0.3,1),
sortOrder = 0,
)
self.letterboxBottom = DirectFrame(
parent = self.letterbox,
guiId = 'letterboxBottom',
relief = DGG.FLAT,
state = DGG.NORMAL,
frameColor = (0, 0, 0, 1),
borderWidth = (0, 0),
frameSize = (-1, 1, 0, 0.2),
pos = (0, 0, -1),
image = barImage,
image_scale = (2.25,1,.5),
image_pos = (0,0,.1),
image_color = (0.3,0.3,0.3,1),
sortOrder = 0,
)
# masad: always place these at the bottom of render
self.letterboxTop.setBin('sorted',0)
self.letterboxBottom.setBin('sorted',0)
self.letterbox.reparentTo(render2d, -1)
self.letterboxOff(0)
def noLetterbox(self):
"""
Removes any current letterbox tasks and parents the letterbox polygon away
"""
if self.letterboxIval:
self.letterboxIval.pause()
self.letterboxIval = None
if self.letterbox:
self.letterbox.stash()
def letterboxOn(self, t=0.25, finishIval=None):
"""
Move black bars in over t seconds.
"""
self.noLetterbox()
self.loadLetterbox()
self.letterbox.unstash()
if (t == 0):
self.letterboxBottom.setPos(0, 0, -1)
self.letterboxTop.setPos(0, 0, 0.8)
else:
self.letterboxIval = Sequence(Parallel(
LerpPosInterval(self.letterboxBottom,
t,
pos = Vec3(0, 0, -1),
#startPos = Vec3(0, 0, -1.2),
),
LerpPosInterval(self.letterboxTop,
t,
pos = Vec3(0, 0, 0.8),
# startPos = Vec3(0, 0, 1),
),
),
name = self.letterboxTaskName,
)
if finishIval:
self.letterboxIval.append(finishIval)
self.letterboxIval.start()
def letterboxOff(self, t=0.25, finishIval=None):
"""
Move black bars away over t seconds.
"""
self.noLetterbox()
self.loadLetterbox()
self.letterbox.unstash()
if (t == 0):
self.letterbox.stash()
else:
self.letterboxIval = Sequence(Parallel(
LerpPosInterval(self.letterboxBottom,
t,
pos = Vec3(0, 0, -1.2),
# startPos = Vec3(0, 0, -1),
),
LerpPosInterval(self.letterboxTop,
t,
pos = Vec3(0, 0, 1),
# startPos = Vec3(0, 0, 0.8),
),
),
Func(self.letterbox.stash),
Func(messenger.send,'letterboxOff'),
name = self.letterboxTaskName,
)
if finishIval:
self.letterboxIval.append(finishIval)
self.letterboxIval.start()
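# Usage sketch (illustrative, not part of the original file). ShowBase
# normally exposes an instance of this class as base.transitions, so the
# effects defined above can be driven like this; timings are example values.
if __name__ == '__main__':
    from direct.showbase.ShowBase import ShowBase
    base = ShowBase()
    # The letterbox is an overlay rather than a transition, so it is not
    # cancelled by the noTransitions() call inside fadeOut().
    base.transitions.letterboxOn(t=0.25)
    base.transitions.fadeOut(t=1.0)
    base.run()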
|
hj3938/panda3d
|
direct/src/showbase/Transitions.py
|
Python
|
bsd-3-clause
| 17,107
| 0.009002
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from resource_management.core.logger import Logger
class GenericManagerProperties(object):
"""
  Class to keep all package-manager dependent properties. Each non-generic implementation should override the properties
  declared here
"""
empty_file = "/dev/null"
locked_output = None
repo_error = None
repo_manager_bin = None
pkg_manager_bin = None
repo_update_cmd = None
available_packages_cmd = None
installed_packages_cmd = None
all_packages_cmd = None
repo_definition_location = None
install_cmd = {
True: None,
False: None
}
remove_cmd = {
True: None,
False: None
}
verify_dependency_cmd = None
class GenericManager(object):
"""
  Interface for all custom implementations. Provides the required base for any custom manager so it can be integrated smoothly
"""
@property
def properties(self):
return GenericManagerProperties
def install_package(self, name, context):
"""
Install package
:type name str
:type context ambari_commons.shell.RepoCallContext
:raise ValueError if name is empty
"""
raise NotImplementedError()
def remove_package(self, name, context, ignore_dependencies=False):
"""
Remove package
:type name str
:type context ambari_commons.shell.RepoCallContext
:type ignore_dependencies bool
:raise ValueError if name is empty
"""
raise NotImplementedError()
def upgrade_package(self, name, context):
"""
    Upgrade package
:type name str
:type context ambari_commons.shell.RepoCallContext
:raise ValueError if name is empty
"""
raise NotImplementedError()
def check_uncompleted_transactions(self):
"""
    Check the package manager for uncompleted transactions.
:rtype bool
"""
return False
def print_uncompleted_transaction_hint(self):
"""
    Print a friendly message about how to fix the issue
"""
pass
def get_available_packages_in_repos(self, repositories):
"""
Gets all (both installed and available) packages that are available at given repositories.
:type repositories resource_management.libraries.functions.repository_util.CommandRepository
:return: installed and available packages from these repositories
"""
raise NotImplementedError()
def installed_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def available_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def all_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def get_installed_repos(self, hint_packages, all_packages, ignore_repos):
"""
    Gets all installed repos by name, based on repos that provide any package
    contained in hint_packages.
    Repos starting with a value in ignore_repos will not be returned.
    hint_packages must be regexps.
"""
all_repos = []
repo_list = []
for hintPackage in hint_packages:
for item in all_packages:
        if re.match(hintPackage, item[0]) and item[2] not in all_repos:
all_repos.append(item[2])
for repo in all_repos:
ignore = False
for ignoredRepo in ignore_repos:
if self.name_match(ignoredRepo, repo):
ignore = True
if not ignore:
repo_list.append(repo)
return repo_list
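  # Illustrative example (values are assumptions, not Ambari data): with
  #   all_packages = [('hadoop', '2.7.3', 'HDP-2.6'), ('zip', '3.0', 'base')]
  # get_installed_repos(['hadoop.*'], all_packages, ['base']) returns
  # ['HDP-2.6'], since only the hadoop entry matches the hint regexp.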
def get_installed_pkgs_by_repo(self, repos, ignore_packages, installed_packages):
"""
Get all the installed packages from the repos listed in repos
"""
packages_from_repo = []
packages_to_remove = []
for repo in repos:
sub_result = []
for item in installed_packages:
if repo == item[2]:
sub_result.append(item[0])
packages_from_repo = list(set(packages_from_repo + sub_result))
for package in packages_from_repo:
keep_package = True
for ignorePackage in ignore_packages:
if self.name_match(ignorePackage, package):
keep_package = False
break
if keep_package:
packages_to_remove.append(package)
return packages_to_remove
def get_installed_pkgs_by_names(self, pkg_names, all_packages_list=None):
"""
    Gets all installed packages that start with names in pkg_names
:type pkg_names list[str]
:type all_packages_list list[str]
"""
return self.installed_packages(pkg_names)
def get_package_details(self, installed_packages, found_packages):
"""
Gets the name, version, and repoName for the packages
:type installed_packages list[tuple[str,str,str]]
:type found_packages list[str]
"""
package_details = []
for package in found_packages:
pkg_detail = {}
for installed_package in installed_packages:
if package == installed_package[0]:
pkg_detail['name'] = installed_package[0]
pkg_detail['version'] = installed_package[1]
pkg_detail['repoName'] = installed_package[2]
package_details.append(pkg_detail)
return package_details
def get_repos_to_remove(self, repos, ignore_list):
repos_to_remove = []
for repo in repos:
add_to_remove_list = True
for ignore_repo in ignore_list:
if self.name_match(ignore_repo, repo):
add_to_remove_list = False
continue
if add_to_remove_list:
repos_to_remove.append(repo)
return repos_to_remove
def get_installed_package_version(self, package_name):
raise NotImplementedError()
def verify_dependencies(self):
"""
Verify that we have no dependency issues in package manager. Dependency issues could appear because of aborted or terminated
package installation process or invalid packages state after manual modification of packages list on the host
:return True if no dependency issues found, False if dependency issue present
:rtype bool
"""
raise NotImplementedError()
def name_match(self, lookup_name, actual_name):
tokens = actual_name.strip().lower()
lookup_name = lookup_name.lower()
return " " not in lookup_name and lookup_name in tokens
def _executor_error_handler(self, command, error_log, exit_code):
"""
Error handler for ac_shell.process_executor
:type command list|str
:type error_log list
:type exit_code int
"""
if isinstance(command, (list, tuple)):
command = " ".join(command)
Logger.error("Command execution error: command = \"{0}\", exit code = {1}, stderr = {2}".format(
command, exit_code, "\n".join(error_log)))
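# Illustrative sketch (not part of Ambari): a concrete package manager would
# subclass the two types above. The binary paths and command layouts here are
# assumptions for demonstration only.
class ExampleAptProperties(GenericManagerProperties):
  repo_manager_bin = "/usr/bin/apt-get"  # assumed path
  pkg_manager_bin = "/usr/bin/dpkg"      # assumed path
  repo_update_cmd = [repo_manager_bin, "update", "-qq"]

class ExampleAptManager(GenericManager):
  @property
  def properties(self):
    return ExampleAptProperties

  def install_package(self, name, context):
    if not name:
      raise ValueError("installation command requires a non-empty package name")
    # A real implementation would execute the command via ambari_commons.shell
    # and route failures through _executor_error_handler.
    Logger.info("Would run: {0} install {1}".format(
        self.properties.repo_manager_bin, name))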
|
sekikn/ambari
|
ambari-common/src/main/python/ambari_commons/repo_manager/generic_manager.py
|
Python
|
apache-2.0
| 7,285
| 0.009746
|
# -*- coding: utf-8 -*-
'''
Rupture
version 1.4.0
build 5
'''
from bs4 import BeautifulSoup
import datetime
import requests
import socket
import pickle
import time
import ssl
from .utils import six
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
class Rupture(object):
parser = None # None or html.parser or lxml
encoding = None
def __init__(self, proxies=None, parser='html.parser', timeout=None, headers=None):
self.session = requests.Session()
if headers:
self.session.headers.update(headers)
self.proxies = proxies
self.parser = parser
self.timeout = timeout
def _wrap_response(self, obj, parser):
def get_soup(self):
if not hasattr(self, '_soup'):
start_time = datetime.datetime.now()
from_encoding = None if self.encoding == 'utf-8' else self.encoding
if isinstance(self.text, six.text_type):
from_encoding = None # Prevent UserWarning
self._soup = BeautifulSoup(self.text, self.parser, from_encoding=from_encoding)
self._soup.elapsed = datetime.datetime.now() - start_time
if self.parser == 'lxml':
import lxml
lxml.etree.clear_error_log()
return self._soup
def get__repr__(self):
if hasattr(self, 'text'):
return '<Response [%s]: %s>' % (self.status_code, self.text)
return '<Response [%s]>' % (self.status_code)
obj.__class__.parser = parser
obj.__class__.soup = property(get_soup)
obj.__class__.__repr__ = get__repr__
return obj
def http_request(self, method, url, params=None, data=None, timeout=None, proxies=None, encoding=None, parser=None, retries=None, retries_interval=None, **kwargs):
timeout = self.timeout if timeout is None else timeout
proxies = self.proxies if proxies is None else proxies
encoding = self.encoding if encoding is None else encoding
parser = self.parser if parser is None else parser
if not retries:
retries = 0
        # Build the proxies mapping once, before the retry loop; doing it per
        # iteration would re-wrap the dict on every retry.
        proxies = {'http': proxies, 'https': proxies} if proxies else None
        while True:
            try:
                start_time = datetime.datetime.now()
r = self.session.request(method, url, params=params, data=data, timeout=timeout, proxies=proxies, **kwargs)
r.elapsed_all = datetime.datetime.now() - start_time
if encoding:
r.encoding = encoding
return self._wrap_response(r, parser)
except (ssl.SSLError) as e:
if retries > 0:
retries = retries - 1
if retries_interval:
time.sleep(retries_interval)
continue
raise requests.exceptions.RequestException('SSLError %s' % e)
except (socket.error) as e:
if retries > 0:
retries = retries - 1
if retries_interval:
time.sleep(retries_interval)
continue
raise requests.exceptions.RequestException('Socket Error %s' % e)
def http_get(self, url, params=None, **kwargs):
return self.http_request('GET', url, params=params, **kwargs)
def xml_get(self, url, params=None, headers=None, **kwargs):
xml_headers = {
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/json; charset=utf-8'
}
        if headers:
            merged = dict(xml_headers)
            merged.update(headers)
            headers = merged
        else:
            headers = xml_headers
return self.http_get(url, params=params, headers=headers, **kwargs)
def http_post(self, url, data=None, **kwargs):
return self.http_request('POST', url, data=data, **kwargs)
def xml_post(self, url, data=None, headers=None, **kwargs):
xml_headers = {
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/json; charset=utf-8'
}
        if headers:
            merged = dict(xml_headers)
            merged.update(headers)
            headers = merged
        else:
            headers = xml_headers
return self.http_post(url, data=data, headers=headers, **kwargs)
def http_download(self, url, filepath, method='get', **kwargs):
if method.lower() == 'get':
response = self.http_get(url, stream=True, **kwargs)
elif method.lower() == 'post':
response = self.http_post(url, stream=True, **kwargs)
else:
raise NotImplementedError()
if not response.ok:
raise requests.exceptions.RequestException('Response not okay')
with open(filepath, 'wb') as handle:
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
return filepath
def http_get_image(self, url, filepath, **kwargs):
return self.http_download(url, filepath, **kwargs)
def parse_float_or_none(self, s):
if s:
return float(str(s).strip().replace(',', '').replace('+', ''))
return s
def new_session(self):
self.session = requests.Session()
def serialize(self):
return pickle.dumps([self.session])
@classmethod
def _deserialize_key(cls, data, keys):
raw_results = pickle.loads(data)
entity = cls()
for i in range(len(keys)):
setattr(entity, keys[i], raw_results[i])
return entity
@classmethod
def deserialize(cls, data):
return cls._deserialize_key(data, ['session'])
def patch_ssl(self):
class SSLAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1)
if not getattr(self.session, 'is_patch', False):
self.session.is_patch = True
self.session.mount('https://', SSLAdapter())
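# Usage sketch (illustrative): fetch a page and use the lazily built
# BeautifulSoup object exposed as response.soup. The URL and retry values
# are example inputs, not library defaults.
if __name__ == '__main__':
    client = Rupture(parser='html.parser', timeout=10)
    response = client.http_get('https://example.com', retries=2, retries_interval=1)
    print(response.status_code)  # e.g. 200
    print(response.soup.title)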
|
shinznatkid/rupture
|
rupture/rupture.py
|
Python
|
mit
| 6,405
| 0.002186
|
from django.conf.urls.defaults import *
from django_de.apps.authors.models import Author
urlpatterns = patterns('django.views.generic.list_detail',
(r'^$', 'object_list',
dict(
queryset = Author.objects.order_by('name', 'slug'),
template_object_name = 'author',
allow_empty=True,
),
)
)
|
django-de/django-de-v2
|
django_de/apps/authors/urls.py
|
Python
|
bsd-3-clause
| 348
| 0.017241
|
from seleniumbase import BaseCase
class GitHubTests(BaseCase):
def test_github(self):
# Selenium can trigger GitHub's anti-automation system:
# "You have triggered an abuse detection mechanism."
# "Please wait a few minutes before you try again."
# To avoid this automation blocker, two steps are being taken:
# 1. self.slow_click() is being used to slow down Selenium actions.
# 2. The browser's User Agent is modified to avoid Selenium-detection
# when running in headless mode.
if self.headless:
self.get_new_driver(
agent="""Mozilla/5.0 """
"""AppleWebKit/537.36 (KHTML, like Gecko) """
"""Chrome/Version 96.0.4664.55 Safari/537.36"""
)
self.open("https://github.com/search?q=SeleniumBase")
self.slow_click('a[href="/seleniumbase/SeleniumBase"]')
self.click_if_visible('[data-action="click:signup-prompt#dismiss"]')
self.assert_element("div.repository-content")
self.assert_text("SeleniumBase", "h1")
self.slow_click('a[title="seleniumbase"]')
self.slow_click('a[title="fixtures"]')
self.slow_click('a[title="base_case.py"]')
self.assert_text("Code", "nav a.selected")
|
seleniumbase/SeleniumBase
|
examples/github_test.py
|
Python
|
mit
| 1,290
| 0
|
from spacewiki.app import create_app
from spacewiki import model
from spacewiki.test import create_test_app
import unittest
class UiTestCase(unittest.TestCase):
def setUp(self):
self._app = create_test_app()
with self._app.app_context():
model.syncdb()
self.app = self._app.test_client()
def test_index(self):
self.assertEqual(self.app.get('/').status_code, 200)
def test_no_page(self):
self.assertEqual(self.app.get('/missing-page').status_code, 200)
def test_all_pages(self):
self.assertEqual(self.app.get('/.all-pages').status_code, 200)
def test_edit(self):
self.assertEqual(self.app.get('/index/edit').status_code, 200)
|
spacewiki/spacewiki
|
spacewiki/test/ui_test.py
|
Python
|
agpl-3.0
| 718
| 0.001393
|
#######################################################################
# This file is part of Pyblosxom.
#
# Copyright (C) 2010-2011 by the Pyblosxom team. See AUTHORS.
#
# Pyblosxom is distributed under the MIT license. See the file
# LICENSE for distribution details.
#######################################################################
import tempfile
import shutil
import os
from Pyblosxom.tests import PluginTest
from Pyblosxom.plugins import tags
from Pyblosxom.pyblosxom import Request
class TagsTest(PluginTest):
def setUp(self):
PluginTest.setUp(self, tags)
self.tmpdir = tempfile.mkdtemp()
def get_datadir(self):
return os.path.join(self.tmpdir, "datadir")
def tearDown(self):
PluginTest.tearDown(self)
try:
shutil.rmtree(self.tmpdir)
except OSError:
pass
def test_get_tagsfile(self):
req = Request({"datadir": self.get_datadir()}, {}, {})
cfg = {"datadir": self.get_datadir()}
self.assertEquals(tags.get_tagsfile(cfg),
os.path.join(self.get_datadir(), os.pardir,
"tags.index"))
tags_filename = os.path.join(self.get_datadir(), "tags.db")
cfg = {"datadir": self.get_datadir(), "tags_filename": tags_filename}
self.assertEquals(tags.get_tagsfile(cfg), tags_filename)
def test_tag_cloud_no_tags(self):
# test no tags
self.request.get_data()["tagsdata"] = {}
tags.cb_head(self.args)
self.assertEquals(
str(self.args["entry"]["tagcloud"]),
"\n".join(
["<p>",
"</p>"]))
def test_tag_cloud_one_tag(self):
# test no tags
self.request.get_data()["tagsdata"] = {
"tag2": ["a"],
}
tags.cb_head(self.args)
self.assertEquals(
str(self.args["entry"]["tagcloud"]),
"\n".join(
["<p>",
'<a class="biggestTag" href="http://bl.og//tag/tag2">tag2</a>',
"</p>"]))
def test_tag_cloud_many_tags(self):
# test no tags
self.request.get_data()["tagsdata"] = {
"tag1": ["a", "b", "c", "d", "e", "f"],
"tag2": ["a", "b", "c", "d"],
"tag3": ["a"]
}
tags.cb_head(self.args)
self.assertEquals(
str(self.args["entry"]["tagcloud"]),
"\n".join(
["<p>",
'<a class="biggestTag" href="http://bl.og//tag/tag1">tag1</a>',
'<a class="biggestTag" href="http://bl.og//tag/tag2">tag2</a>',
'<a class="smallestTag" href="http://bl.og//tag/tag3">tag3</a>',
"</p>"]))
|
maru-sama/pyblosxom
|
Pyblosxom/tests/test_tags.py
|
Python
|
mit
| 2,822
| 0.003898
|
"""
This page is in the table of contents.
Winding is a script to set the winding profile for the skeinforge chain.
The displayed craft sequence is the sequence in which the tools craft the model and export the output.
On the winding dialog, clicking the 'Add Profile' button will duplicate the selected profile and give it the name in the input field. For example, if laser is selected and the name laser_10mm is in the input field, clicking the 'Add Profile' button will duplicate laser and save it as laser_10mm. The 'Delete Profile' button deletes the selected profile.
The profile selection is the setting. If you hit 'Save and Close' the selection will be saved; if you hit 'Cancel' it will not. However, adding and deleting a profile is a permanent action; for example, 'Cancel' will not bring back any deleted profiles.
To change the winding profile, in a shell in the profile_plugins folder type:
> python winding.py
"""
from __future__ import absolute_import
import __init__
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftSequence():
"Get the winding craft sequence."
return 'cleave preface coil flow feed home lash fillet limit unpause alteration export'.split()
def getNewRepository():
'Get new repository.'
return WindingRepository()
class WindingRepository:
"A class to handle the winding settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsSetCraftProfile( getCraftSequence(), 'free_wire', self, 'skeinforge_application.skeinforge_plugins.profile_plugins.winding.html')
def main():
"Display the export dialog."
if len(sys.argv) > 1:
		writeOutput(' '.join(sys.argv[1:]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
|
AlexeyKruglov/Skeinforge-fabmetheus
|
skeinforge_application/skeinforge_plugins/profile_plugins/winding.py
|
Python
|
agpl-3.0
| 2,079
| 0.011544
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class HeaderByteCountTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'header_bytes': 'int',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'header_bytes': 'header_bytes',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, header_bytes=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""HeaderByteCountTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._header_bytes = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if header_bytes is not None:
self.header_bytes = header_bytes
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def header_bytes(self):
"""Gets the header_bytes of this HeaderByteCountTest. # noqa: E501
:return: The header_bytes of this HeaderByteCountTest. # noqa: E501
:rtype: int
"""
return self._header_bytes
@header_bytes.setter
def header_bytes(self, header_bytes):
"""Sets the header_bytes of this HeaderByteCountTest.
:param header_bytes: The header_bytes of this HeaderByteCountTest. # noqa: E501
:type: int
"""
self._header_bytes = header_bytes
@property
def reject_on_error(self):
"""Gets the reject_on_error of this HeaderByteCountTest. # noqa: E501
:return: The reject_on_error of this HeaderByteCountTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this HeaderByteCountTest.
:param reject_on_error: The reject_on_error of this HeaderByteCountTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this HeaderByteCountTest. # noqa: E501
:return: The checked of this HeaderByteCountTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this HeaderByteCountTest.
:param checked: The checked of this HeaderByteCountTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HeaderByteCountTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, HeaderByteCountTest):
return True
return self.to_dict() != other.to_dict()
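# Usage sketch (illustrative): build the generated model and round-trip it
# through to_dict(); the attribute values are arbitrary examples.
if __name__ == '__main__':
    test = HeaderByteCountTest(header_bytes=1024, reject_on_error=True,
                               checked=True)
    print(test.to_dict())
    # -> {'header_bytes': 1024, 'reject_on_error': True, 'checked': True}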
|
Telestream/telestream-cloud-python-sdk
|
telestream_cloud_qc_sdk/telestream_cloud_qc/models/header_byte_count_test.py
|
Python
|
mit
| 4,912
| 0
|
# -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import absolute_import, unicode_literals
import inspect
import os
import sys
from django.conf import settings
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from django.test.utils import (override_settings, setup_test_template_loader,
restore_template_loaders)
from django.utils.encoding import force_text
from django.views.debug import ExceptionReporter
from .. import BrokenException, except_args
from ..views import (sensitive_view, non_sensitive_view, paranoid_view,
custom_exception_reporter_filter_view, sensitive_method_view)
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
class DebugViewTests(TestCase):
urls = "regressiontests.views.urls"
def test_files(self):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_403(self):
# Ensure no 403.html template exists to test the default case.
setup_test_template_loader({})
try:
response = self.client.get('/views/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
finally:
restore_template_loaders()
def test_403_template(self):
# Set up a test 403.html template.
setup_test_template_loader(
{'403.html': 'This is a test template for a 403 Forbidden error.'}
)
try:
response = self.client.get('/views/raises403/')
self.assertContains(response, 'test template', status_code=403)
finally:
restore_template_loaders()
def test_404(self):
response = self.client.get('/views/raises404/')
self.assertEqual(response.status_code, 404)
def test_view_exceptions(self):
for n in range(len(except_args)):
self.assertRaises(BrokenException, self.client.get,
reverse('view_exception', args=(n,)))
def test_template_exceptions(self):
for n in range(len(except_args)):
try:
self.client.get(reverse('template_exception', args=(n,)))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertFalse(raising_loc.find('raise BrokenException') == -1,
"Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
raising_loc)
def test_template_loader_postmortem(self):
response = self.client.get(reverse('raises_template_does_not_exist'))
template_path = os.path.join('templates', 'i_dont_exist.html')
self.assertContains(response, template_path, status_code=500)
class ExceptionReporterTests(TestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
class PlainTextReportTests(TestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
text = reporter.get_traceback_text()
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
text = reporter.get_traceback_text()
class ExceptionReportTestMixin(object):
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value',}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', 'admin@fattie-breakie.com'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = force_text(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body)
self.assertIn(v, body)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', 'admin@fattie-breakie.com'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = force_text(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body)
self.assertIn('hash-brown-value', body)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body)
self.assertNotIn('bacon-value', body)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=(('Admin', 'admin@fattie-breakie.com'),)):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = force_text(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
class ExceptionReporterFilterTests(TestCase, ExceptionReportTestMixin):
"""
Ensure that sensitive information can be filtered out of error reports.
Refs #14614.
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Ensure that everything (request info and frame variables) can be seen
        in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Ensure that sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
Ensure that no POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
Ensure that the sensitive_variables decorator works with object
methods.
Refs #18379.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view,
check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view,
check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view,
check_for_POST_params=False)
class AjaxResponseExceptionReporterFilter(TestCase, ExceptionReportTestMixin):
"""
Ensure that sensitive information can be filtered out of error reports.
    Here we specifically test the plain text 500 debug-only error page served
    when the request is detected as coming from JS code. We don't check
    for the (non)existence of frame vars in the traceback information section
    of the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Ensure that request info can be seen in the default error reports for
        non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Ensure that sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
Ensure that no POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
Ensure that it's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view,
check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view,
check_for_vars=False)
|
blaze33/django
|
tests/regressiontests/views/tests/debug.py
|
Python
|
bsd-3-clause
| 22,564
| 0.001197
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/dialogs/quickview.ui'
#
# Created: Thu Jul 19 23:32:31 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Quickview(object):
def setupUi(self, Quickview):
Quickview.setObjectName(_fromUtf8("Quickview"))
Quickview.resize(768, 342)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Quickview.sizePolicy().hasHeightForWidth())
Quickview.setSizePolicy(sizePolicy)
self.gridlayout = QtGui.QGridLayout(Quickview)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.items_label = QtGui.QLabel(Quickview)
self.items_label.setObjectName(_fromUtf8("items_label"))
self.gridlayout.addWidget(self.items_label, 0, 0, 1, 1)
self.items = QtGui.QListWidget(Quickview)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.items.sizePolicy().hasHeightForWidth())
self.items.setSizePolicy(sizePolicy)
self.items.setObjectName(_fromUtf8("items"))
self.gridlayout.addWidget(self.items, 1, 0, 1, 1)
self.books_label = QtGui.QLabel(Quickview)
self.books_label.setObjectName(_fromUtf8("books_label"))
self.gridlayout.addWidget(self.books_label, 0, 1, 1, 1)
self.books_table = QtGui.QTableWidget(Quickview)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.books_table.sizePolicy().hasHeightForWidth())
self.books_table.setSizePolicy(sizePolicy)
self.books_table.setColumnCount(0)
self.books_table.setRowCount(0)
self.books_table.setObjectName(_fromUtf8("books_table"))
self.gridlayout.addWidget(self.books_table, 1, 1, 1, 1)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName(_fromUtf8("hboxlayout"))
self.search_button = QtGui.QPushButton(Quickview)
self.search_button.setObjectName(_fromUtf8("search_button"))
self.hboxlayout.addWidget(self.search_button)
spacerItem = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.hboxlayout.addItem(spacerItem)
self.buttonBox = QtGui.QDialogButtonBox(Quickview)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setCenterButtons(False)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.hboxlayout.addWidget(self.buttonBox)
self.gridlayout.addLayout(self.hboxlayout, 3, 0, 1, 2)
self.retranslateUi(Quickview)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Quickview.reject)
QtCore.QMetaObject.connectSlotsByName(Quickview)
def retranslateUi(self, Quickview):
Quickview.setWindowTitle(_("Quickview"))
self.items_label.setText(_("Items"))
self.search_button.setText(_("Search"))
self.search_button.setToolTip(_("Search in the library view for the selected item"))
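# Usage sketch (illustrative): the generated class is applied to a plain
# QDialog. calibre normally installs a translation function as the _ builtin;
# a pass-through stand-in is used here so the sketch runs outside calibre.
if __name__ == '__main__':
    import sys
    import __builtin__
    __builtin__._ = lambda s: s  # stand-in for calibre's translation hook
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_Quickview()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())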
|
Eksmo/calibre
|
src/calibre/gui2/dialogs/quickview_ui.py
|
Python
|
gpl-3.0
| 3,673
| 0.003539
|
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import glob
import itertools
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import time
import urllib
import urllib2
import urlparse
import xmlrpclib
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.tools.translate import _
from openerp.tools import config
from .. import http
openerpweb = http
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
def rjsmin(script):
""" Minify js with a clever regex.
Taken from http://opensource.perlig.de/rjsmin
Apache License, Version 2.0 """
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
(groups[6] and ' ') or
(groups[7] and ' ') or
''
)
result = re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#'
r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-'
r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^'
r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|'
r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0'
r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0'
r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:'
r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
).strip()
return result
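# Illustrative (hedged) example: rjsmin('var  x = 1;  // note\nvar y = 2;')
# returns roughly 'var x=1;\nvar y=2;': the comment is stripped and
# whitespace that the JS grammar does not require is removed.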
def db_list(req, force=False):
proxy = req.session.proxy("db")
dbs = proxy.list(force)
h = req.httprequest.environ['HTTP_HOST'].split(':')[0]
d = h.split('.')[0]
r = openerp.tools.config['dbfilter'].replace('%h', h).replace('%d', d)
dbs = [i for i in dbs if re.match(r, i)]
return dbs
def db_monodb_redirect(req):
return db_redirect(req, not config['list_db'])
def db_redirect(req, match_first_only_if_unique):
db = False
redirect = False
dbs = db_list(req, True)
# 1 try the db in the url
db_url = req.params.get('db')
if db_url and db_url in dbs:
return (db_url, False)
# 2 use the database from the cookie if it's listable and still listed
cookie_db = req.httprequest.cookies.get('last_used_database')
if cookie_db in dbs:
db = cookie_db
# 3 use the first db if user can list databases
if dbs and not db and (not match_first_only_if_unique or len(dbs) == 1):
db = dbs[0]
# redirect to the chosen db if multiple are available
if db and len(dbs) > 1:
query = dict(urlparse.parse_qsl(req.httprequest.query_string, keep_blank_values=True))
query.update({'db': db})
redirect = req.httprequest.path + '?' + urllib.urlencode(query)
return (db, redirect)
def db_monodb(req):
# if only one db exists, return it else return False
return db_redirect(req, True)[0]
def redirect_with_hash(req, url, code=303):
# Most IE and Safari versions decided not to preserve location.hash upon
# redirect. And even if IE10 pretends to support it, it still fails
# inexplicably in case of multiple redirects (and we do have some).
# See extensive test page at http://greenbytes.de/tech/tc/httpredirects/
return "<html><head><script>window.location = '%s' + location.hash;</script></head></html>" % url
def module_topological_sort(modules):
""" Return a list of module names sorted so that their dependencies of the
modules are listed before the module itself
modules is a dict of {module_name: dependencies}
:param modules: modules to sort
:type modules: dict
:returns: list(str)
"""
dependencies = set(itertools.chain.from_iterable(modules.itervalues()))
# incoming edge: dependency on other module (if a depends on b, a has an
# incoming edge from b, aka there's an edge from b to a)
# outgoing edge: other module depending on this one
# [Tarjan 1976], http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
#L ← Empty list that will contain the sorted nodes
L = []
#S ← Set of all nodes with no outgoing edges (modules on which no other
# module depends)
S = set(module for module in modules if module not in dependencies)
visited = set()
#function visit(node n)
def visit(n):
#if n has not been visited yet then
if n not in visited:
#mark n as visited
visited.add(n)
            # change: n is not a web module, so it cannot be resolved; ignore it
if n not in modules: return
#for each node m with an edge from m to n do (dependencies of n)
for m in modules[n]:
#visit(m)
visit(m)
#add n to L
L.append(n)
#for each node n in S do
for n in S:
#visit(n)
visit(n)
return L
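# Illustrative example (inputs are assumptions): with
#   modules = {'web': [], 'web_kanban': ['web'], 'web_calendar': ['web']}
# module_topological_sort(modules) returns ['web', 'web_kanban', 'web_calendar']
# (the two leaves may swap, since S is an unordered set); every module
# appears after all of its dependencies.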
def module_installed(req):
    # Candidate modules: the current heuristic is the presence of a /static dir
loadable = openerpweb.addons_manifest.keys()
modules = {}
# Retrieve database installed modules
# TODO The following code should move to ir.module.module.list_installed_modules()
Modules = req.session.model('ir.module.module')
domain = [('state','=','installed'), ('name','in', loadable)]
for module in Modules.search_read(domain, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = req.session.model('ir.module.module.dependency').read(deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_installed_bypass_session(dbname):
loadable = openerpweb.addons_manifest.keys()
modules = {}
try:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
m = registry.get('ir.module.module')
# TODO The following code should move to ir.module.module.list_installed_modules()
domain = [('state','=','installed'), ('name','in', loadable)]
ids = m.search(cr, 1, [('state','=','installed'), ('name','in', loadable)])
for module in m.read(cr, 1, ids, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = registry.get('ir.module.module.dependency').read(cr, 1, deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
except Exception,e:
pass
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_boot(req, db=None):
server_wide_modules = openerp.conf.server_wide_modules or ['web']
serverside = []
dbside = []
for i in server_wide_modules:
if i in openerpweb.addons_manifest:
serverside.append(i)
monodb = db or db_monodb(req)
if monodb:
dbside = module_installed_bypass_session(monodb)
dbside = [i for i in dbside if i not in serverside]
addons = serverside + dbside
return addons
def concat_xml(file_list):
"""Concatenate xml files
:param list(str) file_list: list of files to check
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
root = None
for fname in file_list:
with open(fname, 'rb') as fp:
contents = fp.read()
checksum.update(contents)
fp.seek(0)
xml = ElementTree.parse(fp).getroot()
if root is None:
root = ElementTree.Element(xml.tag)
#elif root.tag != xml.tag:
# raise ValueError("Root tags missmatch: %r != %r" % (root.tag, xml.tag))
for child in xml.getchildren():
root.append(child)
return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
def concat_files(file_list, reader=None, intersperse=""):
""" Concatenates contents of all provided files
:param list(str) file_list: list of files to check
:param function reader: reading procedure for each file
:param str intersperse: string to intersperse between file contents
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
if reader is None:
def reader(f):
import codecs
with codecs.open(f, 'rb', "utf-8-sig") as fp:
return fp.read().encode("utf-8")
files_content = []
for fname in file_list:
contents = reader(fname)
checksum.update(contents)
files_content.append(contents)
files_concat = intersperse.join(files_content)
return files_concat, checksum.hexdigest()
concat_js_cache = {}
def concat_js(file_list):
content, checksum = concat_files(file_list, intersperse=';')
if checksum in concat_js_cache:
content = concat_js_cache[checksum]
else:
content = rjsmin(content)
concat_js_cache[checksum] = content
return content, checksum
def fs2web(path):
"""convert FS path into web path"""
return '/'.join(path.split(os.path.sep))
def manifest_glob(req, extension, addons=None, db=None):
if addons is None:
addons = module_boot(req, db=db)
else:
addons = addons.split(',')
r = []
for addon in addons:
manifest = openerpweb.addons_manifest.get(addon, None)
if not manifest:
continue
        # ensure the addons path does not end with /
addons_path = os.path.join(manifest['addons_path'], '')[:-1]
globlist = manifest.get(extension, [])
for pattern in globlist:
for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
r.append((path, fs2web(path[len(addons_path):])))
return r
def manifest_list(req, extension, mods=None, db=None):
""" list ressources to load specifying either:
mods: a comma separated string listing modules
db: a database name (return all installed modules in that database)
"""
if not req.debug:
path = '/web/webclient/' + extension
if mods is not None:
path += '?' + urllib.urlencode({'mods': mods})
elif db:
path += '?' + urllib.urlencode({'db': db})
return [path]
files = manifest_glob(req, extension, addons=mods, db=db)
return [wp for _fp, wp in files]
def get_last_modified(files):
""" Returns the modification time of the most recently modified
file provided
:param list(str) files: names of files to check
:return: most recent modification time amongst the fileset
:rtype: datetime.datetime
"""
files = list(files)
if files:
return max(datetime.datetime.fromtimestamp(os.path.getmtime(f))
for f in files)
return datetime.datetime(1970, 1, 1)
def make_conditional(req, response, last_modified=None, etag=None):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param req: OpenERP request
:type req: web.common.http.WebRequest
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = 0
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(req.httprequest)
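# Usage sketch (mirrors the css/js/qweb handlers below): build the response,
# then wrap it so clients must revalidate via If-Modified-Since and ETag:
#
#   response = req.make_response(content, [('Content-Type', 'text/css')])
#   return make_conditional(req, response, last_modified, checksum)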
def login_and_redirect(req, db, login, key, redirect_url='/'):
wsgienv = req.httprequest.environ
env = dict(
base_location=req.httprequest.url_root.rstrip('/'),
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
req.session.authenticate(db, login, key, env)
return set_cookie_and_redirect(req, redirect_url)
def set_cookie_and_redirect(req, redirect_url):
redirect = werkzeug.utils.redirect(redirect_url, 303)
redirect.autocorrect_location_header = False
cookie_val = urllib2.quote(simplejson.dumps(req.session_id))
redirect.set_cookie('instance0|session_id', cookie_val)
return redirect
def load_actions_from_ir_values(req, key, key2, models, meta):
Values = req.session.model('ir.values')
actions = Values.get(key, key2, models, meta, req.context)
return [(id, name, clean_action(req, action))
for id, name, action in actions]
def clean_action(req, action):
action.setdefault('flags', {})
action_type = action.setdefault('type', 'ir.actions.act_window_close')
if action_type == 'ir.actions.act_window':
return fix_view_modes(action)
return action
# TODO: generate_views and fix_view_modes should probably move into the JS ActionManager
def generate_views(action):
"""
While the server generates a sequence called "views" computing dependencies
between a bunch of stuff for views coming directly from the database
(the ``ir.actions.act_window`` model), it's also possible for e.g. buttons
to return custom view dictionaries generated on the fly.
In that case, there is no ``views`` key available on the action.
Since the web client relies on ``action['views']``, generate it here from
``view_mode`` and ``view_id``.
Currently handles two different cases:
* no view_id, multiple view_mode
* single view_id, single view_mode
:param dict action: action descriptor dictionary to generate a views key for
"""
view_id = action.get('view_id') or False
if isinstance(view_id, (list, tuple)):
view_id = view_id[0]
# providing at least one view mode is a requirement, not an option
view_modes = action['view_mode'].split(',')
if len(view_modes) > 1:
if view_id:
raise ValueError('Non-db action dictionaries should provide '
'either multiple view modes or a single view '
'mode and an optional view id.\n\n Got view '
'modes %r and view id %r for action %r' % (
view_modes, view_id, action))
action['views'] = [(False, mode) for mode in view_modes]
return
action['views'] = [(view_id, view_modes[0])]
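# Examples (derived from the docstring above):
#
#   a = {'view_mode': 'tree,form'}             # no view_id, multiple modes
#   generate_views(a)                          # a['views'] == [(False, 'tree'),
#                                              #                (False, 'form')]
#   b = {'view_mode': 'form', 'view_id': 42}   # single view_id, single mode
#   generate_views(b)                          # b['views'] == [(42, 'form')]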
def fix_view_modes(action):
""" For historical reasons, OpenERP has weird dealings in relation to
view_mode and the view_type attribute (on window actions):
* one of the view modes is ``tree``, which stands for both list views
and tree views
* the choice is made by checking ``view_type``, which is either
``form`` for a list view or ``tree`` for an actual tree view
This method simply folds the view_type into view_mode by adding a
new view mode ``list`` which is the result of the ``tree`` view_mode
in conjunction with the ``form`` view_type.
TODO: this should go into the doc, some kind of "peculiarities" section
:param dict action: an action descriptor
:returns: nothing, the action is modified in place
"""
if not action.get('views'):
generate_views(action)
if action.pop('view_type', 'form') != 'form':
return action
if 'view_mode' in action:
action['view_mode'] = ','.join(
mode if mode != 'tree' else 'list'
for mode in action['view_mode'].split(','))
action['views'] = [
[id, mode if mode != 'tree' else 'list']
for id, mode in action['views']
]
return action
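# Example (derived from the docstring above): with the default view_type
# 'form', every 'tree' mode is folded into 'list':
#
#   act = {'view_type': 'form', 'view_mode': 'tree,form',
#          'views': [(1, 'tree'), (2, 'form')]}
#   fix_view_modes(act)
#   # act['view_mode'] == 'list,form'
#   # act['views'] == [[1, 'list'], [2, 'form']]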
def _local_web_translations(trans_file):
messages = []
try:
with open(trans_file) as t_file:
po = babel.messages.pofile.read_po(t_file)
except Exception:
return
for x in po:
if x.id and x.string and "openerp-web" in x.auto_comments:
messages.append({'id': x.id, 'string': x.string})
return messages
def xml2json_from_elementtree(el, preserve_whitespaces=False):
""" xml2json-direct
Simple and straightforward XML-to-JSON converter in Python
New BSD Licensed
http://code.google.com/p/xml2json-direct/
"""
res = {}
if el.tag[0] == "{":
ns, name = el.tag.rsplit("}", 1)
res["tag"] = name
res["namespace"] = ns[1:]
else:
res["tag"] = el.tag
res["attrs"] = {}
for k, v in el.items():
res["attrs"][k] = v
kids = []
if el.text and (preserve_whitespaces or el.text.strip() != ''):
kids.append(el.text)
for kid in el:
kids.append(xml2json_from_elementtree(kid, preserve_whitespaces))
if kid.tail and (preserve_whitespaces or kid.tail.strip() != ''):
kids.append(kid.tail)
res["children"] = kids
return res
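# Example (illustrative): an element <p class="x">hi</p> converts to
#   {'tag': 'p', 'attrs': {'class': 'x'}, 'children': ['hi']}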
def content_disposition(filename, req):
filename = filename.encode('utf8')
escaped = urllib2.quote(filename)
browser = req.httprequest.user_agent.browser
version = int((req.httprequest.user_agent.version or '0').split('.')[0])
if browser == 'msie' and version < 9:
return "attachment; filename=%s" % escaped
elif browser == 'safari':
return "attachment; filename=%s" % filename
else:
return "attachment; filename*=UTF-8''%s" % escaped
#----------------------------------------------------------
# OpenERP Web Controllers
#----------------------------------------------------------
html_template = """<!DOCTYPE html>
<html style="height: 100%%">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>OpenERP</title>
<link rel="shortcut icon" href="/web/static/src/img/favicon.ico" type="image/x-icon"/>
<link rel="stylesheet" href="/web/static/src/css/full.css" />
%(css)s
%(js)s
<script type="text/javascript">
$(function() {
var s = new openerp.init(%(modules)s);
%(init)s
});
</script>
</head>
<body>
<!--[if lte IE 8]>
<script src="//ajax.googleapis.com/ajax/libs/chrome-frame/1/CFInstall.min.js"></script>
<script>CFInstall.check({mode: "overlay"});</script>
<![endif]-->
</body>
</html>
"""
class Home(openerpweb.Controller):
_cp_path = '/'
@openerpweb.httprequest
def index(self, req, s_action=None, db=None, **kw):
db, redir = db_monodb_redirect(req)
if redir:
return redirect_with_hash(req, redir)
js = "\n ".join('<script type="text/javascript" src="%s"></script>' % i for i in manifest_list(req, 'js', db=db))
css = "\n ".join('<link rel="stylesheet" href="%s">' % i for i in manifest_list(req, 'css', db=db))
r = html_template % {
'js': js,
'css': css,
'modules': simplejson.dumps(module_boot(req, db=db)),
'init': 'var wc = new s.web.WebClient();wc.appendTo($(document.body));'
}
return r
@openerpweb.httprequest
def login(self, req, db, login, key):
if db not in db_list(req, True):
return werkzeug.utils.redirect('/', 303)
return login_and_redirect(req, db, login, key)
class WebClient(openerpweb.Controller):
_cp_path = "/web/webclient"
@openerpweb.jsonrequest
def csslist(self, req, mods=None):
return manifest_list(req, 'css', mods=mods)
@openerpweb.jsonrequest
def jslist(self, req, mods=None):
return manifest_list(req, 'js', mods=mods)
@openerpweb.jsonrequest
def qweblist(self, req, mods=None):
return manifest_list(req, 'qweb', mods=mods)
@openerpweb.httprequest
def css(self, req, mods=None, db=None):
files = list(manifest_glob(req, 'css', addons=mods, db=db))
last_modified = get_last_modified(f[0] for f in files)
if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
file_map = dict(files)
rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U)
rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U)
def reader(f):
"""read the a css file and absolutify all relative uris"""
with open(f, 'rb') as fp:
data = fp.read().decode('utf-8')
path = file_map[f]
web_dir = os.path.dirname(path)
data = re.sub(
rx_import,
r"""@import \1%s/""" % (web_dir,),
data,
)
data = re.sub(
rx_url,
r"""url(\1%s/""" % (web_dir,),
data,
)
return data.encode('utf-8')
content, checksum = concat_files((f[0] for f in files), reader)
# move up all @import and @charset rules to the top
matches = []
def push(matchobj):
matches.append(matchobj.group(0))
return ''
content = re.sub(re.compile("(@charset.+;$)", re.M), push, content)
content = re.sub(re.compile("(@import.+;$)", re.M), push, content)
matches.append(content)
content = '\n'.join(matches)
return make_conditional(
req, req.make_response(content, [('Content-Type', 'text/css')]),
last_modified, checksum)
@openerpweb.httprequest
def js(self, req, mods=None, db=None):
files = [f[0] for f in manifest_glob(req, 'js', addons=mods, db=db)]
last_modified = get_last_modified(files)
if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_js(files)
return make_conditional(
req, req.make_response(content, [('Content-Type', 'application/javascript')]),
last_modified, checksum)
@openerpweb.httprequest
def qweb(self, req, mods=None, db=None):
files = [f[0] for f in manifest_glob(req, 'qweb', addons=mods, db=db)]
last_modified = get_last_modified(files)
if req.httprequest.if_modified_since and req.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_xml(files)
return make_conditional(
req, req.make_response(content, [('Content-Type', 'text/xml')]),
last_modified, checksum)
@openerpweb.jsonrequest
def bootstrap_translations(self, req, mods):
""" Load local translations from *.po files, as a temporary solution
until we have established a valid session. This is meant only
for translating the login page and db management chrome, using
the browser's language. """
# For performance reasons we only load a single translation, so for
# sub-languages (that should only be partially translated) we load the
# main language PO instead - that should be enough for the login screen.
lang = req.lang.split('_')[0]
translations_per_module = {}
for addon_name in mods:
if openerpweb.addons_manifest[addon_name].get('bootstrap'):
addons_path = openerpweb.addons_manifest[addon_name]['addons_path']
f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
if not os.path.exists(f_name):
continue
translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}
return {"modules": translations_per_module,
"lang_parameters": None}
@openerpweb.jsonrequest
def translations(self, req, mods, lang):
res_lang = req.session.model('res.lang')
ids = res_lang.search([("code", "=", lang)])
lang_params = None
if ids:
lang_params = res_lang.read(ids[0], ["direction", "date_format", "time_format",
"grouping", "decimal_point", "thousands_sep"])
# Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
# done server-side when the language is loaded, so we only need to load the user's lang.
ir_translation = req.session.model('ir.translation')
translations_per_module = {}
messages = ir_translation.search_read([('module','in',mods),('lang','=',lang),
('comments','like','openerp-web'),('value','!=',False),
('value','!=','')],
['module','src','value','lang'], order='module')
for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
translations_per_module.setdefault(mod,{'messages':[]})
translations_per_module[mod]['messages'].extend({'id': m['src'],
'string': m['value']} \
for m in msg_group)
return {"modules": translations_per_module,
"lang_parameters": lang_params}
@openerpweb.jsonrequest
def version_info(self, req):
return openerp.service.web_services.RPC_VERSION_1
class Proxy(openerpweb.Controller):
_cp_path = '/web/proxy'
@openerpweb.jsonrequest
def load(self, req, path):
""" Proxies an HTTP request through a JSON request.
It is strongly recommended not to request binary files through this,
as the result will be a binary data blob as well.
:param req: OpenERP request
:param path: actual request path
:return: file content
"""
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
return Client(req.httprequest.app, BaseResponse).get(path).data
class Database(openerpweb.Controller):
_cp_path = "/web/database"
@openerpweb.jsonrequest
def get_list(self, req):
# TODO change js to avoid calling this method if in monodb mode
try:
return db_list(req)
except xmlrpclib.Fault:
monodb = db_monodb(req)
if monodb:
return [monodb]
raise
@openerpweb.jsonrequest
def create(self, req, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
return req.session.proxy("db").create_database(
params['super_admin_pwd'],
params['db_name'],
bool(params.get('demo_data')),
params['db_lang'],
params['create_admin_pwd'])
@openerpweb.jsonrequest
def duplicate(self, req, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
duplicate_attrs = (
params['super_admin_pwd'],
params['db_original_name'],
params['db_name'],
)
return req.session.proxy("db").duplicate_database(*duplicate_attrs)
@openerpweb.jsonrequest
def drop(self, req, fields):
password, db = operator.itemgetter(
'drop_pwd', 'drop_db')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
if req.session.proxy("db").drop(password, db):return True
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
return {'error': e.faultCode, 'title': 'Drop Database'}
return {'error': _('Could not drop database !'), 'title': _('Drop Database')}
@openerpweb.httprequest
def backup(self, req, backup_db, backup_pwd, token):
try:
db_dump = base64.b64decode(
req.session.proxy("db").dump(backup_pwd, backup_db))
filename = "%(db)s_%(timestamp)s.dump" % {
'db': backup_db,
'timestamp': datetime.datetime.utcnow().strftime(
"%Y-%m-%d_%H-%M-%SZ")
}
return req.make_response(db_dump,
[('Content-Type', 'application/octet-stream; charset=binary'),
('Content-Disposition', content_disposition(filename, req))],
{'fileToken': token}
)
except xmlrpclib.Fault, e:
return simplejson.dumps([[],[{'error': e.faultCode, 'title': _('Backup Database')}]])
@openerpweb.httprequest
def restore(self, req, db_file, restore_pwd, new_db):
try:
data = base64.b64encode(db_file.read())
req.session.proxy("db").restore(restore_pwd, new_db, data)
return ''
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
raise Exception("AccessDenied")
@openerpweb.jsonrequest
def change_password(self, req, fields):
old_password, new_password = operator.itemgetter(
'old_pwd', 'new_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
return req.session.proxy("db").change_admin_password(old_password, new_password)
except xmlrpclib.Fault, e:
if e.faultCode and e.faultCode.split(':')[0] == 'AccessDenied':
return {'error': e.faultCode, 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
class Session(openerpweb.Controller):
_cp_path = "/web/session"
def session_info(self, req):
req.session.ensure_valid()
return {
"session_id": req.session_id,
"uid": req.session._uid,
"user_context": req.session.get_context() if req.session._uid else {},
"db": req.session._db,
"username": req.session._login,
}
@openerpweb.jsonrequest
def get_session_info(self, req):
return self.session_info(req)
@openerpweb.jsonrequest
def authenticate(self, req, db, login, password, base_location=None):
wsgienv = req.httprequest.environ
env = dict(
base_location=base_location,
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
req.session.authenticate(db, login, password, env)
return self.session_info(req)
@openerpweb.jsonrequest
def change_password(self, req, fields):
old_password, new_password, confirm_password = operator.itemgetter(
'old_pwd', 'new_password', 'confirm_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
return {'error':_('You cannot leave any password empty.'),'title': _('Change Password')}
if new_password != confirm_password:
return {'error': _('The new password and its confirmation must be identical.'),'title': _('Change Password')}
try:
if req.session.model('res.users').change_password(
old_password, new_password):
return {'new_password':new_password}
except Exception:
return {'error': _('The old password you provided is incorrect, your password was not changed.'), 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
@openerpweb.jsonrequest
def sc_list(self, req):
return req.session.model('ir.ui.view_sc').get_sc(
req.session._uid, "ir.ui.menu", req.context)
@openerpweb.jsonrequest
def get_lang_list(self, req):
try:
return req.session.proxy("db").list_lang() or []
except Exception, e:
return {"error": e, "title": _("Languages")}
@openerpweb.jsonrequest
def modules(self, req):
# return all installed modules. Web client is smart enough to not load a module twice
return module_installed(req)
@openerpweb.jsonrequest
def save_session_action(self, req, the_action):
"""
This method stores an action object in the session and returns an integer
key identifying that action. The method get_session_action() can be used
to retrieve the action later.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
saved_actions = req.httpsession.get('saved_actions')
if not saved_actions:
saved_actions = {"next":1, "actions":{}}
req.httpsession['saved_actions'] = saved_actions
# we don't allow more than 10 stored actions
if len(saved_actions["actions"]) >= 10:
del saved_actions["actions"][min(saved_actions["actions"])]
key = saved_actions["next"]
saved_actions["actions"][key] = the_action
saved_actions["next"] = key + 1
return key
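# Usage sketch (illustrative): a client stores an action and receives an
# integer key, then retrieves the action later with get_session_action():
#
#   key = self.save_session_action(req, {'type': 'ir.actions.act_window',
#                                        'res_model': 'res.partner'})
#   assert self.get_session_action(req, key)['res_model'] == 'res.partner'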
@openerpweb.jsonrequest
def get_session_action(self, req, key):
"""
Gets back a previously saved action. This method may return None if the
action was saved too long ago (callers should handle that case gracefully).
:param key: The key given by save_session_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
saved_actions = req.httpsession.get('saved_actions')
if not saved_actions:
return None
return saved_actions["actions"].get(key)
@openerpweb.jsonrequest
def check(self, req):
req.session.assert_valid()
return None
@openerpweb.jsonrequest
def destroy(self, req):
req.session._suicide = True
class Menu(openerpweb.Controller):
_cp_path = "/web/menu"
@openerpweb.jsonrequest
def get_user_roots(self, req):
""" Return all root menu ids visible for the session user.
:param req: A request object, with an OpenERP session attribute
:type req: < session -> OpenERPSession >
:return: the root menu ids
:rtype: list(int)
"""
s = req.session
Menus = s.model('ir.ui.menu')
# If a menu action is defined use its domain to get the root menu items
user_menu_id = s.model('res.users').read([s._uid], ['menu_id'],
req.context)[0]['menu_id']
menu_domain = [('parent_id', '=', False)]
if user_menu_id:
domain_string = s.model('ir.actions.act_window').read(
[user_menu_id[0]], ['domain'],req.context)[0]['domain']
if domain_string:
menu_domain = ast.literal_eval(domain_string)
return Menus.search(menu_domain, 0, False, False, req.context)
@openerpweb.jsonrequest
def load(self, req):
""" Loads all menu items (all applications and their sub-menus).
:param req: A request object, with an OpenERP session attribute
:type req: < session -> OpenERPSession >
:return: the menu root
:rtype: dict('children': menu_nodes)
"""
Menus = req.session.model('ir.ui.menu')
fields = ['name', 'sequence', 'parent_id', 'action']
menu_root_ids = self.get_user_roots(req)
menu_roots = Menus.read(menu_root_ids, fields, req.context) if menu_root_ids else []
menu_root = {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
if not menu_roots:
return menu_root
# menus are loaded fully, unlike a regular tree view, because there is a
# limited number of items (752 when all 6.1 addons are installed)
menu_ids = Menus.search([('id', 'child_of', menu_root_ids)], 0, False, False, req.context)
menu_items = Menus.read(menu_ids, fields, req.context)
# adds roots at the end of the sequence, so that they will overwrite
# equivalent menu items from full menu read when put into id:item
# mapping, resulting in children being correctly set on the roots.
menu_items.extend(menu_roots)
menu_root['all_menu_ids'] = menu_ids # includes menu_root_ids!
# make a tree using parent_id
menu_items_map = dict(
(menu_item["id"], menu_item) for menu_item in menu_items)
for menu_item in menu_items:
if menu_item['parent_id']:
parent = menu_item['parent_id'][0]
else:
parent = False
if parent in menu_items_map:
menu_items_map[parent].setdefault(
'children', []).append(menu_item)
# sort each node's children by sequence
for menu_item in menu_items:
menu_item.setdefault('children', []).sort(
key=operator.itemgetter('sequence'))
return menu_root
@openerpweb.jsonrequest
def load_needaction(self, req, menu_ids):
""" Loads needaction counters for specific menu ids.
:return: needaction data
:rtype: dict(menu_id: {'needaction_enabled': boolean, 'needaction_counter': int})
"""
return req.session.model('ir.ui.menu').get_needaction_data(menu_ids, req.context)
@openerpweb.jsonrequest
def action(self, req, menu_id):
# still used by web_shortcut
actions = load_actions_from_ir_values(req,'action', 'tree_but_open',
[('ir.ui.menu', menu_id)], False)
return {"action": actions}
class DataSet(openerpweb.Controller):
_cp_path = "/web/dataset"
@openerpweb.jsonrequest
def search_read(self, req, model, fields=False, offset=0, limit=False, domain=None, sort=None):
return self.do_search_read(req, model, fields, offset, limit, domain, sort)
def do_search_read(self, req, model, fields=False, offset=0, limit=False,
                   domain=None, sort=None):
""" Performs a search() followed by a read() (if needed) using the
provided search criteria
:param req: a JSON-RPC request object
:type req: openerpweb.JsonRequest
:param str model: the name of the model to search on
:param fields: a list of the fields to return in the result records
:type fields: [str]
:param int offset: from which index should the results start being returned
:param int limit: the maximum number of records to return
:param list domain: the search domain for the query
:param list sort: sorting directives
:returns: A structure (dict) with two keys: ``length`` (the total
          number of records matching the (domain, context) pair) and
          ``records`` (the paginated records matching the fields
          selection set)
:rtype: dict
"""
Model = req.session.model(model)
ids = Model.search(domain, offset or 0, limit or False, sort or False,
req.context)
if limit and len(ids) == limit:
length = Model.search_count(domain, req.context)
else:
length = len(ids) + (offset or 0)
if fields and fields == ['id']:
# shortcut read if we only want the ids
return {
'length': length,
'records': [{'id': id} for id in ids]
}
records = Model.read(ids, fields or False, req.context)
index = dict((r['id'], r) for r in records)
records = [index[x] for x in ids if x in index]
return {
'length': length,
'records': records
}
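# Shape of the result (illustrative): 'length' is the total number of
# matching records, 'records' the requested page, e.g.
#   {'length': 42, 'records': [{'id': 1, 'name': 'Agrolait'}, ...]}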
@openerpweb.jsonrequest
def load(self, req, model, id, fields):
m = req.session.model(model)
value = {}
r = m.read([id], False, req.context)
if r:
value = r[0]
return {'value': value}
def call_common(self, req, model, method, args, domain_id=None, context_id=None):
return self._call_kw(req, model, method, args, {})
def _call_kw(self, req, model, method, args, kwargs):
# Temporarily implements the future display_name special field for model#read()
if method in ('read', 'search_read') and kwargs.get('context', {}).get('future_display_name'):
if 'display_name' in args[1]:
if method == 'read':
names = dict(req.session.model(model).name_get(args[0], **kwargs))
else:
names = dict(req.session.model(model).name_search('', args[0], **kwargs))
args[1].remove('display_name')
records = getattr(req.session.model(model), method)(*args, **kwargs)
for record in records:
record['display_name'] = \
names.get(record['id']) or "%s#%d" % (model, (record['id']))
return records
return getattr(req.session.model(model), method)(*args, **kwargs)
@openerpweb.jsonrequest
def call(self, req, model, method, args, domain_id=None, context_id=None):
return self._call_kw(req, model, method, args, {})
@openerpweb.jsonrequest
def call_kw(self, req, model, method, args, kwargs):
return self._call_kw(req, model, method, args, kwargs)
@openerpweb.jsonrequest
def call_button(self, req, model, method, args, domain_id=None, context_id=None):
action = self._call_kw(req, model, method, args, {})
if isinstance(action, dict) and action.get('type') != '':
return clean_action(req, action)
return False
@openerpweb.jsonrequest
def exec_workflow(self, req, model, id, signal):
return req.session.exec_workflow(model, id, signal)
@openerpweb.jsonrequest
def resequence(self, req, model, ids, field='sequence', offset=0):
""" Re-sequences a number of records in the model, by their ids
The re-sequencing starts at the first record of ``ids``; the sequence
number is incremented by one after each record, starting at ``offset``
:param ids: identifiers of the records to resequence, in the new sequence order
:type ids: list(id)
:param str field: field used for sequence specification, defaults to
"sequence"
:param int offset: sequence number for first record in ``ids``, allows
starting the resequencing from an arbitrary number,
defaults to ``0``
"""
m = req.session.model(model)
if not m.fields_get([field]):
return False
# python 2.6's enumerate() has no start parameter
for i, id in enumerate(ids):
m.write(id, { field: i + offset })
return True
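# Example (illustrative): resequence(req, 'ir.ui.menu', [7, 3, 5]) writes
# sequence=0 on record 7, sequence=1 on record 3 and sequence=2 on record 5.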
class View(openerpweb.Controller):
_cp_path = "/web/view"
@openerpweb.jsonrequest
def add_custom(self, req, view_id, arch):
CustomView = req.session.model('ir.ui.view.custom')
CustomView.create({
'user_id': req.session._uid,
'ref_id': view_id,
'arch': arch
}, req.context)
return {'result': True}
@openerpweb.jsonrequest
def undo_custom(self, req, view_id, reset=False):
CustomView = req.session.model('ir.ui.view.custom')
vcustom = CustomView.search([('user_id', '=', req.session._uid), ('ref_id' ,'=', view_id)],
0, False, False, req.context)
if vcustom:
if reset:
CustomView.unlink(vcustom, req.context)
else:
CustomView.unlink([vcustom[0]], req.context)
return {'result': True}
return {'result': False}
class TreeView(View):
_cp_path = "/web/treeview"
@openerpweb.jsonrequest
def action(self, req, model, id):
return load_actions_from_ir_values(
req,'action', 'tree_but_open',[(model, id)],
False)
class Binary(openerpweb.Controller):
_cp_path = "/web/binary"
@openerpweb.httprequest
def image(self, req, model, id, field, **kw):
last_update = '__last_update'
Model = req.session.model(model)
headers = [('Content-Type', 'image/png')]
etag = req.httprequest.headers.get('If-None-Match')
hashed_session = hashlib.md5(req.session_id).hexdigest()
id = None if not id else simplejson.loads(id)
if type(id) is list:
id = id[0] # m2o
if etag:
if not id and hashed_session == etag:
return werkzeug.wrappers.Response(status=304)
else:
date = Model.read([id], [last_update], req.context)[0].get(last_update)
if hashlib.md5(date).hexdigest() == etag:
return werkzeug.wrappers.Response(status=304)
retag = hashed_session
try:
if not id:
res = Model.default_get([field], req.context).get(field)
image_base64 = res
else:
res = Model.read([id], [last_update, field], req.context)[0]
retag = hashlib.md5(res.get(last_update)).hexdigest()
image_base64 = res.get(field)
if kw.get('resize'):
resize = kw.get('resize').split(',')
if len(resize) == 2 and int(resize[0]) and int(resize[1]):
width = int(resize[0])
height = int(resize[1])
# resize maximum 500*500
if width > 500: width = 500
if height > 500: height = 500
image_base64 = openerp.tools.image_resize_image(base64_source=image_base64, size=(width, height), encoding='base64', filetype='PNG')
image_data = base64.b64decode(image_base64)
except (TypeError, xmlrpclib.Fault):
image_data = self.placeholder(req)
headers.append(('ETag', retag))
headers.append(('Content-Length', len(image_data)))
try:
ncache = int(kw.get('cache'))
headers.append(('Cache-Control', 'no-cache' if ncache == 0 else 'max-age=%s' % (ncache)))
except (TypeError, ValueError):
pass
return req.make_response(image_data, headers)
def placeholder(self, req, image='placeholder.png'):
addons_path = openerpweb.addons_manifest['web']['addons_path']
return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()
@openerpweb.httprequest
def saveas(self, req, model, field, id=None, filename_field=None, **kw):
""" Download link for files stored as binary fields.
If the ``id`` parameter is omitted, fetches the default value for the
binary field (via ``default_get``), otherwise fetches the field for
that precise record.
:param req: OpenERP request
:type req: :class:`web.common.http.HttpRequest`
:param str model: name of the model to fetch the binary from
:param str field: binary field
:param str id: id of the record from which to fetch the binary
:param str filename_field: field holding the file's name, if any
:returns: :class:`werkzeug.wrappers.Response`
"""
Model = req.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if id:
res = Model.read([int(id)], fields, req.context)[0]
else:
res = Model.default_get(fields, req.context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
return req.not_found()
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return req.make_response(filecontent,
[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename, req))])
@openerpweb.httprequest
def saveas_ajax(self, req, data, token):
jdata = simplejson.loads(data)
model = jdata['model']
field = jdata['field']
data = jdata['data']
id = jdata.get('id', None)
filename_field = jdata.get('filename_field', None)
context = jdata.get('context', {})
Model = req.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if data:
res = { field: data }
elif id:
res = Model.read([int(id)], fields, context)[0]
else:
res = Model.default_get(fields, context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
raise ValueError(_("No content found for field '%s' on '%s:%s'") %
(field, model, id))
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return req.make_response(filecontent,
headers=[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename, req))],
cookies={'fileToken': token})
@openerpweb.httprequest
def upload(self, req, callback, ufile):
# TODO: might be useful to have a configuration flag for max-length file uploads
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
data = ufile.read()
args = [len(data), ufile.filename,
ufile.content_type, base64.b64encode(data)]
except Exception, e:
args = [False, e.message]
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@openerpweb.httprequest
def upload_attachment(self, req, callback, model, id, ufile):
Model = req.session.model('ir.attachment')
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
attachment_id = Model.create({
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': model,
'res_id': int(id)
}, req.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
except xmlrpclib.Fault, e:
args = {'error':e.faultCode }
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@openerpweb.httprequest
def company_logo(self, req, dbname=None):
# TODO add etag, refactor to use /image code for etag
uid = None
if req.session._db:
dbname = req.session._db
uid = req.session._uid
elif dbname is None:
dbname = db_monodb(req)
if not uid:
uid = openerp.SUPERUSER_ID
if not dbname:
image_data = self.placeholder(req, 'logo.png')
else:
try:
# create an empty registry
registry = openerp.modules.registry.Registry(dbname)
with registry.cursor() as cr:
cr.execute("""SELECT c.logo_web
FROM res_users u
LEFT JOIN res_company c
ON c.id = u.company_id
WHERE u.id = %s
""", (uid,))
row = cr.fetchone()
if row and row[0]:
image_data = str(row[0]).decode('base64')
else:
image_data = self.placeholder(req, 'nologo.png')
except Exception:
image_data = self.placeholder(req, 'logo.png')
headers = [
('Content-Type', 'image/png'),
('Content-Length', len(image_data)),
]
return req.make_response(image_data, headers)
class Action(openerpweb.Controller):
_cp_path = "/web/action"
@openerpweb.jsonrequest
def load(self, req, action_id, do_not_eval=False):
Actions = req.session.model('ir.actions.actions')
value = False
try:
action_id = int(action_id)
except ValueError:
try:
module, xmlid = action_id.split('.', 1)
model, action_id = req.session.model('ir.model.data').get_object_reference(module, xmlid)
assert model.startswith('ir.actions.')
except Exception:
action_id = 0 # force failed read
base_action = Actions.read([action_id], ['type'], req.context)
if base_action:
ctx = {}
action_type = base_action[0]['type']
if action_type == 'ir.actions.report.xml':
ctx.update({'bin_size': True})
ctx.update(req.context)
action = req.session.model(action_type).read([action_id], False, ctx)
if action:
value = clean_action(req, action[0])
return value
@openerpweb.jsonrequest
def run(self, req, action_id):
return_action = req.session.model('ir.actions.server').run(
[action_id], req.context)
if return_action:
return clean_action(req, return_action)
else:
return False
class Export(openerpweb.Controller):
_cp_path = "/web/export"
@openerpweb.jsonrequest
def formats(self, req):
""" Returns all valid export formats
:returns: for each export format, a pair of identifier and printable name
:rtype: [(str, str)]
"""
return sorted([
controller.fmt
for path, controller in openerpweb.controllers_path.iteritems()
if path.startswith(self._cp_path)
if hasattr(controller, 'fmt')
], key=operator.itemgetter("label"))
def fields_get(self, req, model):
Model = req.session.model(model)
fields = Model.fields_get(False, req.context)
return fields
@openerpweb.jsonrequest
def get_fields(self, req, model, prefix='', parent_name='',
               import_compat=True, parent_field_type=None,
               exclude=None):
if import_compat and parent_field_type == "many2one":
fields = {}
else:
fields = self.fields_get(req, model)
if import_compat:
fields.pop('id', None)
else:
fields['.id'] = fields.pop('id', {'string': 'ID'})
fields_sequence = sorted(fields.iteritems(),
key=lambda field: field[1].get('string', ''))
records = []
for field_name, field in fields_sequence:
if import_compat:
if exclude and field_name in exclude:
continue
if field.get('readonly'):
# If none of the field's states unsets readonly, skip the field
if all(dict(attrs).get('readonly', True)
for attrs in field.get('states', {}).values()):
continue
if not field.get('exportable', True):
continue
id = prefix + (prefix and '/' or '') + field_name
name = parent_name + (parent_name and '/' or '') + field['string']
record = {'id': id, 'string': name,
'value': id, 'children': False,
'field_type': field.get('type'),
'required': field.get('required'),
'relation_field': field.get('relation_field')}
records.append(record)
if len(name.split('/')) < 3 and 'relation' in field:
ref = field.pop('relation')
record['value'] += '/id'
record['params'] = {'model': ref, 'prefix': id, 'name': name}
if not import_compat or field['type'] == 'one2many':
# m2m field in import_compat is childless
record['children'] = True
return records
@openerpweb.jsonrequest
def namelist(self, req, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = req.session.model("ir.exports").read([export_id])[0]
export_fields_list = req.session.model("ir.exports.line").read(
export['export_fields'])
fields_data = self.fields_info(
req, model, map(operator.itemgetter('name'), export_fields_list))
return [
{'name': field['name'], 'label': fields_data[field['name']]}
for field in export_fields_list
]
def fields_info(self, req, model, export_fields):
info = {}
fields = self.fields_get(req, model)
if ".id" in export_fields:
fields['.id'] = fields.pop('id', {'string': 'ID'})
# To make fields retrieval more efficient, fetch all sub-fields of a
# given field at the same time. Because the order in the export list is
# arbitrary, this requires ordering all sub-fields of a given field
# together so they can be fetched at the same time
#
# Works the following way:
# * sort the list of fields to export, the default sorting order will
# put the field itself (if present, for xmlid) and all of its
# sub-fields right after it
# * then, group on: the first segment of the path (which is the same for
#   a field and for its subfields) and the length of splitting on the
#   first '/', which basically means grouping the field on one side and
# all of the subfields on the other. This way, we have the field (for
# the xmlid) with length 1, and all of the subfields with the same
# base but a length "flag" of 2
# * if we have a normal field (length 1), just add it to the info
# mapping (with its string) as-is
# * otherwise, recursively call fields_info via graft_subfields.
# all graft_subfields does is take the result of fields_info (on the
# field's model) and prepend the current base (current field), which
# rebuilds the whole sub-tree for the field
#
# result: because we're not fetching the fields_get for half the
# database models, fetching a namelist with a dozen fields (including
# relational data) falls from ~6s to ~300ms (on the leads model).
# export lists with no sub-fields (e.g. import_compatible lists with
# no o2m) are even more efficient (from the same 6s to ~170ms, as
# there's a single fields_get to execute)
for (base, length), subfields in itertools.groupby(
sorted(export_fields),
lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
subfields = list(subfields)
if length == 2:
# subfields is a seq of $base/*rest, and not loaded yet
info.update(self.graft_subfields(
req, fields[base]['relation'], base, fields[base]['string'],
subfields
))
elif base in fields:
info[base] = fields[base]['string']
return info
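# Example of the grouping (illustrative): exporting
#   ['name', 'partner_id', 'partner_id/name']
# sorts into groups keyed ('name', 1), ('partner_id', 1), ('partner_id', 2);
# the length-1 groups are plain fields, while ('partner_id', 2) triggers a
# single recursive fields_info() on the partner model for all its sub-fields.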
def graft_subfields(self, req, model, prefix, prefix_string, fields):
export_fields = [field.split('/', 1)[1] for field in fields]
return (
(prefix + '/' + k, prefix_string + '/' + v)
for k, v in self.fields_info(req, model, export_fields).iteritems())
class ExportFormat(object):
@property
def content_type(self):
""" Provides the format's content type """
raise NotImplementedError()
def filename(self, base):
""" Creates a valid filename for the format (with extension) from the
provided base name (exension-less)
"""
raise NotImplementedError()
def from_data(self, fields, rows):
""" Conversion method from OpenERP's export data to whatever the
current export class outputs
:param list fields: a list of field names to export
:param list rows: a list of records to export
:returns: the content of the exported file
:rtype: bytes
"""
raise NotImplementedError()
@openerpweb.httprequest
def index(self, req, data, token):
params = simplejson.loads(data)
model, fields, ids, domain, import_compat = \
operator.itemgetter('model', 'fields', 'ids', 'domain',
'import_compat')(
params)
Model = req.session.model(model)
context = dict(req.context or {}, **params.get('context', {}))
ids = ids or Model.search(domain, 0, False, False, context)
field_names = map(operator.itemgetter('name'), fields)
import_data = Model.export_data(ids, field_names, context).get('datas',[])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val['label'].strip() for val in fields]
return req.make_response(self.from_data(columns_headers, import_data),
headers=[('Content-Disposition',
content_disposition(self.filename(model), req)),
('Content-Type', self.content_type)],
cookies={'fileToken': token})
class CSVExport(ExportFormat, http.Controller):
_cp_path = '/web/export/csv'
fmt = {'tag': 'csv', 'label': 'CSV'}
@property
def content_type(self):
return 'text/csv;charset=utf8'
def filename(self, base):
return base + '.csv'
def from_data(self, fields, rows):
fp = StringIO()
writer = csv.writer(fp, quoting=csv.QUOTE_ALL)
writer.writerow([name.encode('utf-8') for name in fields])
for data in rows:
row = []
for d in data:
if isinstance(d, basestring):
d = d.replace('\n',' ').replace('\t',' ')
try:
d = d.encode('utf-8')
except UnicodeError:
pass
if d is False: d = None
row.append(d)
writer.writerow(row)
fp.seek(0)
data = fp.read()
fp.close()
return data
class ExcelExport(ExportFormat, http.Controller):
_cp_path = '/web/export/xls'
fmt = {
'tag': 'xls',
'label': 'Excel',
'error': None if xlwt else "XLWT required"
}
@property
def content_type(self):
return 'application/vnd.ms-excel'
def filename(self, base):
return base + '.xls'
def from_data(self, fields, rows):
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
for i, fieldname in enumerate(fields):
worksheet.write(0, i, fieldname)
worksheet.col(i).width = 8000 # around 220 pixels
style = xlwt.easyxf('align: wrap yes')
for row_index, row in enumerate(rows):
for cell_index, cell_value in enumerate(row):
if isinstance(cell_value, basestring):
cell_value = re.sub("\r", " ", cell_value)
if cell_value is False: cell_value = None
worksheet.write(row_index + 1, cell_index, cell_value, style)
fp = StringIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
class Reports(openerpweb.Controller):
_cp_path = "/web/report"
POLLING_DELAY = 0.25
TYPES_MAPPING = {
'doc': 'application/vnd.ms-word',
'html': 'text/html',
'odt': 'application/vnd.oasis.opendocument.text',
'pdf': 'application/pdf',
'sxw': 'application/vnd.sun.xml.writer',
'xls': 'application/vnd.ms-excel',
}
@openerpweb.httprequest
def index(self, req, action, token):
action = simplejson.loads(action)
report_srv = req.session.proxy("report")
context = dict(req.context)
context.update(action["context"])
report_data = {}
report_ids = context["active_ids"]
if 'report_type' in action:
report_data['report_type'] = action['report_type']
if 'datas' in action:
if 'ids' in action['datas']:
report_ids = action['datas'].pop('ids')
report_data.update(action['datas'])
report_id = report_srv.report(
req.session._db, req.session._uid, req.session._password,
action["report_name"], report_ids,
report_data, context)
report_struct = None
while True:
report_struct = report_srv.report_get(
req.session._db, req.session._uid, req.session._password, report_id)
if report_struct["state"]:
break
time.sleep(self.POLLING_DELAY)
report = base64.b64decode(report_struct['result'])
if report_struct.get('code') == 'zlib':
report = zlib.decompress(report)
report_mimetype = self.TYPES_MAPPING.get(
report_struct['format'], 'octet-stream')
file_name = action['report_name']
# Try to get current object model and their ids from context
if 'context' in action:
action_context = action['context']
if (action_context.get('active_model')
and action_context['active_ids']):
# Use built-in ORM method to get data from DB
m = req.session.model(action_context['active_model'])
r = []
try:
r = m.name_get(action_context['active_ids'], context)
except xmlrpclib.Fault:
# we assume this went wrong because of an incorrect/missing
# _rec_name. We don't have access to _columns here to do
# a proper check.
pass
# Parse result to create a better filename
item_names = [item[1] or str(item[0]) for item in r]
if action.get('name'):
item_names.insert(0, action['name'])
if item_names:
file_name = '-'.join(item_names)[:251]
file_name = '%s.%s' % (file_name, report_struct['format'])
# Create safe filename
p = re.compile('[/:(")<>|?*]|(\\\)')
file_name = p.sub('_', file_name)
return req.make_response(report,
headers=[
('Content-Disposition', content_disposition(file_name, req)),
('Content-Type', report_mimetype),
('Content-Length', len(report))],
cookies={'fileToken': token})
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
gdgellatly/OCB1
|
addons/web/controllers/main.py
|
Python
|
agpl-3.0
| 69,619
| 0.003433
|
"""Test that arguments passed to a script Menu.main(loop=True) execute
properly."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
SCRIPT = "script_1.py"
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
def _cleanup(self):
rmfile("foo")
rmfile("bar")
rmfile("caz")
def setUp(self):
self._cleanup()
self.assertFalse(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def tearDown(self):
self._cleanup()
def test_script_1(self):
result = os.system("python %s x" % SCRIPT)
self.assertEqual(0, result)
self.assertFalse(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def test_script_2(self):
result = os.system("python %s f" % SCRIPT)
self.assertEqual(0, result)
self.assertTrue(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def test_script_3(self):
result = os.system("python %s b" % SCRIPT)
self.assertEqual(0, result)
self.assertFalse(op.exists("foo"))
self.assertTrue(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def test_script_4(self):
result = os.system("python %s f b" % SCRIPT)
self.assertEqual(0, result)
self.assertTrue(op.exists("foo"))
self.assertTrue(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def test_script_5(self):
result = os.system("python %s c" % SCRIPT)
self.assertEqual(0, result)
self.assertFalse(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertTrue(op.exists("caz"))
def test_script_6(self):
result = os.system("python %s c f" % SCRIPT)
self.assertEqual(0, result)
self.assertTrue(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertTrue(op.exists("caz"))
def test_script_7(self):
result = os.system("python %s -d" % SCRIPT)
self.assertEqual(0, result)
self.assertFalse(op.exists("foo"))
self.assertTrue(op.exists("bar"))
self.assertFalse(op.exists("caz"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
|
jeffrimko/Qprompt
|
tests/script_test_1.py
|
Python
|
mit
| 3,045
| 0.004598
|
from io import StringIO
import csv
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import now
from register.forms import CSV_FIELDS, BlacklistedNumberEditForm, WhitelistedNumberEditForm
from register.models import Blacklist, Whitelist, RegistrationCenter, Registration
from register.tests.base import LibyaTest
from register.tests.factories import WhitelistFactory, BlacklistFactory, \
RegistrationCenterFactory, RegistrationFactory
from register.tests.test_center_csv import CenterFileTestMixin
from libya_elections.phone_numbers import get_random_phone_number, format_phone_number
from libya_elections.tests.utils import ResponseCheckerMixin
from libya_site.tests.factories import UserFactory, DEFAULT_USER_PASSWORD
from polling_reports.models import StaffPhone
from polling_reports.tests.factories import StaffPhoneFactory
from staff.tests.base import StaffUserMixin
class ImportBlackWhitelistViewMixin(StaffUserMixin, ResponseCheckerMixin):
"""Base class for TestImportBlacklistView and TestImportWhitelistView.
This doesn't inherit from TestCase, so it isn't executed by itself.
"""
def setUp(self):
super(ImportBlackWhitelistViewMixin, self).setUp()
# subclasses are expected to set self.url, self.model and self.factory
# before calling this setUp() (see the concrete test classes below)
def test_staff_can_see_form(self):
rsp = self.client.get(self.url, follow=False)
form = rsp.context['form']
self.assertNotIn('password', form.fields)
self.assertIn('import_file', form.fields)
def test_nonstaff_cant_see_form(self):
self.client.logout()
self.nonstaff_user = UserFactory(username='joe', password='puppy')
self.client.login(username='joe', password='puppy')
self.assertForbidden(self.client.get(self.url))
def test_valid_form(self):
# with all combinations of line endings (\r\n, \n, \r)
numbers = [get_random_phone_number() for i in range(4)]
punctuated_numbers = [format_phone_number(number)
for number in numbers]
file_content = ("""%s\r\n%s\n \n%s\r%s""" % (
punctuated_numbers[0],
punctuated_numbers[1],
punctuated_numbers[2],
punctuated_numbers[3],
)).encode()
blackwhitelist_file = ContentFile(file_content, name='bw.txt')
data = {'import_file': blackwhitelist_file}
rsp = self.client.post(self.url, data=data)
# Assert that we redirect
self.assertEqual(302, rsp.status_code)
bwlist = self.model.objects.values_list('phone_number', flat=True)
for number in numbers:
self.assertIn(number, bwlist)
self.assertEqual(len(bwlist), 4)
def test_import_number_twice_works(self):
"Importing a number that is already in list shouldn't cause an error"
number = get_random_phone_number()
self.factory(phone_number=number)
file_content = number.encode()
blackwhitelist_file = ContentFile(file_content, name='bw.txt')
data = {'import_file': blackwhitelist_file}
rsp = self.client.post(self.url, data=data)
# Assert that we redirect
self.assertEqual(302, rsp.status_code)
bwlist = self.model.objects.values_list('phone_number', flat=True)
self.assertEqual(len(bwlist), 1)
self.assertIn(number, bwlist)
def test_import_number_cant_start_with_2180(self):
"Ensures that the number doesn't start with 2180"
number = '218091234123'
file_content = number.encode()
blackwhitelist_file = ContentFile(file_content, name='bw.txt')
data = {'import_file': blackwhitelist_file}
rsp = self.client.post(self.url, data=data, follow=True)
self.assertEqual(200, rsp.status_code)
bwlist = self.model.objects.values_list('phone_number', flat=True)
self.assertEqual(len(bwlist), 0)
self.assertContains(rsp, 'Numbers on these lines not imported because '
'they are not valid phone numbers: 1.')
class TestImportBlacklistView(ImportBlackWhitelistViewMixin, LibyaTest):
"""Exercise uploading a list of blacklisted numbers"""
def setUp(self):
self.model = Blacklist
self.permissions = ('add_blacklist', 'browse_blacklist')
self.url = reverse('blacklisted-numbers-upload')
self.factory = BlacklistFactory
super(TestImportBlacklistView, self).setUp()
class TestImportWhitelistView(ImportBlackWhitelistViewMixin, LibyaTest):
"""Exercise uploading a list of whitelisted numbers"""
def setUp(self):
self.permissions = ('add_whitelist', 'browse_whitelist')
self.model = Whitelist
self.url = reverse('whitelisted-numbers-upload')
self.factory = WhitelistFactory
super(TestImportWhitelistView, self).setUp()
class BlackWhitelistEditFormMixin(StaffUserMixin, ResponseCheckerMixin):
"""Base class for TestBlacklistChangeForm and TestWhitelistChangeForm.
This doesn't inherit from TestCase, so it isn't executed by itself.
"""
def setUp(self):
super(BlackWhitelistEditFormMixin, self).setUp()
# subclasses are expected to set self.factory and self.form
# before calling this setUp() (see the concrete test classes below)
def test_cleans_phone_number(self):
number = get_random_phone_number()
punctuated_number = format_phone_number(number)
form = self.form(data={'phone_number': punctuated_number})
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data['phone_number'], number)
def test_add_dupe_shows_form_error(self):
number = get_random_phone_number()
self.factory(phone_number=number)
form = self.form(data={'phone_number': number})
self.assertFalse(form.is_valid())
self.assertIn('Duplicate value for phone number', list(form.errors.values())[0])
def test_phone_number_cant_start_with_2180(self):
"Ensures the local prefix '0' isn't accidentally included in the phone number"
number = '218091234124'
form = self.form(data={'phone_number': number})
self.assertFalse(form.is_valid())
self.assertIn('Please enter a valid phone number', list(form.errors.values())[0][0])
class TestBlacklistChangeForm(BlackWhitelistEditFormMixin, TestCase):
"""Exercise Blacklist number editing"""
def setUp(self):
super(TestBlacklistChangeForm, self).setUp()
self.factory = BlacklistFactory
self.form = BlacklistedNumberEditForm
class TestWhitelistChangeForm(BlackWhitelistEditFormMixin, TestCase):
"""Exercise Whitelist number editing"""
def setUp(self):
super(TestWhitelistChangeForm, self).setUp()
self.factory = WhitelistFactory
self.form = WhitelistedNumberEditForm
class BlacklistDownload(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['read_blacklist']
model = Blacklist
def test_download_blacklist_file(self):
bl = BlacklistFactory()
rsp = self.client.get(reverse('blacklisted-numbers-download'))
self.assertOK(rsp)
self.assertIn(bl.phone_number, rsp.content.decode())
class WhitelistDownload(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['read_whitelist']
model = Whitelist
def test_download_whitelist_file(self):
wl = WhitelistFactory()
rsp = self.client.get(reverse('whitelisted-numbers-download'))
self.assertOK(rsp)
self.assertIn(wl.phone_number, rsp.content.decode())
class DeleteBlacklist(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['delete_blacklist', 'browse_blacklist']
model = Blacklist
def setUp(self):
super(DeleteBlacklist, self).setUp()
self.url = reverse('blacklisted-numbers-delete')
BlacklistFactory.create_batch(size=3)
def test_get_deleted_page(self):
rsp = self.client.get(self.url)
self.assertOK(rsp)
self.assertIn('Are you sure you want to delete all 3', rsp.content.decode())
def test_post_deleted_page(self):
rsp = self.client.post(self.url, data={'ok': True})
self.assertRedirects(rsp, reverse('browse_blacklistednumbers'))
self.assertEqual(Blacklist.objects.count(), 0)
class DeleteWhitelist(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['delete_whitelist', 'browse_whitelist']
model = Whitelist
def setUp(self):
super(DeleteWhitelist, self).setUp()
self.url = reverse('whitelisted-numbers-delete')
WhitelistFactory.create_batch(size=3)
def test_get_deleted_page(self):
rsp = self.client.get(self.url)
self.assertOK(rsp)
self.assertIn('Are you sure you want to delete all 3', rsp.content.decode())
def test_post_deleted_page(self):
rsp = self.client.post(self.url, data={'ok': True})
self.assertRedirects(rsp, reverse('browse_whitelistednumbers'))
self.assertEqual(Whitelist.objects.count(), 0)
class DeleteStaffPhone(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['delete_staffphone', 'browse_staffphone']
model = StaffPhone
def setUp(self):
super(DeleteStaffPhone, self).setUp()
self.url = reverse('staffphones-delete')
StaffPhoneFactory.create_batch(size=3)
def test_get_deleted_page(self):
rsp = self.client.get(self.url)
self.assertOK(rsp)
self.assertIn('Are you sure you want to delete all 3', rsp.content.decode())
def test_post_deleted_page(self):
rsp = self.client.post(self.url, data={'ok': True})
self.assertRedirects(rsp, reverse('browse_staffphones'))
self.assertEqual(StaffPhone.objects.count(), 0)
class TestDeleteAllCopyCenters(StaffUserMixin, ResponseCheckerMixin, TestCase):
def setUp(self):
super(TestDeleteAllCopyCenters, self).setUp()
self.url = reverse('delete-all-copy-centers')
def add_permission(self, codename):
"""add permission with codename"""
permission = Permission.objects.get(codename=codename)
self.user.user_permissions.add(permission)
def test_permissions(self):
"""ensure permission required to access delete page"""
# no permission, no delete-o
self.assertForbidden(self.client.get(self.url))
self.assertForbidden(self.client.post(self.url, data={'ok': True}))
# Once you have permission, all is well.
self.add_permission('delete_registrationcenter')
# Also add browse so the redirect works
self.add_permission('browse_registrationcenter')
self.assertOK(self.client.get(self.url))
response = self.client.post(self.url, data={'ok': True})
self.assertRedirects(response, reverse('browse_registrationcenters'))
# not logged in ==> redirect
self.client.logout()
self.assertRedirectsToLogin(self.client.get(self.url))
def test_confirmation_page_shows_center_to_be_deleted(self):
"""Ensure user sees what's about to be deleted before it happens"""
self.add_permission('delete_registrationcenter')
self.add_permission('browse_registrationcenter')
# Create some copy centers
original = RegistrationCenterFactory()
copies = [RegistrationCenterFactory(copy_of=original) for i in range(3)]
self.assertEqual(RegistrationCenter.objects.all().count(), 4)
response = self.client.get(self.url)
self.assertOK(response)
self.assertIn('copy_centers', response.context)
context_copy_centers = sorted([center.id for center in response.context['copy_centers']])
copies = sorted([center.id for center in copies])
self.assertEqual(context_copy_centers, copies)
def test_delete_actually_deletes(self):
"""Ensure delete works as advertised"""
original = RegistrationCenterFactory()
RegistrationCenterFactory(copy_of=original)
self.assertEqual(RegistrationCenter.objects.all().count(), 2)
self.add_permission('delete_registrationcenter')
# Also add browse so the redirect works
self.add_permission('browse_registrationcenter')
response = self.client.post(self.url, data={'ok': True})
self.assertRedirects(response, reverse('browse_registrationcenters'))
centers = RegistrationCenter.objects.all()
self.assertEqual(len(centers), 1)
self.assertEqual(centers[0].id, original.id)
class TestRegistrationRead(StaffUserMixin, ResponseCheckerMixin, TestCase):
"""Test the read-registration view"""
permissions = ['read_registration']
model = Registration
def test_no_server_error_if_citizen_is_missing(self):
"""A missing citizen can cause a DoesNotExist error. Be sure to catch it."""
# create registration with a missing citizen
registration = RegistrationFactory(citizen__missing=now())
url = reverse('read_registration', kwargs={'pk': registration.pk})
response = self.client.get(url)
self.assertContains(response, registration.registration_center.center_id)
class TestRegistrationCenterDeleteLogic(StaffUserMixin, ResponseCheckerMixin, TestCase):
"""Ensure that centers with copies can't be deleted"""
permissions = ['delete_registrationcenter', 'read_registrationcenter',
'change_registrationcenter', ]
model = RegistrationCenter
def setUp(self):
super(TestRegistrationCenterDeleteLogic, self).setUp()
self.original = RegistrationCenterFactory()
self.copy = RegistrationCenterFactory(copy_of=self.original)
self.ordinary = RegistrationCenterFactory()
def test_read_and_edit_views_offer_delete_appropriately(self):
"""Ensure the Delete button is available in the read and edit views when appropriate"""
for center, should_offer_delete in ((self.original, False), (self.copy, True),
(self.ordinary, True),):
for url_name in ('read_registrationcenter', 'edit_registrationcenter'):
url = reverse(url_name, kwargs={'pk': center.id})
response = self.client.get(url)
delete_url = reverse('delete_registrationcenter', kwargs={'pk': center.id})
if should_offer_delete:
self.assertContains(response, delete_url)
else:
self.assertNotContains(response, delete_url)
def test_delete_view_available_appropriately(self):
"""Ensure the Delete view can be accessed when appropriate"""
for center, should_offer_delete in ((self.original, False), (self.copy, True),
(self.ordinary, True),):
delete_url = reverse('delete_registrationcenter', kwargs={'pk': center.id})
response = self.client.get(delete_url)
if should_offer_delete:
self.assertOK(response)
else:
self.assertForbidden(response)
class CenterDownload(CenterFileTestMixin, StaffUserMixin, TestCase):
permissions = ['read_registrationcenter']
model = RegistrationCenter
def setUp(self):
super(CenterDownload, self).setUp()
self.download_csv_url = reverse('download-centers-csv')
def test_download_link_is_on_ecc_form(self):
url = reverse('upload-centers-csv')
# Need 'add registrationcenter' to get to the upload page
content_type = ContentType.objects.get_for_model(self.model)
self.user.user_permissions.add(Permission.objects.get(content_type=content_type,
codename='add_registrationcenter'))
rsp = self.client.get(url)
self.assertEqual(200, rsp.status_code)
self.assertContains(rsp, self.download_csv_url)
def test_download_csv_file(self):
# upload the test CSV to get some data in the DB
self.upload_csv()
# Add one with null values
rc_with_nones = RegistrationCenterFactory(name="Center with no center_lat or center_lon",
center_lat=None,
center_lon=None)
self.assertEqual(rc_with_nones.center_lat, None)
self.assertEqual(rc_with_nones.center_lon, None)
# download the CSV file
rsp = self.client.get(self.download_csv_url)
self.assertEqual(200, rsp.status_code)
reader = csv.reader(StringIO(rsp.content.decode()))
for i, field in enumerate(next(reader)):
# check the header row
self.assertEqual(field, CSV_FIELDS[i])
for row in reader:
# check each row against the DB values
self.assertNotIn('None', str(row))
center_id = row[0]
center = RegistrationCenter.objects.get(center_id=center_id)
for i, field in enumerate(CSV_FIELDS):
# center_type is special because it is an integer in the DB, but a string in the CSV
if field == 'center_type':
db_field_as_str = center.get_center_type_display()
else:
db_field_as_str = str(getattr(center, field))
if db_field_as_str == 'None':
db_field_as_str = ''
self.assertEqual(row[i], db_field_as_str)
class RegistrationSearchTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.staff_user = UserFactory()
cls.staff_user.is_staff = True
cls.staff_user.save()
# give this user permission to browse
ct = ContentType.objects.get_for_model(Registration)
perm_codename = 'browse_registration'
perm = Permission.objects.get(content_type=ct, codename=perm_codename)
cls.staff_user.user_permissions.add(perm)
# create 2 registrations, one that we expect to find and one that we expect not to find
cls.nid_we_should_find = 200000000001
cls.phone_we_should_find = '218900000002'
cls.nid_we_should_not_find = 200000000003
cls.phone_we_should_not_find = '218000000004'
cls.nonexistent_nid = 200000000005
cls.nonexistent_phone = '218900000006'
cls.present_reg = RegistrationFactory(
archive_time=None,
citizen__national_id=cls.nid_we_should_find,
sms__from_number=cls.phone_we_should_find)
cls.absent_reg = RegistrationFactory(
archive_time=None,
citizen__national_id=cls.nid_we_should_not_find,
sms__from_number=cls.phone_we_should_not_find)
def setUp(self):
assert self.client.login(username=self.staff_user.username, password=DEFAULT_USER_PASSWORD)
self.browse_url = reverse('browse_registrations')
def test_search_finds_national_id(self):
rsp = self.client.get(self.browse_url, data={'q': self.nid_we_should_find})
self.assertIn(self.present_reg, rsp.context['object_list'])
self.assertNotIn(self.absent_reg, rsp.context['object_list'])
def test_search_finds_phone_number(self):
rsp = self.client.get(self.browse_url, data={'q': self.phone_we_should_find})
self.assertIn(self.present_reg, rsp.context['object_list'])
self.assertNotIn(self.absent_reg, rsp.context['object_list'])
def test_search_strips_whitespace_national_id(self):
rsp = self.client.get(self.browse_url, data={'q': ' %s ' % self.nid_we_should_find})
self.assertIn(self.present_reg, rsp.context['object_list'])
self.assertNotIn(self.absent_reg, rsp.context['object_list'])
def test_search_strips_whitespace_phone_number(self):
rsp = self.client.get(self.browse_url, data={'q': ' %s ' % self.phone_we_should_find})
self.assertIn(self.present_reg, rsp.context['object_list'])
self.assertNotIn(self.absent_reg, rsp.context['object_list'])
def test_empty_search_result(self):
rsp = self.client.get(self.browse_url, data={'q': self.nonexistent_nid})
self.assertEqual(list(rsp.context['object_list']), [])
rsp = self.client.get(self.browse_url, data={'q': self.nonexistent_phone})
self.assertEqual(list(rsp.context['object_list']), [])
def test_not_a_valid_nid_or_phone(self):
rsp = self.client.get(self.browse_url, data={'q': '1234'})
self.assertEqual(list(rsp.context['object_list']), [])
def test_search_for_nondigit(self):
search_term = self.present_reg.citizen.first_name
rsp = self.client.get(self.browse_url, data={'q': search_term})
self.assertIn(self.present_reg, rsp.context['object_list'])
self.assertNotIn(self.absent_reg, rsp.context['object_list'])
|
SmartElect/SmartElect
|
register/tests/test_views.py
|
Python
|
apache-2.0
| 21,018
| 0.001523
|
PREDEFINED_FONT_MAPS = {
(
(193, 0), (193, 1462), (297, 104), (297, 1358), (930, 104),
(930, 1358), (1034, 0), (1034, 1462),
)
: {'char' : '�', 'name' : 'glyph00000'},
(
(115, 276), (115, 804), (287, 518), (291, 653), (292, 325),
(305, 805), (376, 1116), (396, -20), (462, 977), (477, 125),
(594, 977), (596, 1116), (639, -20), (645, 125), (724, 977),
(750, -20), (802, 1116), (822, 125), (864, 653), (864, 810),
(907, 13), (995, 51), (995, 199), (1042, 518), (1042, 623),
(1042, 845),
)
: {'char' : 'e', 'name' : 'glyph00001'},
(
(31, 967), (31, 1047), (188, -20), (188, 315), (188, 967),
(188, 1116), (258, 1350), (354, 223), (354, 322), (354, 967),
(354, 1096), (354, 1350), (448, 117), (506, -20), (530, 117),
(548, -20), (574, 117), (653, -3), (656, 130), (672, 967),
(672, 1096), (680, 10), (680, 137),
)
: {'char' : 't', 'name' : 'glyph00002'},
(
(94, 148), (94, 303), (94, 635), (217, 1020), (268, 211),
(268, 301), (268, 416), (268, 893), (279, -20), (298, 1064),
(377, 117), (441, 977), (442, -20), (443, 525), (475, 117),
(491, 1114), (564, -20), (578, 977), (588, 1114), (625, 651),
(630, 117), (641, 532), (700, 977), (727, 53), (784, 1114),
(807, 287), (807, 440), (807, 539), (809, 156), (811, 657),
(811, 725), (811, 854), (817, 156), (850, 0), (973, 0),
(973, 748), (973, 940),
)
: {'char' : 'a', 'name' : 'glyph00003'},
(
(115, 376), (115, 549), (115, 817), (239, 118), (287, 339),
(287, 549), (287, 761), (383, 1116), (453, 975), (455, 119),
(467, -20), (614, -20), (621, 1116), (781, 119), (781, 975),
(851, 1116), (852, -20), (950, 338), (950, 758), (1122, 281),
(1122, 549), (1122, 810),
)
: {'char' : 'o', 'name' : 'glyph00004'},
(
(176, 0), (176, 1096), (311, 1096), (338, 946), (342, 0),
(342, 575), (342, 789), (346, 946), (397, 1027), (502, 975),
(581, 1116), (674, 975), (694, 1116), (804, 975), (892, 1116),
(926, 0), (926, 709), (926, 843), (1092, 0), (1092, 715),
(1092, 925),
)
: {'char' : 'n', 'name' : 'glyph00005'},
(
(162, 1337), (162, 1393), (162, 1450), (176, 0), (176, 1096),
(218, 1282), (218, 1503), (300, 1282), (300, 1503), (342, 0),
(342, 1096), (358, 1337), (358, 1449),
)
: {'char' : 'i', 'name' : 'glyph00006'},
(
(106, 752), (106, 827), (106, 961), (109, 49), (109, 203),
(169, 644), (188, 163), (231, -20), (264, 794), (264, 838),
(264, 903), (298, 559), (309, 732), (324, 1116), (369, 117),
(384, 977), (409, 676), (449, -20), (451, 502), (453, 117),
(502, 977), (514, 1116), (551, 623), (583, 117), (612, 440),
(636, 977), (655, -20), (691, 1116), (723, 200), (723, 285),
(723, 349), (746, 552), (801, 909), (860, 1044), (883, 146),
(883, 299), (883, 408),
)
: {'char' : 's', 'name' : 'glyph00007'},
(
(176, 0), (176, 1556), (334, 944), (342, 0), (342, 573),
(342, 787), (342, 1000), (342, 1085), (342, 1556), (344, 944),
(393, 1023), (501, 975), (574, 1114), (674, 975), (690, 1114),
(804, 975), (891, 1114), (926, 0), (926, 709), (926, 843),
(1092, 0), (1092, 715), (1092, 923),
)
: {'char' : 'h', 'name' : 'glyph00008'},
(
(176, 0), (176, 1096), (313, 1096), (332, 893), (340, 893),
(342, 0), (342, 588), (342, 749), (401, 1000), (531, 965),
(573, 1116), (664, 965), (676, 1116), (716, 965), (749, 1116),
(784, 950), (807, 1104),
)
: {'char' : 'r', 'name' : 'glyph00009'},
(
(176, 0), (176, 1556), (342, 0), (342, 1556),
)
: {'char' : 'I', 'name' : 'glyph00010'},
(
(115, 274), (115, 816), (287, 333), (287, 543), (287, 750),
(354, -20), (355, 1116), (441, 119), (442, 977), (569, -20),
(569, 1116), (588, 977), (590, 119), (758, 977), (760, 119),
(792, 1116), (798, -20), (911, 954), (913, 147), (913, 304),
(913, 510), (913, 545), (913, 778), (913, 1110), (913, 1556),
(917, 1033), (922, 147), (924, 954), (944, 0), (1079, 0),
(1079, 1556),
)
: {'char' : 'd', 'name' : 'glyph00011'},
(
(164, 170), (164, 379), (164, 1096), (332, 251), (332, 385),
(332, 1096), (363, -20), (454, 119), (563, -20), (584, 119),
(679, -20), (756, 119), (860, 66), (911, 147), (915, 307),
(915, 520), (915, 1096), (920, 147), (944, 0), (1081, 0),
(1081, 1096),
)
: {'char' : 'u', 'name' : 'glyph00012'},
(
(176, 0), (176, 1096), (311, 1096), (338, 946), (342, 0),
(342, 575), (342, 788), (346, 946), (393, 1026), (487, 975),
(564, 1116), (643, 975), (670, 1116), (762, 975), (874, 0),
(874, 713), (874, 844), (927, 1116), (1006, 930), (1014, 930),
(1040, 0), (1040, 612), (1040, 797), (1063, 1016), (1188, 975),
(1249, 1116), (1343, 975), (1368, 1116), (1461, 975), (1554, 1116),
(1573, 0), (1573, 713), (1573, 844), (1739, 0), (1739, 715),
(1739, 925),
)
: {'char' : 'm', 'name' : 'glyph00013'},
(
(39, -332), (39, -186), (39, -86), (125, 633), (125, 741),
(125, 921), (167, 60), (184, 185), (184, 238), (184, 298),
(199, -273), (199, -184), (199, -83), (232, 465), (241, 105),
(248, 388), (272, -492), (283, 86), (289, 630), (289, 745),
(289, 869), (317, 430), (325, 25), (332, 217), (332, 262),
(332, 347), (341, 1116), (349, -365), (406, 174), (415, 995),
(419, 512), (438, 25), (438, 403), (481, 395), (487, -492),
(489, -365), (496, 174), (530, 395), (532, 995), (535, 512),
(539, 1116), (625, 1116), (637, 25), (690, 174), (694, 1096),
(698, -365), (722, 395), (767, -492), (778, 512), (778, 748),
(778, 995), (789, 25), (868, 174), (870, 967), (898, 932),
(899, -240), (899, -133), (899, -44), (942, 587), (942, 748),
(942, 819), (1059, -301), (1059, -119), (1059, 24), (1073, 991),
(1073, 1096),
)
: {'char' : 'g', 'name' : 'glyph00014'},
(
(2, 1096), (16, -475), (16, -342), (71, -354), (90, -492),
(139, -354), (166, -492), (180, 1096), (302, -492), (310, -354),
(383, -162), (420, 471), (444, -6), (489, -337), (499, 257),
(518, 162), (526, 162), (539, 213), (559, -152), (622, 460),
(852, 1096), (1030, 1096),
)
: {'char' : 'y', 'name' : 'glyph00015'},
(
(115, 273), (115, 541), (115, 816), (287, 341), (287, 543),
(287, 969), (376, -20), (380, 1116), (450, 125), (610, 125),
(614, -20), (621, 969), (625, 1116), (671, 969), (704, 1116),
(747, 125), (781, -20), (801, 940), (856, 918), (862, 1082),
(891, 37), (891, 184), (907, 1059),
)
: {'char' : 'c', 'name' : 'glyph00016'},
(
(29, 967), (29, 1042), (225, 0), (225, 967), (225, 1102),
(225, 1163), (225, 1567), (391, 0), (391, 967), (391, 1096),
(391, 1167), (391, 1305), (481, 1430), (575, 1430), (578, 1567),
(643, 1430), (665, 1567), (670, 967), (670, 1096), (739, 1399),
(782, 1532),
)
: {'char' : 'f', 'name' : 'glyph00017'},
(
(23, 1096), (197, 1096), (322, 0), (359, 467), (414, 251),
(422, 176), (430, 176), (441, 233), (490, 414), (508, 467),
(514, 0), (709, 1096), (721, 641), (751, 736), (791, 911),
(799, 911), (851, 702), (870, 643), (889, 1096), (1071, 0),
(1085, 467), (1141, 295), (1161, 178), (1169, 178), (1173, 214),
(1208, 364), (1268, 0), (1399, 1096), (1571, 1096),
)
: {'char' : 'w', 'name' : 'glyph00018'},
(
(176, -492), (176, 1096), (311, 1096), (330, 141), (334, 946),
(342, -492), (342, -41), (342, 45), (342, 141), (342, 318),
(342, 549), (342, 586), (342, 946), (344, 789), (402, 59),
(406, 1036), (494, 975), (496, 119), (576, 1116), (579, -20),
(662, 975), (666, 119), (686, -20), (686, 1116), (808, 119),
(808, 975), (900, -20), (904, 1116), (969, 349), (969, 551),
(969, 756), (1141, 279), (1141, 818),
)
: {'char' : 'p', 'name' : 'glyph00019'},
(
(152, 34), (152, 106), (152, 173), (213, 242), (219, -29),
(270, -29), (327, -29), (328, 242), (393, 41), (393, 173),
)
: {'char' : '.', 'name' : 'glyph00020'},
(
(176, 0), (176, 1556), (295, 0), (330, 141), (334, 950),
(342, 141), (342, 318), (342, 780), (342, 950), (342, 1051),
(342, 1178), (342, 1556), (402, 59), (458, 1114), (492, 975),
(496, 119), (579, -20), (662, 975), (666, 119), (686, -20),
(686, 1114), (819, 119), (819, 975), (900, -20), (902, 1114),
(969, 342), (969, 551), (969, 765), (1141, 279), (1141, 819),
)
: {'char' : 'b', 'name' : 'glyph00021'},
(
(63, -264), (90, -160), (155, 146), (168, 238), (188, -264),
(241, -150), (339, 115), (350, 238), (365, 215),
)
: {'char' : ',', 'name' : 'glyph00022'},
(
(176, 0), (176, 1556), (332, 561), (340, 0), (340, 410),
(340, 561), (340, 676), (340, 731), (340, 1556), (383, 622),
(465, 518), (471, 721), (578, 629), (825, 1096), (852, 0),
(1022, 1096), (1053, 0),
)
: {'char' : 'k', 'name' : 'glyph00023'},
(
(0, 1096), (178, 1096), (414, 446), (416, 0), (494, 218),
(508, 150), (516, 150), (527, 203), (610, 0), (644, 536),
(848, 1096), (1026, 1096),
)
: {'char' : 'v', 'name' : 'glyph00024'},
(
(125, 372), (125, 733), (125, 959), (294, 1299), (307, 444),
(307, 731), (307, 1010), (476, -20), (576, 131), (586, 1331),
(613, 1483), (799, -20), (825, 131), (827, 1331), (829, 1483),
(978, 131), (993, 1331), (1022, -20), (1059, 1483), (1159, 1253),
(1174, 37), (1174, 186), (1231, 1399),
)
: {'char' : 'C', 'name' : 'glyph00025'},
(
(133, 1462), (174, 934), (279, 934), (319, 1462), (502, 1462),
(543, 934), (647, 934), (688, 1462),
)
: {'char' : '"', 'name' : 'glyph00026'},
(
(18, 1311), (18, 1462), (481, 0), (481, 1311), (651, 0),
(651, 1311), (1114, 1311), (1114, 1462),
)
: {'char' : 'T', 'name' : 'glyph00027'},
(
(201, 0), (201, 1462), (371, 147), (371, 1315), (578, 147),
(606, 0), (618, 1315), (649, 1462), (882, 147), (901, 1315),
(975, 0), (990, 1462), (1188, 446), (1188, 739), (1188, 1025),
(1368, 383), (1368, 745), (1368, 1084),
)
: {'char' : 'D', 'name' : 'glyph00028'},
(
(82, 0), (82, 133), (106, 1309), (106, 1462), (289, 154),
(858, 1309), (1065, 1329), (1065, 1462), (1087, 0), (1087, 154),
)
: {'char' : 'Z', 'name' : 'glyph00029'},
(
(201, 0), (201, 1462), (371, 0), (371, 1462),
)
: {'char' : 'l', 'name' : 'glyph00030'},
(
(133, 1462), (174, 934), (279, 934), (319, 1462),
)
: {'char' : "'", 'name' : 'glyph00031'},
(
(115, 278), (115, 814), (287, 336), (287, 750), (353, -20),
(355, 1116), (440, 119), (442, 977), (565, -20), (569, 1116),
(588, 977), (590, 119), (756, 119), (757, 977), (794, 1116),
(796, -20), (908, 297), (911, 147), (913, -492), (913, -23),
(913, 77), (913, 508), (913, 545), (913, 775), (915, 946),
(924, 147), (924, 946), (948, 1096), (1079, -492), (1079, 1096),
)
: {'char' : 'q', 'name' : 'glyph00032'},
(
(201, 0), (201, 1462), (342, 1227), (350, 1227), (358, 0),
(358, 831), (358, 1011), (393, 1462), (1149, 0), (1182, 560),
(1184, 623), (1184, 1462), (1190, 240), (1196, 267), (1198, 240),
(1343, 0), (1343, 1462),
)
: {'char' : 'N', 'name' : 'glyph00033'},
(
(201, 0), (201, 1462), (344, 1296), (352, 1296), (358, 0),
(358, 930), (358, 1142), (457, 1462), (848, 0), (920, 256),
(928, 256), (985, 0), (1395, 1462), (1479, 0), (1479, 942),
(1479, 1104), (1485, 1294), (1493, 1294), (1649, 0), (1649, 1462),
)
: {'char' : 'M', 'name' : 'glyph00034'},
(
(0, 0), (172, 0), (352, 465), (412, 618), (578, 1468),
(584, 1071), (625, 1186), (647, 1282), (682, 1157), (715, 1071),
(721, 1468), (885, 618), (938, 465), (1120, 0), (1296, 0),
)
: {'char' : 'A', 'name' : 'glyph00035'},
(
(27, 1384), (86, 1249), (183, 1298), (216, 1483), (240, 34),
(240, 106), (240, 242), (289, 403), (289, 457), (289, 574),
(308, -29), (333, 1335), (360, -29), (360, 242), (361, 725),
(412, 1335), (417, -29), (418, 242), (418, 403), (418, 436),
(418, 531), (422, 1483), (459, 809), (481, 42), (481, 172),
(483, 645), (535, 1335), (584, 731), (595, 924), (613, 1483),
(666, 1040), (666, 1122), (666, 1224), (710, 841), (786, 940),
(825, 1051), (825, 1124), (825, 1295),
)
: {'char' : '?', 'name' : 'glyph00036'},
(
(-111, -467), (-111, -332), (-52, -492), (-42, -352), (25, -352),
(43, -492), (103, -352), (162, 1337), (162, 1393), (162, 1450),
(176, -267), (176, -180), (176, 1096), (218, 1282), (218, 1503),
(300, 1282), (300, 1503), (342, -492), (342, -168), (342, 1096),
(358, 1337), (358, 1449),
)
: {'char' : 'j', 'name' : 'glyph00037'},
(
(27, 1462), (207, 1462), (416, 0), (438, 559), (486, 369),
(508, 215), (535, 398), (584, 0), (588, 573), (850, 1462),
(870, 973), (918, 1130), (940, 1262), (941, 1242), (993, 1044),
(1014, 979), (1030, 1462), (1305, 565), (1309, 0), (1353, 410),
(1386, 215), (1405, 357), (1458, 561), (1477, 0), (1688, 1462),
(1868, 1462),
)
: {'char' : 'W', 'name' : 'glyph00038'},
(
(201, 0), (201, 1462), (371, 145), (371, 692), (371, 836),
(371, 1315), (614, 1462), (621, 1315), (651, 836), (662, 692),
(676, 145), (711, 0), (813, 1315), (831, 836), (849, 692),
(853, 145), (881, 766), (881, 776), (905, 1462), (949, 0),
(989, 949), (989, 1083), (989, 1206), (1020, 801), (1032, 282),
(1032, 428), (1032, 564), (1165, 970), (1165, 1100), (1165, 1288),
(1214, 220), (1214, 416), (1214, 709),
)
: {'char' : 'B', 'name' : 'glyph00039'},
(
(201, 0), (201, 1462), (371, 0), (371, 688), (371, 840),
(371, 1462), (1141, 0), (1141, 688), (1141, 840), (1141, 1462),
(1311, 0), (1311, 1462),
)
: {'char' : 'H', 'name' : 'glyph00040'},
(
(106, 47), (106, 211), (125, 953), (125, 1114), (125, 1283),
(196, 173), (246, -20), (297, 1036), (297, 1112), (297, 1215),
(300, 753), (353, 939), (379, 1483), (408, 129), (449, 1331),
(486, 858), (504, 680), (506, -20), (512, 129), (584, 1331),
(588, 1483), (623, 809), (650, 628), (682, 129), (746, -20),
(755, 1331), (793, 546), (806, 1483), (853, 727), (854, 258),
(854, 373), (854, 449), (936, 1255), (989, 1403), (1026, 196),
(1026, 389), (1026, 539),
)
: {'char' : 'S', 'name' : 'glyph00041'},
(
(201, 0), (201, 1462), (371, 152), (371, 690), (371, 840),
(371, 1311), (977, 690), (977, 840), (1016, 0), (1016, 152),
(1016, 1311), (1016, 1462),
)
: {'char' : 'E', 'name' : 'glyph00042'},
(
(152, 34), (152, 106), (152, 242), (170, 1462), (220, -29),
(221, 403), (272, -29), (272, 242), (326, 403), (329, -29),
(330, 242), (377, 1462), (393, 42), (393, 172),
)
: {'char' : '!', 'name' : 'glyph00043'},
(
(201, 0), (201, 1462), (371, 0), (371, 625), (371, 776),
(371, 1311), (977, 625), (977, 776), (1016, 1311), (1016, 1462),
)
: {'char' : 'F', 'name' : 'glyph00044'},
(
(125, 375), (125, 735), (125, 1092), (305, 436), (305, 733),
(305, 1026), (476, -20), (477, 1485), (558, 129), (558, 1333),
(799, -20), (801, 1485), (1042, 129), (1043, 1333), (1115, -20),
(1116, 1485), (1290, 435), (1290, 733), (1290, 1028), (1470, 382),
(1470, 733), (1470, 1085),
)
: {'char' : 'O', 'name' : 'glyph00045'},
(
(-160, -358), (-160, -213), (-106, -385), (-89, -233), (-12, -385),
(-12, -233), (87, -233), (168, -385), (190, -113), (190, 0),
(190, 1462), (360, -176), (360, 14), (360, 1462),
)
: {'char' : 'J', 'name' : 'glyph00046'},
(
(86, 1090), (115, 1272), (221, 733), (397, 639), (463, 1059),
(463, 1556), (506, 1161), (557, 1001), (614, 1161), (657, 1059),
(657, 1556), (733, 639), (905, 733), (1012, 1272), (1038, 1090),
)
: {'char' : '*', 'name' : 'glyph00047'},
(
(39, 0), (59, 1096), (227, 0), (248, 1096), (440, 561),
(537, 444), (537, 676), (631, 561), (825, 1096), (844, 0),
(1012, 1096), (1032, 0),
)
: {'char' : 'x', 'name' : 'glyph00048'},
(
(94, 59), (94, 217), (102, 1331), (185, 15), (186, 1219),
(189, 170), (192, 1402), (295, 1289), (317, 684), (317, 827),
(378, -20), (404, 121), (427, 1483), (461, 684), (463, 827),
(465, 1341), (494, -20), (500, 121), (557, 1483), (561, 1341),
(634, 827), (687, 1341), (705, 762), (705, 770), (761, -20),
(770, 1483), (834, 978), (834, 1112), (834, 1219), (849, 800),
(879, 121), (879, 418), (879, 684), (881, 740), (1006, 978),
(1006, 1118), (1006, 1288), (1051, 205), (1051, 414), (1051, 560),
)
: {'char' : '3', 'name' : 'glyph00049'},
(
(0, 1462), (186, 1462), (487, 0), (487, 559), (573, 731),
(659, 0), (659, 567), (963, 1462), (1147, 1462),
)
: {'char' : 'Y', 'name' : 'glyph00050'},
(
(82, 0), (82, 113), (119, 967), (119, 1096), (272, 129),
(680, 967), (862, 967), (862, 1096), (877, 0), (877, 129),
)
: {'char' : 'z', 'name' : 'glyph00051'},
(
(201, 0), (201, 1462), (371, 154), (371, 1462), (1016, 0),
(1016, 154),
)
: {'char' : 'L', 'name' : 'glyph00052'},
(
(201, 0), (201, 1462), (371, 0), (371, 575), (371, 721),
(371, 1315), (524, 721), (543, 575), (561, 1315), (580, 1462),
(750, 721), (762, 1315), (825, 575), (952, 867), (952, 1028),
(952, 1173), (1128, 814), (1128, 1036), (1128, 1462),
)
: {'char' : 'P', 'name' : 'glyph00053'},
(
(84, 473), (84, 625), (575, 473), (575, 625),
)
: {'char' : '-', 'name' : 'glyph00054'},
(
(8, 0), (41, 1462), (188, 0), (229, 1462), (494, 764),
(588, 643), (592, 883), (686, 770), (958, 1462), (981, 0),
(1139, 1462), (1174, 0),
)
: {'char' : 'X', 'name' : 'glyph00055'},
(
(188, 1163), (276, 1049), (488, 1221), (540, 1267), (553, 0),
(553, 1042), (553, 1172), (561, 1288), (575, 1462), (715, 0),
(715, 1462),
)
: {'char' : '1', 'name' : 'glyph00056'},
(
(152, 34), (152, 106), (152, 173), (152, 917), (152, 989),
(152, 1124), (213, 242), (219, -29), (219, 854), (270, -29),
(270, 854), (270, 1124), (327, -29), (327, 854), (328, 242),
(393, 41), (393, 173), (393, 924), (393, 989), (393, 1124),
)
: {'char' : ':', 'name' : 'glyph00057'},
}
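# Minimal sketch (an assumption about how this table is consumed; the real
# consuming code lives elsewhere in WebMirror/processor). The keys above are
# tuples of (x, y) coordinate pairs in sorted order, so a lookup could be:
def lookup_char(points):
    """Return the de-obfuscated character for a glyph outline, or None."""
    entry = PREDEFINED_FONT_MAPS.get(tuple(sorted(points)))
    return entry['char'] if entry else None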
|
fake-name/ReadableWebProxy
|
WebMirror/processor/fonts/FontTables.py
|
Python
|
bsd-3-clause
| 18,234
| 0.033074
|
"""Two different implementations of merge sort. First one is the standard sort
that creates the result to new list on each level. Second one is an in-place
sort that uses two alternating buffers and offsets to limit memory usage
to O(2n).
"""
def sort(lst):
"""Standard merge sort.
Args:
lst: List to sort
Returns:
Sorted copy of the list
"""
if len(lst) <= 1:
return lst
mid = len(lst) // 2
low = sort(lst[:mid])
high = sort(lst[mid:])
res = []
i = j = 0
while i < len(low) and j < len(high):
if low[i] < high[j]:
res.append(low[i])
i += 1
else:
res.append(high[j])
j += 1
res.extend(low[i:])
res.extend(high[j:])
return res
def helper(lst, buf, start, stop, to_buf):
"""Helper function for in-place sort with alternating buffers.
Args:
lst: List to sort
buf: Buffer to store the results
start: Start index
stop: Stop index
        to_buf: Boolean flag telling where the result should be written.
            If True the result is written to buf; if False it is written
            to lst.
"""
length = stop - start
if length <= 1:
if to_buf and length == 1:
buf[start] = lst[start]
return
mid = start + length // 2
helper(lst, buf, start, mid, not to_buf)
helper(lst, buf, mid, stop, not to_buf)
    # If the result goes to buf, swap lst & buf since the code below always
    # merges from buf into lst
if to_buf:
lst, buf = buf, lst
i = start
j = mid
to = start
while i < mid and j < stop:
if buf[i] < buf[j]:
lst[to] = buf[i]
i += 1
else:
lst[to] = buf[j]
j += 1
to += 1
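    # copy any leftovers from whichever half wasn't exhausted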
for i in range(i, mid):
lst[to] = buf[i]
to += 1
for j in range(j, stop):
lst[to] = buf[j]
to += 1
def sort_in_place(lst):
"""In-place merge sort.
Args:
lst: List to sort
"""
helper(lst, [None] * len(lst), 0, len(lst), False)
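if __name__ == '__main__':
    # Minimal sanity check (an illustrative addition, not from the original
    # module): both implementations should agree with the built-in sorted().
    data = [5, 2, 9, 1, 5, 6]
    assert sort(data) == sorted(data)
    in_place = list(data)
    sort_in_place(in_place)
    assert in_place == sorted(data)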
|
niemmi/algolib
|
algolib/sort/merge_sort.py
|
Python
|
bsd-3-clause
| 2,137
| 0
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='Ajax Select',
version='1.0',
      description='Django jQuery-powered auto-complete fields for ForeignKey and ManyToMany fields',
author='Crucial Felix',
author_email='crucialfelix@gmail.com',
url='http://code.google.com/p/django-ajax-selects/',
packages=['ajax_select', ],
)
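# Illustrative usage (standard distutils, not part of the original file):
#   python setup.py install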
|
Ksynko/django-crm
|
sample_project/external_apps/ajax_select/setup.py
|
Python
|
bsd-3-clause
| 387
| 0.005168
|
# -*- coding: utf-8 -*-
# ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
# Copyright 2006-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
Package represents the collection of resources the user is editing
i.e. the "package".
"""
import datetime
import shutil
import logging
import time
import zipfile
import uuid
import re
from xml.dom import minidom
from exe.engine.path import Path, TempDirPath, toUnicode
from exe.engine.node import Node
from exe.engine.genericidevice import GenericIdevice
from exe.engine.multichoiceidevice import MultichoiceIdevice
from exe.engine.quiztestidevice import QuizTestIdevice
from exe.engine.truefalseidevice import TrueFalseIdevice
from exe.engine.wikipediaidevice import WikipediaIdevice
from exe.engine.casestudyidevice import CasestudyIdevice
from exe.engine.casopracticofpdidevice import CasopracticofpdIdevice
from exe.engine.citasparapensarfpdidevice import CitasparapensarfpdIdevice
from exe.engine.clozefpdidevice import ClozefpdIdevice
from exe.engine.clozeidevice import ClozeIdevice
from exe.engine.clozelangfpdidevice import ClozelangfpdIdevice
from exe.engine.debesconocerfpdidevice import DebesconocerfpdIdevice
from exe.engine.destacadofpdidevice import DestacadofpdIdevice
from exe.engine.ejercicioresueltofpdidevice import EjercicioresueltofpdIdevice
from exe.engine.eleccionmultiplefpdidevice import EleccionmultiplefpdIdevice
from exe.engine.freetextfpdidevice import FreeTextfpdIdevice
from exe.engine.galleryidevice import GalleryIdevice
from exe.engine.imagemagnifieridevice import ImageMagnifierIdevice
from exe.engine.listaidevice import ListaIdevice
from exe.engine.multiselectidevice import MultiSelectIdevice
from exe.engine.orientacionesalumnadofpdidevice import OrientacionesalumnadofpdIdevice
from exe.engine.orientacionestutoriafpdidevice import OrientacionestutoriafpdIdevice
from exe.engine.parasabermasfpdidevice import ParasabermasfpdIdevice
from exe.engine.recomendacionfpdidevice import RecomendacionfpdIdevice
from exe.engine.reflectionfpdidevice import ReflectionfpdIdevice
from exe.engine.reflectionfpdmodifidevice import ReflectionfpdmodifIdevice
from exe.engine.reflectionidevice import ReflectionIdevice
from exe.engine.seleccionmultiplefpdidevice import SeleccionmultiplefpdIdevice
from exe.engine.verdaderofalsofpdidevice import VerdaderofalsofpdIdevice
from exe.engine.persist import Persistable, encodeObject, decodeObjectRaw
from exe import globals as G
from exe.engine.resource import Resource
from twisted.persisted.styles import doUpgrade
from twisted.spread.jelly import Jellyable, Unjellyable
from exe.engine.beautifulsoup import BeautifulSoup
from exe.engine.field import Field, TextAreaField
from exe.engine.persistxml import encodeObjectToXML, decodeObjectFromXML
from exe.engine.lom import lomsubs
from exe.engine.checker import Checker
from exe.webui import common
log = logging.getLogger(__name__)
def clonePrototypeIdevice(title):
idevice = None
for prototype in G.application.ideviceStore.getIdevices():
if prototype.get_title() == title:
log.debug('have prototype of:' + prototype.get_title())
idevice = prototype.clone()
idevice.edit = False
break
return idevice
def burstIdevice(idev_type, i, node):
# given the iDevice type and the BeautifulSoup fragment i, burst it:
idevice = clonePrototypeIdevice(idev_type)
if idevice is None:
log.warn("unable to clone " + idev_type + " idevice")
freetext_idevice = clonePrototypeIdevice('Free Text')
if freetext_idevice is None:
log.error("unable to clone Free Text for " + idev_type
+ " idevice")
return
idevice = freetext_idevice
# For idevices such as GalleryImage, where resources are being attached,
# the idevice should already be attached to a node before bursting it open:
node.addIdevice(idevice)
idevice.burstHTML(i)
return idevice
def loadNodesIdevices(node, s):
soup = BeautifulSoup(s)
body = soup.find('body')
if body:
idevices = body.findAll(name='div',
attrs={'class' : re.compile('Idevice$') })
if len(idevices) > 0:
for i in idevices:
# WARNING: none of the idevices yet re-attach their media,
# but they do attempt to re-attach images and other links.
if i.attrMap['class']=="activityIdevice":
idevice = burstIdevice('Activity', i, node)
elif i.attrMap['class']=="objectivesIdevice":
idevice = burstIdevice('Objectives', i, node)
elif i.attrMap['class']=="preknowledgeIdevice":
idevice = burstIdevice('Preknowledge', i, node)
elif i.attrMap['class']=="readingIdevice":
idevice = burstIdevice('Reading Activity', i, node)
# the above are all Generic iDevices;
# below are all others:
elif i.attrMap['class']=="RssIdevice":
idevice = burstIdevice('RSS', i, node)
elif i.attrMap['class']=="WikipediaIdevice":
# WARNING: Wiki problems loading images with accents, etc:
idevice = burstIdevice('Wiki Article', i, node)
elif i.attrMap['class']=="ReflectionIdevice":
idevice = burstIdevice('Reflection', i, node)
elif i.attrMap['class']=="GalleryIdevice":
# WARNING: Gallery problems with the popup html:
idevice = burstIdevice('Image Gallery', i, node)
elif i.attrMap['class']=="ImageMagnifierIdevice":
# WARNING: Magnifier missing major bursting components:
idevice = burstIdevice('Image Magnifier', i, node)
elif i.attrMap['class']=="AppletIdevice":
# WARNING: Applet missing file bursting components:
idevice = burstIdevice('Java Applet', i, node)
elif i.attrMap['class']=="ExternalUrlIdevice":
idevice = burstIdevice('External Web Site', i, node)
elif i.attrMap['class']=="ClozeIdevice":
idevice = burstIdevice('Cloze Activity', i, node)
elif i.attrMap['class']=="FreeTextIdevice":
idevice = burstIdevice('Free Text', i, node)
elif i.attrMap['class']=="CasestudyIdevice":
idevice = burstIdevice('Case Study', i, node)
elif i.attrMap['class']=="MultichoiceIdevice":
idevice = burstIdevice('Multi-choice', i, node)
elif i.attrMap['class']=="MultiSelectIdevice":
idevice = burstIdevice('Multi-select', i, node)
elif i.attrMap['class']=="QuizTestIdevice":
idevice = burstIdevice('SCORM Quiz', i, node)
elif i.attrMap['class']=="TrueFalseIdevice":
idevice = burstIdevice('True-False Question', i, node)
else:
# NOTE: no custom idevices burst yet,
# nor any deprecated idevices. Just burst into a FreeText:
log.warn("unburstable idevice " + i.attrMap['class'] +
"; bursting into Free Text")
idevice = burstIdevice('Free Text', i, node)
        else:
            # no idevices listed on this page,
            # just create a free-text for the entire page:
            log.warn("no idevices found on this node, bursting into Free Text.")
            idevice = burstIdevice('Free Text', body, node)
else:
log.warn("unable to read the body of this node.")
def test_for_node(html_content):
    # to see if this html really is an exe-generated node
    exe_string = u"<!-- Created using eXe: http://exelearning.org -->"
    return exe_string in html_content.decode('utf-8')
def loadNode(pass_num, resourceDir, zippedFile, node, doc, item, level):
# populate this node
# 1st pass = merely unzipping all resources such that they are available,
# 2nd pass = loading the actual node idevices.
titles = item.getElementsByTagName('title')
node.setTitle(titles[0].firstChild.data)
node_resource = item.attributes['identifierref'].value
log.debug('*' * level + ' ' + titles[0].firstChild.data + '->' + item.attributes['identifierref'].value)
for resource in doc.getElementsByTagName('resource'):
if resource.attributes['identifier'].value == node_resource:
for file in resource.childNodes:
if file.nodeName == 'file':
filename = file.attributes['href'].value
is_exe_node_html = False
if filename.endswith('.html') \
and filename != "fdl.html" \
and not filename.startswith("galleryPopup"):
# fdl.html is the wikipedia license, ignore it
# as well as any galleryPopups:
is_exe_node_html = \
test_for_node(zippedFile.read(filename))
if is_exe_node_html:
if pass_num == 1:
# 2nd pass call to actually load the nodes:
log.debug('loading idevices from node: ' + filename)
loadNodesIdevices(node, zippedFile.read(filename))
elif filename == "fdl.html" or \
filename.startswith("galleryPopup."):
# let these be re-created upon bursting.
if pass_num == 0:
# 1st pass call to unzip the resources:
log.debug('ignoring resource file: '+ filename)
else:
if pass_num == 0:
# 1st pass call to unzip the resources:
                            try:
                                # getinfo() raises KeyError (caught below)
                                # if the member is missing from the archive:
                                zipinfo = zippedFile.getinfo(filename)
                                log.debug('unzipping resource file: '
                                        + resourceDir/filename )
                                outFile = open(resourceDir/filename, "wb")
                                outFile.write(zippedFile.read(filename))
                                outFile.flush()
                                outFile.close()
                            except Exception:
                                log.warn('error unzipping resource file: '
                                        + resourceDir/filename )
##########
# WARNING: the resource is now in the resourceDir,
# BUT it is NOT YET added into any of the project,
# much less to the specific idevices or fields!
# Although they WILL be saved out with the project
# upon the next Save.
##########
break
# process this node's children
for subitem in item.childNodes:
if subitem.nodeName == 'item':
# for the first pass, of unzipping only, do not
# create any child nodes, just cruise on with this one:
next_node = node
if pass_num == 1:
# if this is actually loading the nodes:
next_node = node.createChild()
loadNode(pass_num, resourceDir, zippedFile, next_node,
doc, subitem, level+1)
def loadCC(zippedFile, filename):
"""
Load an IMS Common Cartridge or Content Package from filename
"""
package = Package(Path(filename).namebase)
xmldoc = minidom.parseString( zippedFile.read('imsmanifest.xml'))
organizations_list = xmldoc.getElementsByTagName('organizations')
level = 0
# now a two-pass system to first unzip all applicable resources:
for pass_num in range(2):
for organizations in organizations_list:
organization_list = organizations.getElementsByTagName(
'organization')
for organization in organization_list:
for item in organization.childNodes:
if item.nodeName == 'item':
loadNode(pass_num, package.resourceDir, zippedFile,
package.root, xmldoc, item, level)
return package
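# Illustrative call sequence (an assumption about the caller, which lives
# elsewhere in eXe; zipfile is imported at the top of this module):
#
#     zippedFile = zipfile.ZipFile(filename)
#     package = loadCC(zippedFile, filename)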
# ===========================================================================
class DublinCore(Jellyable, Unjellyable):
"""
Holds dublin core info
"""
def __init__(self):
self.title = ''
self.creator = ''
self.subject = ''
self.description = ''
self.publisher = ''
self.contributors = ''
self.date = ''
self.type = ''
self.format = ''
self.identifier = str(uuid.uuid4())
self.source = ''
self.language = ''
self.relation = ''
self.coverage = ''
self.rights = ''
def __setattr__(self, name, value):
self.__dict__[name] = toUnicode(value)
class Package(Persistable):
"""
Package represents the collection of resources the user is editing
i.e. the "package".
"""
persistenceVersion = 13
nonpersistant = ['resourceDir', 'filename', 'previewDir']
# Name is used in filenames and urls (saving and navigating)
_name = ''
tempFile = False # This is set when the package is saved as a temp copy file
# Title is rendered in exports
_title = ''
_author = ''
_description = ''
_backgroundImg = ''
#styledefault=u"INTEF"
# This is like a constant
defaultLevelNames = [x_(u"Topic"), x_(u"Section"), x_(u"Unit")]
def __init__(self, name):
"""
Initialize
"""
log.debug(u"init " + repr(name))
self._nextIdeviceId = 0
self._nextNodeId = 0
# For looking up nodes by ids
self._nodeIdDict = {}
self._levelNames = self.defaultLevelNames[:]
self.name = name
self._title = u''
self._backgroundImg = u''
self.backgroundImgTile = False
# Empty if never saved/loaded
self.filename = u''
self.root = Node(self, None, _(u"Home"))
self.currentNode = self.root
# self.style = u"default"
#self.styledefault=u"INTEF"
self.style = G.application.config.defaultStyle
self._isChanged = False
self.previewDir = None
self.idevices = []
self.dublinCore = DublinCore()
self._lang = G.application.config.locale.split('_')[0]
self.setLomDefaults()
self.setLomEsDefaults()
self.scolinks = False
        self.scowsinglepage = False
self.scowwebsite = False
self.exportSource = True
self.exportMetadataType = "LOMES"
self.license = u''
self.footer = ""
self._objectives = u''
self._preknowledge = u''
self._learningResourceType = u''
self._intendedEndUserRoleType = u''
self._intendedEndUserRoleGroup = False
self._intendedEndUserRoleTutor = False
self._contextPlace = u''
self._contextMode = u''
self.compatibleWithVersion9 = False
#for export to Sugar (e.g. OLPC)
self.sugaractivityname = ""
self.sugarservicename = ""
#for export to Ustad Mobile
self.mxmlprofilelist = ""
self.mxmlheight = ""
self.mxmlwidth = ""
self.mxmlforcemediaonly = False
# Temporary directory to hold resources in
self.resourceDir = TempDirPath()
self.resources = {} # Checksum-[_Resource(),..]
self._docType = G.application.config.docType
def setLomDefaults(self):
self.lom = lomsubs.lomSub.factory()
self.lom.addChilds(self.lomDefaults(self.dublinCore.identifier, 'LOMv1.0'))
def setLomEsDefaults(self):
self.lomEs = lomsubs.lomSub.factory()
self.lomEs.addChilds(self.lomDefaults(self.dublinCore.identifier, 'LOM-ESv1.0', True))
# Property Handlers
def set_docType(self,value):
self._docType = toUnicode(value)
common.setExportDocType(value)
def set_name(self, value):
self._name = toUnicode(value)
def set_title(self, value):
if self.dublinCore.title == self._title:
self.dublinCore.title = value
lang_str = self.lang.encode('utf-8')
value_str = value.encode('utf-8')
for metadata in [self.lom, self.lomEs]:
title = metadata.get_general().get_title()
if title:
found = False
for string in title.get_string():
if string.get_valueOf_() == self._title.encode('utf-8'):
found = True
if value:
string.set_language(lang_str)
string.set_valueOf_(value_str)
else:
title.string.remove(string)
if not found:
if value:
title.add_string(lomsubs.LangStringSub(lang_str, value_str))
else:
if value:
title = lomsubs.titleSub([lomsubs.LangStringSub(lang_str, value_str)])
metadata.get_general().set_title(title)
self._title = toUnicode(value)
def set_lang(self, value):
if self.dublinCore.language in [self._lang, '']:
self.dublinCore.language = value
value_str = value.encode('utf-8')
for metadata in [self.lom, self.lomEs]:
language = metadata.get_general().get_language()
if language:
for LanguageId in language:
if LanguageId.get_valueOf_() == self._lang.encode('utf-8'):
LanguageId.set_valueOf_(value_str)
else:
language = [lomsubs.LanguageIdSub(value_str)]
metadata.get_general().set_language(language)
metametadata = metadata.get_metaMetadata()
if metametadata:
language = metametadata.get_language()
if language:
if language.get_valueOf_() == self._lang.encode('utf-8'):
language.set_valueOf_(value_str)
else:
language = lomsubs.LanguageIdSub(value_str)
metametadata.set_language(language)
else:
language = lomsubs.LanguageIdSub(value_str)
metametadata = lomsubs.metaMetadataSub(language=language)
metadata.set_metaMetadata(metametadata)
educationals = metadata.get_educational()
if educationals:
for educational in educationals:
language = educational.get_language()
if language:
for LanguageId in language:
if LanguageId.get_valueOf_() == self._lang.encode('utf-8'):
LanguageId.set_valueOf_(value_str)
else:
language = lomsubs.LanguageIdSub(value_str)
educational = [lomsubs.educationalSub(language=[language])]
metadata.set_educational(educational)
self._lang = toUnicode(value)
if value in G.application.config.locales:
__builtins__['c_'] = lambda s: G.application.config.locales[value].ugettext(s) if s else s
def set_author(self, value):
if self.dublinCore.creator == self._author:
self.dublinCore.creator = value
value_str = value.encode('utf-8')
vcard = 'BEGIN:VCARD VERSION:3.0 FN:%s EMAIL;TYPE=INTERNET: ORG: END:VCARD'
for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
src = lomsubs.sourceValueSub()
src.set_valueOf_(source)
src.set_uniqueElementName('source')
val = lomsubs.roleValueSub()
val.set_valueOf_('author')
val.set_uniqueElementName('value')
role = lomsubs.roleSub()
role.set_source(src)
role.set_value(val)
role.set_uniqueElementName('role')
entity = lomsubs.entitySub(vcard % value_str)
dateTime = lomsubs.DateTimeValueSub()
dateTime.set_valueOf_(datetime.datetime.now().strftime('%Y-%m-%d'))
dateTime.set_uniqueElementName('dateTime')
lang_str = self.lang.encode('utf-8')
value_meta_str = c_(u'Metadata creation date').encode('utf-8')
dateDescription = lomsubs.LanguageStringSub([lomsubs.LangStringSub(lang_str, value_meta_str)])
date = lomsubs.dateSub(dateTime, dateDescription)
lifeCycle = metadata.get_lifeCycle()
if lifeCycle:
contributes = lifeCycle.get_contribute()
found = False
for contribute in contributes:
entitys = contribute.get_entity()
rol = contribute.get_role()
if rol:
rolval = rol.get_value()
if rolval:
if rolval.get_valueOf_() == 'author':
for ent in entitys:
if ent.get_valueOf_() == vcard % self.author.encode('utf-8'):
found = True
if value:
ent.set_valueOf_(vcard % value_str)
else:
contribute.entity.remove(ent)
if not contribute.entity:
contributes.remove(contribute)
if not found:
contribute = lomsubs.contributeSub(role, [entity], date)
lifeCycle.add_contribute(contribute)
else:
if value:
contribute = lomsubs.contributeSub(role, [entity], date)
lifeCycle = lomsubs.lifeCycleSub(contribute=[contribute])
metadata.set_lifeCycle(lifeCycle)
val = lomsubs.roleValueSub()
val.set_valueOf_('creator')
val.set_uniqueElementName('value')
role = lomsubs.roleSub()
role.set_source(src)
role.set_value(val)
role.set_uniqueElementName('role')
metaMetadata = metadata.get_metaMetadata()
if metaMetadata:
contributes = metaMetadata.get_contribute()
found = False
for contribute in contributes:
entitys = contribute.get_entity()
rol = contribute.get_role()
if rol:
rolval = rol.get_value()
if rolval:
if rolval.get_valueOf_() == 'creator':
for ent in entitys:
if ent.get_valueOf_() == vcard % self.author.encode('utf-8'):
found = True
if value:
ent.set_valueOf_(vcard % value_str)
else:
contribute.entity.remove(ent)
if not contribute.entity:
contributes.remove(contribute)
if not found:
contribute = lomsubs.contributeMetaSub(role, [entity], date)
metaMetadata.add_contribute(contribute)
else:
if value:
contribute = lomsubs.contributeMetaSub(role, [entity], date)
metaMetadata.set_contribute([contribute])
self._author = toUnicode(value)
def set_description(self, value):
if self.dublinCore.description == self._description:
self.dublinCore.description = value
lang_str = self.lang.encode('utf-8')
value_str = value.encode('utf-8')
for metadata in [self.lom, self.lomEs]:
description = metadata.get_general().get_description()
if description:
description_found = False
for desc in description:
for string in desc.get_string():
if string.get_valueOf_() == self._description.encode('utf-8'):
description_found = True
if value:
string.set_language(lang_str)
string.set_valueOf_(value_str)
else:
desc.string.remove(string)
description.remove(desc)
if not description_found:
if value:
description = lomsubs.descriptionSub([lomsubs.LangStringSub(lang_str, value_str)])
metadata.get_general().add_description(description)
else:
if value:
description = [lomsubs.descriptionSub([lomsubs.LangStringSub(lang_str, value_str)])]
metadata.get_general().set_description(description)
self._description = toUnicode(value)
def get_backgroundImg(self):
"""Get the background image for this package"""
if self._backgroundImg:
return "file://" + self._backgroundImg.path
else:
return ""
def set_backgroundImg(self, value):
"""Set the background image for this package"""
if self._backgroundImg:
self._backgroundImg.delete()
if value:
if value.startswith("file://"):
value = value[7:]
imgFile = Path(value)
self._backgroundImg = Resource(self, Path(imgFile))
else:
self._backgroundImg = u''
def get_level1(self):
return self.levelName(0)
def set_level1(self, value):
if value != '':
self._levelNames[0] = value
else:
self._levelNames[0] = self.defaultLevelNames[0]
def get_level2(self):
return self.levelName(1)
def set_level2(self, value):
if value != '':
self._levelNames[1] = value
else:
self._levelNames[1] = self.defaultLevelNames[1]
def get_level3(self):
return self.levelName(2)
def set_level3(self, value):
if value != '':
self._levelNames[2] = value
else:
self._levelNames[2] = self.defaultLevelNames[2]
def set_objectives(self, value):
lang_str = self.lang.encode('utf-8')
value_str = c_("Objectives").upper() + ": " + value.encode('utf-8')
for metadata in [self.lom, self.lomEs]:
educationals = metadata.get_educational()
description = lomsubs.descriptionSub([lomsubs.LangStringSub(lang_str, value_str)])
if educationals:
for educational in educationals:
descriptions = educational.get_description()
found = False
if descriptions:
for desc in descriptions:
for string in desc.get_string():
if string.get_valueOf_() == c_("Objectives").upper() + ": " + self._objectives.encode('utf-8'):
found = True
if value:
string.set_language(lang_str)
string.set_valueOf_(value_str)
else:
desc.string.remove(string)
descriptions.remove(desc)
if not found:
if value:
educational.add_description(description)
else:
if value:
educational = [lomsubs.educationalSub(description=[description])]
metadata.set_educational(educational)
self._objectives = toUnicode(value)
def set_preknowledge(self, value):
lang_str = self.lang.encode('utf-8')
value_str = c_("Preknowledge").upper() + ": " + value.encode('utf-8')
for metadata in [self.lom, self.lomEs]:
educationals = metadata.get_educational()
description = lomsubs.descriptionSub([lomsubs.LangStringSub(lang_str, value_str)])
if educationals:
for educational in educationals:
descriptions = educational.get_description()
found = False
if descriptions:
for desc in descriptions:
for string in desc.get_string():
if string.get_valueOf_() == c_("Preknowledge").upper() + ": " + self._preknowledge.encode('utf-8'):
found = True
if value:
string.set_language(lang_str)
string.set_valueOf_(value_str)
else:
desc.string.remove(string)
descriptions.remove(desc)
if not found:
if value:
educational.add_description(description)
else:
if value:
educational = [lomsubs.educationalSub(description=[description])]
metadata.set_educational(educational)
self._preknowledge = toUnicode(value)
def license_map(self, source, value):
'''From document "ANEXO XIII ANÁLISIS DE MAPEABILIDAD LOM/LOM-ES V1.0"'''
if source == 'LOM-ESv1.0':
return value
elif source == 'LOMv1.0':
if value == 'not appropriate' or value == 'public domain':
return 'no'
else:
return 'yes'
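    # e.g. (derived from the mapping above):
    #   license_map('LOMv1.0', 'public domain') -> 'no'
    #   license_map('LOMv1.0', 'creative commons: attribution') -> 'yes'
    #   license_map('LOM-ESv1.0', value) -> value, unchanged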
def set_license(self, value):
value_str = value.rstrip(' 0123456789.').encode('utf-8')
if self.dublinCore.rights == self.license:
self.dublinCore.rights = value
for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
rights = metadata.get_rights()
if not rights:
metadata.set_rights(lomsubs.rightsSub())
copyrightAndOtherRestrictions = metadata.get_rights().get_copyrightAndOtherRestrictions()
if copyrightAndOtherRestrictions:
if copyrightAndOtherRestrictions.get_value().get_valueOf_() == self.license_map(source, self.license.encode('utf-8').rstrip(' 0123456789.')):
if value:
copyrightAndOtherRestrictions.get_value().set_valueOf_(self.license_map(source, value_str))
else:
metadata.get_rights().set_copyrightAndOtherRestrictions(None)
else:
if value:
src = lomsubs.sourceValueSub()
src.set_valueOf_(source)
src.set_uniqueElementName('source')
val = lomsubs.copyrightAndOtherRestrictionsValueSub()
val.set_valueOf_(self.license_map(source, value_str))
val.set_uniqueElementName('value')
copyrightAndOtherRestrictions = lomsubs.copyrightAndOtherRestrictionsSub()
copyrightAndOtherRestrictions.set_source(src)
copyrightAndOtherRestrictions.set_value(val)
copyrightAndOtherRestrictions.set_uniqueElementName('copyrightAndOtherRestrictions')
metadata.get_rights().set_copyrightAndOtherRestrictions(copyrightAndOtherRestrictions)
self.license = toUnicode(value)
def learningResourceType_map(self, source, value):
'''From document "ANEXO XIII ANÁLISIS DE MAPEABILIDAD LOM/LOM-ES V1.0"'''
if source == 'LOM-ESv1.0':
return value
elif source == 'LOMv1.0':
lomMap = {
"guided reading": "narrative text",
"master class": "lecture",
"textual-image analysis": "exercise",
"discussion activity": "problem statement",
"closed exercise or problem": "exercise",
"contextualized case problem": "exercise",
"open problem": "problem statement",
"real or virtual learning environment": "simulation",
"didactic game": "exercise",
"webquest": "problem statement",
"experiment": "experiment",
"real project": "simulation",
"simulation": "simulation",
"questionnaire": "questionnaire",
"exam": "exam",
"self assessment": "self assessment",
"": ""
}
return lomMap[value]
def set_learningResourceType(self, value):
value_str = value.encode('utf-8')
for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
educationals = metadata.get_educational()
src = lomsubs.sourceValueSub()
src.set_valueOf_(source)
src.set_uniqueElementName('source')
val = lomsubs.learningResourceTypeValueSub()
val.set_valueOf_(self.learningResourceType_map(source, value_str))
val.set_uniqueElementName('value')
learningResourceType = lomsubs.learningResourceTypeSub(self.learningResourceType_map(source, value_str))
learningResourceType.set_source(src)
learningResourceType.set_value(val)
if educationals:
for educational in educationals:
learningResourceTypes = educational.get_learningResourceType()
found = False
if learningResourceTypes:
for i in learningResourceTypes:
if i.get_value().get_valueOf_() == self.learningResourceType_map(source, self.learningResourceType.encode('utf-8')):
found = True
index = learningResourceTypes.index(i)
if value:
educational.insert_learningResourceType(index, learningResourceType)
else:
learningResourceTypes.pop(index)
if not found:
educational.add_learningResourceType(learningResourceType)
else:
educational = [lomsubs.educationalSub(learningResourceType=[learningResourceType])]
metadata.set_educational(educational)
self._learningResourceType = toUnicode(value)
def intendedEndUserRole_map(self, source, value):
'''From document "ANEXO XIII ANÁLISIS DE MAPEABILIDAD LOM/LOM-ES V1.0"'''
if source == 'LOM-ESv1.0':
return value
elif source == 'LOMv1.0':
if not value or value == 'tutor':
return value
else:
return 'learner'
def set_intendedEndUserRoleType(self, value):
value_str = value.encode('utf-8')
if value:
for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
educationals = metadata.get_educational()
src = lomsubs.sourceValueSub()
src.set_valueOf_(source)
src.set_uniqueElementName('source')
val = lomsubs.intendedEndUserRoleValueSub()
val.set_valueOf_(self.intendedEndUserRole_map(source, value_str))
val.set_uniqueElementName('value')
intendedEndUserRole = lomsubs.intendedEndUserRoleSub(self.intendedEndUserRole_map(source, value_str))
intendedEndUserRole.set_source(src)
intendedEndUserRole.set_value(val)
if educationals:
for educational in educationals:
intendedEndUserRoles = educational.get_intendedEndUserRole()
found = False
if intendedEndUserRoles:
for i in intendedEndUserRoles:
if i.get_value().get_valueOf_() == self.intendedEndUserRole_map(source, self.intendedEndUserRoleType.encode('utf-8')):
found = True
index = intendedEndUserRoles.index(i)
educational.insert_intendedEndUserRole(index, intendedEndUserRole)
if not found:
educational.add_intendedEndUserRole(intendedEndUserRole)
else:
educational = [lomsubs.educationalSub(intendedEndUserRole=[intendedEndUserRole])]
metadata.set_educational(educational)
self._intendedEndUserRoleType = toUnicode(value)
def set_intendedEndUserRole(self, value, valueOf):
for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
educationals = metadata.get_educational()
src = lomsubs.sourceValueSub()
src.set_valueOf_(source)
src.set_uniqueElementName('source')
val = lomsubs.intendedEndUserRoleValueSub()
mappedValueOf = self.intendedEndUserRole_map(source, valueOf)
val.set_valueOf_(mappedValueOf)
val.set_uniqueElementName('value')
intendedEndUserRole = lomsubs.intendedEndUserRoleSub(mappedValueOf)
intendedEndUserRole.set_source(src)
intendedEndUserRole.set_value(val)
if educationals:
for educational in educationals:
intendedEndUserRoles = educational.get_intendedEndUserRole()
found = False
if intendedEndUserRoles:
for i in intendedEndUserRoles:
if i.get_value().get_valueOf_() == mappedValueOf:
found = True
if value:
index = intendedEndUserRoles.index(i)
educational.insert_intendedEndUserRole(index, intendedEndUserRole)
else:
if source != 'LOMv1.0' or valueOf != 'group':
educational.intendedEndUserRole.remove(i)
if not found and value:
educational.add_intendedEndUserRole(intendedEndUserRole)
else:
if value:
educational = [lomsubs.educationalSub(intendedEndUserRole=[intendedEndUserRole])]
metadata.set_educational(educational)
def set_intendedEndUserRoleGroup(self, value):
self.set_intendedEndUserRole(value, 'group')
self._intendedEndUserRoleGroup = value
def set_intendedEndUserRoleTutor(self, value):
self.set_intendedEndUserRole(value, 'tutor')
self._intendedEndUserRoleTutor = value
def context_map(self, source, value):
'''From document "ANEXO XIII ANÁLISIS DE MAPEABILIDAD LOM/LOM-ES V1.0"'''
if source == 'LOM-ESv1.0':
return value
elif source == 'LOMv1.0':
lomMap = {
"classroom": "school",
"real environment": "training",
"face to face": "other",
"blended": "other",
"distance": "other",
"presencial": "other",
"": ""
}
return lomMap[value]
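    # Illustration, using the lomMap table above:
    #   context_map('LOM-ESv1.0', 'classroom') -> 'classroom'  (kept as-is)
    #   context_map('LOMv1.0', 'classroom')    -> 'school'
    #   context_map('LOMv1.0', 'distance')     -> 'other'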
def set_context(self, value, valueOf):
value_str = value.encode('utf-8')
if value:
for metadata, source in [(self.lom, 'LOMv1.0'), (self.lomEs, 'LOM-ESv1.0')]:
educationals = metadata.get_educational()
src = lomsubs.sourceValueSub()
src.set_valueOf_(source)
src.set_uniqueElementName('source')
val = lomsubs.contextValueSub()
val.set_valueOf_(self.context_map(source, value_str))
val.set_uniqueElementName('value')
context = lomsubs.contextSub(self.context_map(source, value_str))
context.set_source(src)
context.set_value(val)
if educationals:
for educational in educationals:
contexts = educational.get_context()
found = False
if contexts:
for i in contexts:
if i.get_value().get_valueOf_() == self.context_map(source, valueOf.encode('utf-8')):
found = True
index = contexts.index(i)
educational.insert_context(index, context)
if not found:
educational.add_context(context)
else:
educational = [lomsubs.educationalSub(context=[context])]
metadata.set_educational(educational)
def set_contextPlace(self, value):
self.set_context(value, self._contextPlace)
self._contextPlace = toUnicode(value)
def set_contextMode(self, value):
self.set_context(value, self._contextMode)
self._contextMode = toUnicode(value)
def set_changed(self, changed):
self._isChanged = changed
if changed:
if hasattr(self, 'previewDir'):
if self.previewDir:
shutil.rmtree(self.previewDir, True)
self.previewDir = None
# Properties
isChanged = property(lambda self: self._isChanged, set_changed)
    name = property(lambda self: self._name, set_name)
    title = property(lambda self: self._title, set_title)
    lang = property(lambda self: self._lang, set_lang)
    author = property(lambda self: self._author, set_author)
    description = property(lambda self: self._description, set_description)
    newlicense = property(lambda self: self.license, set_license)
    docType = property(lambda self: self._docType, set_docType)
backgroundImg = property(get_backgroundImg, set_backgroundImg)
level1 = property(get_level1, set_level1)
level2 = property(get_level2, set_level2)
level3 = property(get_level3, set_level3)
objectives = property(lambda self: self._objectives, set_objectives)
preknowledge = property(lambda self: self._preknowledge, set_preknowledge)
learningResourceType = property(lambda self: self._learningResourceType, set_learningResourceType)
intendedEndUserRoleType = property(lambda self: self._intendedEndUserRoleType, set_intendedEndUserRoleType)
intendedEndUserRoleGroup = property(lambda self: self._intendedEndUserRoleGroup, set_intendedEndUserRoleGroup)
intendedEndUserRoleTutor = property(lambda self: self._intendedEndUserRoleTutor, set_intendedEndUserRoleTutor)
contextPlace = property(lambda self: self._contextPlace, set_contextPlace)
contextMode = property(lambda self: self._contextMode, set_contextMode)
def findNode(self, nodeId):
"""
Finds a node from its nodeId
(nodeId can be a string or a list/tuple)
"""
log.debug(u"findNode" + repr(nodeId))
node = self._nodeIdDict.get(nodeId)
if node and node.package is self:
return node
else:
return None
def levelName(self, level):
"""
Return the level name
"""
if level < len(self._levelNames):
return _(self._levelNames[level])
else:
return _(u"?????")
def save(self, filename=None, tempFile=False):
"""
Save package to disk
pass an optional filename
"""
self.tempFile = tempFile
# Get the filename
if filename:
filename = Path(filename)
# If we are being given a new filename...
# Change our name to match our new filename
name = filename.splitpath()[1]
if not tempFile:
self.name = name.basename().splitext()[0]
elif self.filename:
# Otherwise use our last saved/loaded from filename
filename = Path(self.filename)
else:
# If we don't have a last saved/loaded from filename,
# raise an exception because, we need to have a new
# file passed when a brand new package is saved
raise AssertionError(u'No name passed when saving a new package')
        #JR: Convert the package name to avoid problematic names
import string
validPackagenameChars = "-_. %s%s" % (string.ascii_letters, string.digits)
self.name = ''.join(c for c in self.name if c in validPackagenameChars).replace(' ','_')
        #JR: If it happens to end up empty, give it a default name
if self.name == "":
self.name = "invalidpackagename"
# Store our new filename for next file|save, and save the package
log.debug(u"Will save %s to: %s" % (self.name, filename))
if tempFile:
self.nonpersistant.remove('filename')
oldFilename, self.filename = self.filename, unicode(self.filename)
try:
                filename.safeSave(self.doSave, _('SAVE FAILED!\nLast successful save is %s.'))
finally:
self.nonpersistant.append('filename')
self.filename = oldFilename
else:
# Update our new filename for future saves
self.filename = filename
            filename.safeSave(self.doSave, _('SAVE FAILED!\nLast successful save is %s.'))
self.isChanged = False
self.updateRecentDocuments(filename)
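    # A brief usage sketch (the path is hypothetical; load() is defined below):
    #   package = Package.load(u'/tmp/course.elp')
    #   package.author = u'Someone'
    #   package.save()                     # re-save under the last filename
    #   package.save(u'/tmp/renamed.elp')  # save under a new name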
def updateRecentDocuments(self, filename):
"""
Updates the list of recent documents
"""
# Don't update the list for the generic.data "package"
genericData = G.application.config.configDir/'idevices'/'generic.data'
if genericData.isfile() or genericData.islink():
if Path(filename).samefile(genericData):
return
# Save in recentDocuments list
recentProjects = G.application.config.recentProjects
if filename in recentProjects:
# If we're already number one, carry on
if recentProjects[0] == filename:
return
recentProjects.remove(filename)
recentProjects.insert(0, filename)
del recentProjects[5:] # Delete any older names from the list
G.application.config.configParser.write() # Save the settings
def doSave(self, fileObj):
"""
Actually performs the save to 'fileObj'.
"""
if self.compatibleWithVersion9:
self.downgradeToVersion9()
zippedFile = zipfile.ZipFile(fileObj, "w", zipfile.ZIP_DEFLATED)
try:
for resourceFile in self.resourceDir.files():
zippedFile.write(unicode(resourceFile.normpath()),
resourceFile.name.encode('utf8'), zipfile.ZIP_DEFLATED)
zinfo = zipfile.ZipInfo(filename='content.data',
date_time=time.localtime()[0:6])
zinfo.external_attr = 0100644<<16L
zinfo.compress_type = zipfile.ZIP_DEFLATED
zippedFile.writestr(zinfo, encodeObject(self))
zinfo2 = zipfile.ZipInfo(filename='contentv3.xml',
date_time=time.localtime()[0:6])
zinfo2.external_attr = 0100644<<16L
zinfo2.compress_type = zipfile.ZIP_DEFLATED
zippedFile.writestr(zinfo2, encodeObjectToXML(self))
zippedFile.write(G.application.config.webDir/'templates'/'content.xsd', 'content.xsd', zipfile.ZIP_DEFLATED)
finally:
zippedFile.close()
if self.compatibleWithVersion9:
self.upgradeToVersion10()
CasestudyIdevice.persistenceVersion = 9
CasopracticofpdIdevice.persistenceVersion = 9
CitasparapensarfpdIdevice.persistenceVersion = 9
ClozefpdIdevice.persistenceVersion = 7
ClozeIdevice.persistenceVersion = 7
ClozelangfpdIdevice.persistenceVersion = 7
DebesconocerfpdIdevice.persistenceVersion = 9
DestacadofpdIdevice.persistenceVersion = 9
EjercicioresueltofpdIdevice.persistenceVersion = 10
EleccionmultiplefpdIdevice.persistenceVersion = 10
TextAreaField.persistenceVersion = 2
FreeTextfpdIdevice.persistenceVersion = 8
GalleryIdevice.persistenceVersion = 8
ImageMagnifierIdevice.persistenceVersion = 4
ListaIdevice.persistenceVersion = 5
MultichoiceIdevice.persistenceVersion = 9
GenericIdevice.persistenceVersion = 11
MultiSelectIdevice.persistenceVersion = 1
OrientacionesalumnadofpdIdevice.persistenceVersion = 9
OrientacionestutoriafpdIdevice.persistenceVersion = 9
ParasabermasfpdIdevice.persistenceVersion = 9
QuizTestIdevice.persistenceVersion = 10
RecomendacionfpdIdevice.persistenceVersion = 9
ReflectionfpdIdevice.persistenceVersion = 9
ReflectionfpdmodifIdevice.persistenceVersion = 9
ReflectionIdevice.persistenceVersion = 8
SeleccionmultiplefpdIdevice.persistenceVersion = 2
TrueFalseIdevice.persistenceVersion = 11
VerdaderofalsofpdIdevice.persistenceVersion = 12
WikipediaIdevice.persistenceVersion = 9
Package.persistenceVersion = 13
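    # For reference, doSave() writes a zip (.elp) with this layout:
    #   <files from self.resourceDir>   the package's resource files
    #   content.data                    the jellied (pickled) package
    #   contentv3.xml                   the XML-encoded package
    #   content.xsd                     schema copied from the templates dir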
def extractNode(self):
"""
Clones and extracts the currently selected node into a new package.
"""
newPackage = Package('NoName') # Name will be set once it is saved..
newPackage.title = self.currentNode.title
newPackage.style = self.style
newPackage.author = self.author
newPackage._nextNodeId = self._nextNodeId
# Copy the nodes from the original package
# and merge into the root of the new package
self.currentNode.copyToPackage(newPackage)
return newPackage
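    # Sketch: extract the currently selected node into its own package
    # (currentNode must be set; the target path is hypothetical):
    #   sub = package.extractNode()
    #   sub.save(u'/tmp/extracted.elp')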
@staticmethod
def load(filename, newLoad=True, destinationPackage=None, fromxml=None):
"""
Load package from disk, returns a package.
"""
if not zipfile.is_zipfile(filename):
return None
zippedFile = zipfile.ZipFile(filename, "r")
xml = None
try:
xml = zippedFile.read(u"contentv3.xml")
except:
pass
if not xml:
try:
# Get the jellied package data
toDecode = zippedFile.read(u"content.data")
except KeyError:
log.info("no content.data, trying Common Cartridge/Content Package")
newPackage = loadCC(zippedFile, filename)
newPackage.tempFile = False
newPackage.isChanged = False
newPackage.filename = Path(filename)
return newPackage
# Need to add a TempDirPath because it is a nonpersistant member
resourceDir = TempDirPath()
# Extract resource files from package to temporary directory
for fn in zippedFile.namelist():
if unicode(fn, 'utf8') not in [u"content.data", u"content.xml", u"contentv2.xml", u"contentv3.xml", u"content.xsd" ]:
                #JR: Perform the necessary checks in case there are directories
if ("/" in fn):
dir = fn[:fn.index("/")]
Dir = Path(resourceDir/dir)
if not Dir.exists():
Dir.mkdir()
Fn = Path(resourceDir/fn)
if not Fn.isdir():
outFile = open(resourceDir/fn, "wb")
outFile.write(zippedFile.read(fn))
outFile.flush()
outFile.close()
try:
validxml = False
if fromxml:
newPackage, validxml = decodeObjectFromXML(fromxml)
elif xml:
xmlinfo = zippedFile.getinfo(u"contentv3.xml")
if u"content.data" not in zippedFile.NameToInfo:
newPackage, validxml = decodeObjectFromXML(xml)
else:
datainfo = zippedFile.getinfo(u"content.data")
if xmlinfo.date_time >= datainfo.date_time:
newPackage, validxml = decodeObjectFromXML(xml)
if not validxml:
toDecode = zippedFile.read(u"content.data")
newPackage = decodeObjectRaw(toDecode)
try:
lomdata = zippedFile.read(u'imslrm.xml')
if 'LOM-ES' in lomdata:
importType = 'lomEs'
else:
importType = 'lom'
setattr(newPackage, importType, lomsubs.parseString(lomdata))
except:
pass
G.application.afterUpgradeHandlers = []
newPackage.resourceDir = resourceDir
G.application.afterUpgradeZombies2Delete = []
if not validxml and (xml or fromxml or "content.xml" in zippedFile.namelist()):
for key, res in newPackage.resources.items():
if len(res) < 1:
newPackage.resources.pop(key)
else:
if (hasattr(res[0], 'testForAndDeleteZombieResources')):
res[0].testForAndDeleteZombieResources()
if newLoad:
# provide newPackage to doUpgrade's versionUpgrade() to
# correct old corrupt extracted packages by setting the
# any corrupt package references to the new package:
                #JR: Convert the package name to avoid problematic names
import string
validPackagenameChars = "-_. %s%s" % (string.ascii_letters, string.digits)
newPackage._name = ''.join(c for c in newPackage._name if c in validPackagenameChars).replace(' ','_')
                #JR: If it happens to end up empty, give it a default name
if newPackage._name == "":
newPackage._name = "invalidpackagename"
log.debug("load() about to doUpgrade newPackage \""
+ newPackage._name + "\" " + repr(newPackage) )
if hasattr(newPackage, 'resourceDir'):
log.debug("newPackage resourceDir = "
+ newPackage.resourceDir)
else:
# even though it was just set above? should not get here:
log.error("newPackage resourceDir has NO resourceDir!")
doUpgrade(newPackage)
# after doUpgrade, compare the largest found field ID:
if G.application.maxFieldId >= Field.nextId:
Field.nextId = G.application.maxFieldId + 1
if hasattr(newPackage,'_docType'):
common.setExportDocType(newPackage.docType)
else:
newPackage.set_docType(toUnicode('XHTML'))
else:
# and when merging, automatically set package references to
# the destinationPackage, into which this is being merged:
log.debug("load() about to merge doUpgrade newPackage \""
+ newPackage._name + "\" " + repr(newPackage)
+ " INTO destinationPackage \""
+ destinationPackage._name + "\" "
+ repr(destinationPackage))
log.debug("using their resourceDirs:")
if hasattr(newPackage, 'resourceDir'):
log.debug(" newPackage resourceDir = "
+ newPackage.resourceDir)
else:
log.error("newPackage has NO resourceDir!")
if hasattr(destinationPackage, 'resourceDir'):
log.debug(" destinationPackage resourceDir = "
+ destinationPackage.resourceDir)
else:
log.error("destinationPackage has NO resourceDir!")
doUpgrade(destinationPackage,
isMerge=True, preMergePackage=newPackage)
# after doUpgrade, compare the largest found field ID:
if G.application.maxFieldId >= Field.nextId:
Field.nextId = G.application.maxFieldId + 1
except:
import traceback
traceback.print_exc()
raise
if newPackage.tempFile:
            # newPackage.filename was stored as its original filename
newPackage.tempFile = False
else:
# newPackage.filename is the name that the package was last loaded from
# or saved to
newPackage.filename = Path(filename)
checker = Checker(newPackage)
inconsistencies = checker.check()
for inconsistency in inconsistencies:
inconsistency.fix()
# Let idevices and nodes handle any resource upgrading they may need to
# Note: Package afterUpgradeHandlers *must* be done after Resources'
# and the package should be updated before everything else,
# so, prioritize with a 3-pass, 3-level calling setup
# in order of: 1) resources, 2) package, 3) anything other objects
for handler_priority in range(3):
for handler in G.application.afterUpgradeHandlers:
if handler_priority == 0 and \
repr(handler.im_class)=="<class 'exe.engine.resource.Resource'>":
# level-0 handlers: Resource
handler()
elif handler_priority == 1 and \
repr(handler.im_class)=="<class 'exe.engine.package.Package'>":
# level-1 handlers: Package (requires resources first)
if handler.im_self == newPackage:
handler()
else:
log.warn("Extra package object found, " \
+ "ignoring its afterUpgradeHandler: " \
+ repr(handler))
elif handler_priority == 2 and \
repr(handler.im_class)!="<class 'exe.engine.resource.Resource'>" \
and \
repr(handler.im_class)!="<class 'exe.engine.package.Package'>":
# level-2 handlers: all others
handler()
G.application.afterUpgradeHandlers = []
num_zombies = len(G.application.afterUpgradeZombies2Delete)
for i in range(num_zombies-1, -1, -1):
zombie = G.application.afterUpgradeZombies2Delete[i]
# now, the zombie list can contain nodes OR resources to delete.
# if zombie is a node, then also pass in a pruning parameter..
zombie_is_node = False
if isinstance(zombie, Node):
zombie_is_node = True
if zombie_is_node:
zombie.delete(pruningZombies=True)
else:
                #JR: Remove the resource from the idevice
if hasattr(zombie._idevice, 'userResources'):
for i in range(len(zombie._idevice.userResources)-1, -1, -1):
if hasattr(zombie._idevice.userResources[i], 'storageName'):
if zombie._idevice.userResources[i].storageName == zombie.storageName:
aux = zombie._idevice.userResources[i]
zombie._idevice.userResources.remove(aux)
                                aux.delete()
                #Remove the resource from the system resources
#for resource in newPackage.resources.keys():
# if hasattr(newPackage.resources[resource][0], 'storageName'):
# if newPackage.resources[resource][0].storageName == zombie.storageName:
# del newPackage.resources[resource]
                #JR: This should no longer be needed
#zombie.delete()
del zombie
userResourcesFiles = newPackage.getUserResourcesFiles(newPackage.root)
        #JR: Delete resources that are not being used
newPackage.cleanUpResources(userResourcesFiles)
G.application.afterUpgradeZombies2Delete = []
newPackage.updateRecentDocuments(newPackage.filename)
newPackage.isChanged = False
nstyle=Path(G.application.config.stylesDir/newPackage.style)
if not nstyle.isdir():
newPackage.style=G.application.config.defaultStyle
newPackage.lang = newPackage._lang
return newPackage
def getUserResourcesFiles(self, node):
resourceFiles = set()
for idevice in node.idevices:
if hasattr(idevice, 'userResources'):
for i in range(len(idevice.userResources) - 1, -1, -1):
if hasattr(idevice.userResources[i], 'storageName'):
resourceFiles.add(idevice.userResources[i].storageName)
for child in node.children:
resourceFiles = resourceFiles | self.getUserResourcesFiles(child)
return resourceFiles
def cleanUpResources(self, userResourcesFiles=set()):
"""
        Removes unused resource files
"""
# Delete unused resources.
# Only really needed for upgrading to version 0.20,
# but upgrading of resources and package happens in no particular order
# and must be done after all resources have been upgraded
# some earlier .elp files appear to have been corrupted with
# two packages loaded, *possibly* from some strange extract/merge
# functionality in earlier eXe versions?
# Regardless, only the real package will have a resourceDir,
# and the other will fail.
# For now, then, put in this quick and easy safety check:
if not hasattr(self,'resourceDir'):
log.warn("cleanUpResources called on a redundant package")
return
existingFiles = set([fn.basename() for fn in self.resourceDir.files()])
#JR
usedFiles = set([])
for reses in self.resources.values():
if hasattr(reses[0], 'storageName'):
usedFiles.add(reses[0].storageName)
#usedFiles = set([reses[0].storageName for reses in self.resources.values()])
for fn in existingFiles - usedFiles - userResourcesFiles:
log.debug('Removing unused resource %s' % fn)
(self.resourceDir/fn).remove()
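    # Usage, mirroring the calls made at the end of load() above:
    #   used = package.getUserResourcesFiles(package.root)
    #   package.cleanUpResources(used)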
def findResourceByName(self, queryName):
"""
Support for merging, and anywhere else that unique names might be
checked before actually comparing against the files (as will be
done by the resource class itself in its _addOurselvesToPackage() )
"""
foundResource = None
queryResources = self.resources
for this_checksum in queryResources:
for this_resource in queryResources[this_checksum]:
if queryName == this_resource.storageName:
foundResource = this_resource
return foundResource
return foundResource
def upgradeToVersion1(self):
"""
Called to upgrade from 0.3 release
"""
self._nextNodeId = 0
self._nodeIdDict = {}
# Also upgrade all the nodes.
# This needs to be done here so that draft gets id 0
# If it's done in the nodes, the ids are assigned in reverse order
draft = getattr(self, 'draft')
draft._id = self._regNewNode(draft)
draft._package = self
setattr(self, 'editor', Node(self, None, _(u"iDevice Editor")))
# Add a default idevice to the editor
idevice = GenericIdevice("", "", "", "", "")
editor = getattr(self, 'editor')
idevice.parentNode = editor
editor.addIdevice(idevice)
def superReg(node):
"""Registers all our nodes
because in v0 they were not registered
in this way"""
node._id = self._regNewNode(node)
node._package = self
for child in node.children:
superReg(child)
superReg(self.root)
def _regNewNode(self, node):
"""
Called only by nodes,
stores the node in our id lookup dict
returns a new unique id
"""
id_ = unicode(self._nextNodeId)
self._nextNodeId += 1
self._nodeIdDict[id_] = node
return id_
def getNewIdeviceId(self):
"""
Returns an iDevice Id which is unique for this package.
"""
id_ = unicode(self._nextIdeviceId)
self._nextIdeviceId += 1
return id_
def upgradeToVersion2(self):
"""
Called to upgrade from 0.4 release
"""
getattr(self, 'draft').delete()
getattr(self, 'editor').delete()
delattr(self, 'draft')
delattr(self, 'editor')
# Need to renumber nodes because idevice node and draft nodes are gone
self._nextNodeId = 0
def renumberNode(node):
"""
Gives the old node a number
"""
node._id = self._regNewNode(node)
for child in node.children:
renumberNode(child)
renumberNode(self.root)
def upgradeToVersion3(self):
"""
Also called to upgrade from 0.4 release
"""
self._nextIdeviceId = 0
def upgradeToVersion4(self):
"""
Puts properties in their place
Also called to upgrade from 0.8 release
"""
self._name = toUnicode(self.__dict__['name'])
self._author = toUnicode(self.__dict__['author'])
self._description = toUnicode(self.__dict__['description'])
def upgradeToVersion5(self):
"""
For version 0.11
"""
self._levelNames = self.levelNames
del self.levelNames
def upgradeToVersion6(self):
"""
For version 0.14
"""
self.dublinCore = DublinCore()
# Copy some of the package properties to dublin core
self.title = self.root.title
self.dublinCore.title = self.root.title
self.dublinCore.creator = self._author
self.dublinCore.description = self._description
self.scolinks = False
def upgradeToVersion7(self):
"""
For version 0.15
"""
self._backgroundImg = ''
self.backgroundImgTile = False
def upgradeToVersion8(self):
"""
For version 0.20, alpha, for nightlies r2469
"""
self.license = 'None'
self.footer = ""
self.idevices = []
def upgradeToVersion9(self):
"""
For version >= 0.20.4
"""
if not hasattr(self, 'resources'):
# The hasattr is needed, because sometimes, Resource instances are upgraded
# first and they also set this attribute on the package
self.resources = {}
G.application.afterUpgradeHandlers.append(self.cleanUpResources)
def lomDefaults(self, entry, schema, rights=False):
defaults = {'general': {'identifier': [{'catalog': c_('My Catalog'), 'entry': entry}],
'aggregationLevel': {'source': schema, 'value': '2'}
},
'metaMetadata': {'metadataSchema': [schema]},
}
if rights:
defaults['rights'] = {'access': {'accessType': {'source': schema, 'value': 'universal'},
'description': {'string': [{'valueOf_': c_('Default'), 'language': str(self.lang)}]}}}
return defaults
oldLicenseMap = {"None": "None",
"GNU Free Documentation License": u"license GFDL",
"Creative Commons Attribution 3.0 License": u"creative commons: attribution 3.0",
"Creative Commons Attribution Share Alike 3.0 License": u"creative commons: attribution - share alike 3.0",
"Creative Commons Attribution No Derivatives 3.0 License": u"creative commons: attribution - non derived work 3.0",
"Creative Commons Attribution Non-commercial 3.0 License": u"creative commons: attribution - non commercial 3.0",
"Creative Commons Attribution Non-commercial Share Alike 3.0 License": u"creative commons: attribution - non commercial - share alike 3.0",
"Creative Commons Attribution Non-commercial No Derivatives 3.0 License": u"creative commons: attribution - non derived work - non commercial 3.0",
"Creative Commons Attribution 2.5 License": u"creative commons: attribution 2.5",
"Creative Commons Attribution-ShareAlike 2.5 License": u"creative commons: attribution - share alike 2.5",
"Creative Commons Attribution-NoDerivs 2.5 License": u"creative commons: attribution - non derived work 2.5",
"Creative Commons Attribution-NonCommercial 2.5 License": u"creative commons: attribution - non commercial 2.5",
"Creative Commons Attribution-NonCommercial-ShareAlike 2.5 License": u"creative commons: attribution - non commercial - share alike 2.5",
"Creative Commons Attribution-NonCommercial-NoDerivs 2.5 License": u"creative commons: attribution - non derived work - non commercial 2.5",
"Developing Nations 2.0": u""
}
def upgradeToVersion10(self):
"""
For version >= 2.0
"""
if not hasattr(self, 'lang'):
self._lang = G.application.config.locale.split('_')[0]
entry = str(uuid.uuid4())
if not hasattr(self, 'lomEs') or not isinstance(self.lomEs, lomsubs.lomSub):
self.lomEs = lomsubs.lomSub.factory()
self.lomEs.addChilds(self.lomDefaults(entry, 'LOM-ESv1.0', True))
if not hasattr(self, 'lom') or not isinstance(self.lom, lomsubs.lomSub):
self.lom = lomsubs.lomSub.factory()
self.lom.addChilds(self.lomDefaults(entry, 'LOMv1.0'))
if not hasattr(self, 'scowsinglepage'):
self.scowsinglepage = False
if not hasattr(self, 'scowwebsite'):
self.scowwebsite = False
if not hasattr(self, 'exportSource'):
self.exportSource = True
if not hasattr(self, 'exportMetadataType'):
self.exportMetadataType = "LOMES"
if not hasattr(self, 'objectives'):
self._objectives = u''
if not hasattr(self, 'preknowledge'):
self._preknowledge = u''
if not hasattr(self, 'learningResourceType'):
self._learningResourceType = u''
if not hasattr(self, 'intendedEndUserRoleType'):
self._intendedEndUserRoleType = u''
if not hasattr(self, 'intendedEndUserRoleGroup'):
self._intendedEndUserRoleGroup = False
if not hasattr(self, 'intendedEndUserRoleTutor'):
self._intendedEndUserRoleTutor = False
if not hasattr(self, 'contextPlace'):
self._contextPlace = u''
if not hasattr(self, 'contextMode'):
self._contextMode = u''
if hasattr(self, 'scowsource'):
del self.scowsource
try:
if not self.license in self.oldLicenseMap.values():
self.newlicense = self.oldLicenseMap[self.license]
except:
self.license = u''
if not hasattr(self, 'mxmlprofilelist'):
self.mxmlprofilelist = ""
if not hasattr(self, 'mxmlforcemediaonly'):
self.mxmlforcemediaonly = False
if not hasattr(self, 'mxmlheight'):
self.mxmlheight = ""
if not hasattr(self, 'mxmlwidth'):
self.mxmlwidth = ""
if not hasattr(self, 'compatibleWithVersion9'):
self.compatibleWithVersion9 = False
self.set_title(self._title)
self.set_author(self._author)
self.set_description(self._description)
def upgradeToVersion11(self):
pass
def upgradeToVersion12(self):
#because actually version 11 was exe-next-gen
self.upgradeToVersion9()
self.upgradeToVersion10()
def upgradeToVersion13(self):
if not hasattr(self, '_docType'):
self._docType = G.application.config.docType
def downgradeToVersion9(self):
for attr in ['lomEs', 'lom', 'scowsinglepage', 'scowwebsite',
'exportSource', 'exportMetadataType', '_lang',
'_objectives', '_preknowledge', '_learningResourceType',
'_intendedEndUserRoleType', '_intendedEndUserRoleGroup',
'_intendedEndUserRoleTutor', '_contextPlace',
'_contextMode', 'scowsource', 'mxmlprofilelist',
'mxmlforcemediaonly', 'mxmlheight', 'mxmlwidth']:
if hasattr(self, attr):
delattr(self, attr)
self.license = u''
CasestudyIdevice.persistenceVersion = 8
CasopracticofpdIdevice.persistenceVersion = 7
CitasparapensarfpdIdevice.persistenceVersion = 7
ClozefpdIdevice.persistenceVersion = 4
ClozeIdevice.persistenceVersion = 4
ClozelangfpdIdevice.persistenceVersion = 4
DebesconocerfpdIdevice.persistenceVersion = 7
DestacadofpdIdevice.persistenceVersion = 7
EjercicioresueltofpdIdevice.persistenceVersion = 8
EleccionmultiplefpdIdevice.persistenceVersion = 7
TextAreaField.persistenceVersion = 1
FreeTextfpdIdevice.persistenceVersion = 7
GalleryIdevice.persistenceVersion = 7
ImageMagnifierIdevice.persistenceVersion = 2
ListaIdevice.persistenceVersion = 4
MultichoiceIdevice.persistenceVersion = 7
GenericIdevice.persistenceVersion = 9
delattr(MultiSelectIdevice, "persistenceVersion")
OrientacionesalumnadofpdIdevice.persistenceVersion = 7
OrientacionestutoriafpdIdevice.persistenceVersion = 7
ParasabermasfpdIdevice.persistenceVersion = 7
QuizTestIdevice.persistenceVersion = 8
RecomendacionfpdIdevice.persistenceVersion = 7
ReflectionfpdIdevice.persistenceVersion = 7
ReflectionfpdmodifIdevice.persistenceVersion = 7
ReflectionIdevice.persistenceVersion = 7
delattr(SeleccionmultiplefpdIdevice, "persistenceVersion")
TrueFalseIdevice.persistenceVersion = 9
VerdaderofalsofpdIdevice.persistenceVersion = 9
WikipediaIdevice.persistenceVersion = 8
Package.persistenceVersion = 9
def getExportDocType(self):
return self._docType
def delNotes(self, node):
"""
Delete all notes
"""
for idevice in node.idevices:
if idevice.klass == 'NotaIdevice':
idevice.delete()
for child in node.children:
self.delNotes(child)
# ===========================================================================
|
RichDijk/eXe
|
exe/engine/package.py
|
Python
|
gpl-2.0
| 77,805
| 0.004679
|
"""Defines the URL routes for the Team API."""
from django.conf import settings
from django.conf.urls import patterns, url
from .views import (
MembershipDetailView,
MembershipListView,
TeamsDetailView,
TeamsListView,
TopicDetailView,
TopicListView
)
TEAM_ID_PATTERN = r'(?P<team_id>[a-z\d_-]+)'
TOPIC_ID_PATTERN = r'(?P<topic_id>[A-Za-z\d_.-]+)'
urlpatterns = patterns(
'',
url(
r'^v0/teams/$',
TeamsListView.as_view(),
name="teams_list"
),
url(
r'^v0/teams/{team_id_pattern}$'.format(
team_id_pattern=TEAM_ID_PATTERN,
),
TeamsDetailView.as_view(),
name="teams_detail"
),
url(
r'^v0/topics/$',
TopicListView.as_view(),
name="topics_list"
),
url(
r'^v0/topics/{topic_id_pattern},{course_id_pattern}$'.format(
topic_id_pattern=TOPIC_ID_PATTERN,
course_id_pattern=settings.COURSE_ID_PATTERN,
),
TopicDetailView.as_view(),
name="topics_detail"
),
url(
r'^v0/team_membership/$',
MembershipListView.as_view(),
name="team_membership_list"
),
url(
r'^v0/team_membership/{team_id_pattern},{username_pattern}$'.format(
team_id_pattern=TEAM_ID_PATTERN,
username_pattern=settings.USERNAME_PATTERN,
),
MembershipDetailView.as_view(),
name="team_membership_detail"
)
)
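# Example URLs matched by the patterns above (team ids, topic ids and
# usernames are hypothetical; COURSE_ID_PATTERN and USERNAME_PATTERN come
# from settings):
#   /v0/teams/                              -> teams_list
#   /v0/teams/my-team                       -> teams_detail
#   /v0/topics/                             -> topics_list
#   /v0/topics/topic_1,<course_id>          -> topics_detail
#   /v0/team_membership/                    -> team_membership_list
#   /v0/team_membership/my-team,<username>  -> team_membership_detail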
|
miptliot/edx-platform
|
lms/djangoapps/teams/api_urls.py
|
Python
|
agpl-3.0
| 1,464
| 0
|
import unittest
from pytba import VERSION
from pytba import api as client
class TestApiMethods(unittest.TestCase):
def setUp(self):
client.set_api_key("WesJordan", "PyTBA-Unit-Test", VERSION)
def test__tba_get(self):
# Query with proper key should succeed
team = client.tba_get('team/frc2363')
self.assertEqual(team['key'], 'frc2363')
# Query with invalid key should fail
with self.assertRaises(TypeError):
client.tba_get('team/frc2363', app_id='invalid key')
def test__event_get(self):
event = client.event_get('2016tes')
self.assertEqual(len(event.teams), 75)
self.assertEqual(event.info['name'], 'Tesla Division')
self.assertEqual(len(event.matches), 140)
self.assertEqual(event.rankings[1][1], '2056')
def test__team_matches(self):
matches = client.team_matches('frc2363', 2016)
self.assertEqual(len(matches), 62)
self.assertEqual(matches[-1]['alliances']['opponent']['score'], 89)
if __name__ == '__main__':
unittest.main()
|
Ninjakow/TrueSkill
|
lib/pytba/test/api_test.py
|
Python
|
gpl-3.0
| 1,080
| 0.001852
|
input = """
a(1).
a(2) | a(3).
ok1 :- #max{V:a(V)} = 3.
b(3).
b(1) | b(2).
ok2 :- #max{V:b(V)} = 3.
"""
output = """
a(1).
a(2) | a(3).
ok1 :- #max{V:a(V)} = 3.
b(3).
b(1) | b(2).
ok2 :- #max{V:b(V)} = 3.
"""
|
veltri/DLV2
|
tests/parser/aggregates.max.propagation.7.test.py
|
Python
|
apache-2.0
| 213
| 0
|
'''
We all know the classic "guessing game" with higher or lower prompts. Let's do a role reversal: you create a program that will guess numbers between 1 and 100, and respond appropriately based on whether users say that the number is too high or too low. Try to make a program that can guess your number based on user input, and write great code!
'''
import numpy
got_answer = False
max = 100
min = 0
try_count = 0
while not got_answer:
try_count += 1
num = -1
while (num > 1) or (num < 0):
num = .125 * numpy.random.randn() + 0.5
print(num)
guess = int(((max - min) * num) + min)
print('1. Higher')
print('2. Correct!')
print('3. Lower')
print('\nIs your number {}'.format(guess))
response = input('> ')
if response == '2':
got_answer = True
if try_count > 1:
print('\nHurray! I guessed {} in {} tries!!!'.format(guess, try_count))
else:
            print('\nHurray! I guessed {} in the first try!!! WOOHOO!'.format(guess))
elif response == '1':
min = guess + 1
elif response == '3':
max = guess - 1
if min > max:
got_answer = True
print('ERROR! ERROR! ERROR! Master did not answer the questions properly!')
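# Note on the strategy above: instead of always bisecting at the midpoint,
# each guess scales a normally distributed sample (mean 0.5, stddev 0.125,
# re-drawn until it lands in [0, 1]) onto the remaining [min, max] range,
# so guesses cluster near the middle of the range but vary between runs.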
|
DayGitH/Python-Challenges
|
DailyProgrammer/20120209C.py
|
Python
|
mit
| 1,271
| 0.004721
|
import sht21
with sht21.SHT21(1) as sht21:
print "temp: %s"%sht21.read_temperature()
print "humi: %s"%sht21.read_humidity()
|
BollMose/daynote
|
test_sht.py
|
Python
|
apache-2.0
| 132
| 0.015152
|
import os
import sys
from src import impl as rlcs
import utils as ut
import analysis as anls
import matplotlib.pyplot as plt
import logging
import pickle as pkl
import time
config = ut.loadConfig('config')
sylbSimFolder=config['sylbSimFolder']
transFolder=config['transFolder']
lblDir=config['lblDir']
onsDir=config['onsDir']
resultDir=config['resultDir']
queryList = [['DHE','RE','DHE','RE','KI','TA','TA','KI','NA','TA','TA','KI','TA','TA','KI','NA'],['TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA'], ['TA','KI','TA','TA','KI','TA','TA','KI'], ['TA','TA','KI','TA','TA','KI'], ['TA', 'TA','KI', 'TA'],['KI', 'TA', 'TA', 'KI'], ['TA','TA','KI','NA'], ['DHA','GE','TA','TA']]
queryLenCheck = [4,6,8,16]
for query in queryList:
if len(query) not in queryLenCheck:
print 'The query is not of correct length!!'
sys.exit()
masterData = ut.getAllSylbData(tPath = transFolder, lblDir = lblDir, onsDir = onsDir)
res = anls.getPatternsInTransInGTPos(masterData, queryList)
|
swapnilgt/percPatternDiscovery
|
rlcs/preAnalysisRun.py
|
Python
|
agpl-3.0
| 1,024
| 0.06543
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Abstraction for array data structures."""
from numbers import Integral
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.runtime import Object, convert
from tvm.ir import PrimExpr, PointerType, PrimType
from . import _ffi_api
@tvm._ffi.register_object("tir.Buffer")
class Buffer(Object):
"""Symbolic data buffer in TVM.
    Buffer provides a way to represent the data layout
    specialization of a data structure in TVM.
Do not construct directly, use :py:func:`~decl_buffer` instead.
See the documentation of :py:func:`decl_buffer` for more details.
See Also
--------
decl_buffer : Declare a buffer
"""
READ = 1
WRITE = 2
def access_ptr(self, access_mask, ptr_type="handle", content_lanes=1, offset=0):
"""Get an access pointer to the head of buffer.
        This is the recommended method to get buffer data
        address when interacting with external functions.
Parameters
----------
access_mask : int
The access pattern MASK. Indicate whether the
access will read or write to the data content.
ptr_type : str, optional
            The data type of the result pointer. Do not specify
            unless you want to cast the pointer to a specific type.
content_lanes: int, optional
The number of lanes for the data type. This value
is greater than one for vector types.
offset: Expr, optional
The offset of pointer. We can use it to offset by
the number of elements from the address of ptr.
Examples
--------
.. code-block:: python
# Get access ptr for read
buffer.access_ptr("r")
# Get access ptr for read/write with bitmask
buffer.access_ptr(Buffer.READ | Buffer.WRITE)
# Get access ptr for read/write with str flag
buffer.access_ptr("rw")
# Get access ptr for read with offset
buffer.access_ptr("r", offset = 100)
"""
if isinstance(access_mask, string_types):
mask = 0
for value in access_mask:
if value == "r":
mask = mask | Buffer.READ
elif value == "w":
mask = mask | Buffer.WRITE
else:
raise ValueError("Unknown access_mask %s" % access_mask)
access_mask = mask
offset = convert(offset)
return _ffi_api.BufferAccessPtr(self, access_mask, ptr_type, content_lanes, offset)
def vload(self, begin, dtype=None):
"""Generate an Expr that loads dtype from begin index.
Parameters
----------
begin : Array of Expr
The beginning index in unit of Buffer.dtype
dtype : str
            The data type to be loaded;
            can be a vector type whose lanes are a multiple of Buffer.dtype's lanes
Returns
-------
load : Expr
The corresponding load expression.
"""
begin = (begin,) if isinstance(begin, (int, PrimExpr)) else begin
dtype = dtype if dtype else self.dtype
return _ffi_api.BufferVLoad(self, begin, dtype)
def vstore(self, begin, value):
"""Generate a Stmt that store value into begin index.
Parameters
----------
begin : Array of Expr
The beginning index in unit of Buffer.dtype
value : Expr
The value to be stored.
Returns
-------
store : Stmt
The corresponding store stmt.
"""
begin = (begin,) if isinstance(begin, (int, PrimExpr)) else begin
return _ffi_api.BufferVStore(self, begin, value)
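    # A minimal sketch of vload/vstore (assuming `buf` is a float32 Buffer
    # declared via decl_buffer below; the stored constant is illustrative):
    #   load = buf.vload((0,))                      # scalar load at index 0
    #   vec = buf.vload((0,), "float32x4")          # 4-lane vector load
    #   stmt = buf.vstore((0,), tvm.tir.FloatImm("float32", 1.0))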
def decl_buffer(
shape,
dtype=None,
name="buffer",
data=None,
strides=None,
elem_offset=None,
scope="",
data_alignment=-1,
offset_factor=0,
buffer_type="",
):
"""Declare a new symbolic buffer.
    Normally a buffer is created automatically during lower and build.
    This is only needed if users want to specify their own buffer layout.
See the note below for detailed discussion on usage of buffer.
Parameters
----------
shape : tuple of Expr
The shape of the buffer.
dtype : str, optional
The data type of the buffer.
name : str, optional
The name of the buffer.
data : Var, optional
The data pointer in the buffer.
strides: array of Expr
The stride of the buffer.
elem_offset: Expr, optional
The beginning offset of the array to data.
In terms of number of elements of dtype.
scope: str, optional
The storage scope of the buffer, if not global.
If scope equals empty string, it means it is global memory.
data_alignment: int, optional
The alignment of data pointer in bytes.
If -1 is passed, the alignment will be set to TVM's internal default.
offset_factor: int, optional
The factor of elem_offset field, when set,
elem_offset is required to be multiple of offset_factor.
        If 0 is passed, the alignment will be set to 1.
        If non-zero is passed, we will create a Var for elem_offset if elem_offset is not None.
buffer_type: str, optional, {"", "auto_broadcast"}
        auto_broadcast buffer allows one to implement broadcast computation
        without considering whether the dimension size equals one.
        TVM maps buffer[i][j][k] -> buffer[i][0][k] if dimension j's shape equals 1.
Returns
-------
buffer : Buffer
The created buffer
Example
-------
Here's an example of how broadcast buffer can be used to define a symbolic broadcast operation,
.. code-block:: python
m0, m1, m2 = te.var("m0"), te.var("m1"), te.var("m2")
n0, n1, n2 = te.var("n0"), te.var("n1"), te.var("n2")
o0, o1, o2 = te.var("o0"), te.var("o1"), te.var("o2")
A = te.placeholder((m0, m1, m2), name='A')
B = te.placeholder((n0, n1, n2), name='B')
C = te.compute((o0, o1, o2), lambda i, j, k: A[i, j, k] + B[i, j, k], name='C')
Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], target='llvm', name='bcast_add', binds={A:Ab, B:Bb})
ctx = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(2, 4, 3)).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=(2, 1, 3)).astype(B.dtype), ctx)
c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), ctx)
fadd(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
Note
----
Buffer data structure reflects the DLTensor structure in dlpack.
    While the DLTensor data structure is very general, it is usually helpful
    to create a function that only handles a specific case of data structure
    and let the compiled function benefit from it.
    If the user passes strides and elem_offset as None
    when constructing the function, then the function will be specialized
    for DLTensors that are compact and aligned.
    If the user passes a fully generic symbolic array as the strides,
    then the resulting function becomes fully generic.
"""
# pylint: disable=import-outside-toplevel
from .expr import Var
shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape
dtype = "float32" if dtype is None else dtype
strides = () if strides is None else strides
if offset_factor != 0 and elem_offset is None:
shape_dtype = shape[0].dtype if hasattr(shape[0], "dtype") else "int32"
elem_offset = Var("%s_elem_offset" % name, shape_dtype)
if data is None:
data = Var(name, PointerType(PrimType(dtype)))
return _ffi_api.Buffer(
data,
dtype,
shape,
strides,
elem_offset,
name,
scope,
data_alignment,
offset_factor,
buffer_type,
)
@tvm._ffi.register_object("tir.DataProducer")
class DataProducer(Object):
pass
|
tqchen/tvm
|
python/tvm/tir/buffer.py
|
Python
|
apache-2.0
| 9,009
| 0.001332
|
#-*- coding: utf-8 -*-
'''
Created on 23 mar 2014
@author: mariusz
@author: tomasz
'''
import unittest
from selearea import get_ast, get_workareas
class seleareaTest(unittest.TestCase):
def get_fc_pages(self):
urls = {
"http://fc.put.poznan.pl",
"http://fc.put.poznan.pl/rekrutacja/post-powanie-kwalifikacyjne%2C29.html",
"http://fc.put.poznan.pl/o-wydziale/witamy%2C39.html"
}
return [get_ast(url) for url in urls]
def get_fce_pages(self):
urls = {
"http://www.bis.put.poznan.pl/",
"http://www.bis.put.poznan.pl/evPages/show/id/182"
}
return [get_ast(url) for url in urls]
def get_identical_pages(self):
urls = {
"http://www.bis.put.poznan.pl/",
"http://www.bis.put.poznan.pl/"
}
return [get_ast(url) for url in urls]
def test_get_wrong_page(self):
url = "putpoznan.pl"
with self.assertRaises(ValueError):
get_ast(url)
def test_get_none_page(self):
with self.assertRaises(ValueError):
get_ast(None)
def test_get_workarea_identical_pages(self):
asts = self.get_identical_pages()
workareas = get_workareas(asts)
self.assertEqual(0, len(workareas), "AssertionFailed: work area found on identical pages.")
def test_get_ast_fc_count(self):
asts = self.get_fc_pages()
self.assertEqual(3, len(asts), "AssertionFailed: count for fc pages.")
def test_get_workarea_fc_content(self):
asts = self.get_fc_pages()
workareas = get_workareas(asts)
xpath = str("//html[@class='js']/body/div[@id='right']/div[@id='content']")
self.assertEqual(xpath, workareas[0], "AssertionFailed: xpaths for fc pages.")
def test_get_ast_fce_count(self):
asts = self.get_fce_pages()
self.assertEqual(2, len(asts), "AssertionFailed: count for fc pages.")
def test_get_workarea_fce_content(self):
asts = self.get_fce_pages()
workareas = get_workareas(asts)
xpath = str("//html/body/div[@id='main']/div/div[@id='left_menu']/div[@id='left_menu_box']")
self.assertEqual(xpath, workareas[1], "AssertionFailed: xpaths for fc pages.")
if __name__ == "__main__":
unittest.main()
|
perfidia/selearea
|
tests/seleareaTest.py
|
Python
|
mit
| 2,350
| 0.00383
|
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
import sys
import os
import logging
from twisted.python import log
# default configuration
SERVER = {
'port' : 8080,
}
LOGGING = {
# configure logging to sentry via raven
# 'raven': {
# 'dsn': 'twisted+http://a888206fd60f4307a7b1a880d1fe04fe:15ecf70787b0490880c712d8469459bd@localhost:9000/2'
# },
'console': {
'level': 'INFO'
}
}
# guess where the html might be...
try:
if not 'docroot' in SERVER:
path = os.path.dirname(sys.modules[__name__].__file__)
path = os.path.join(path, "data")
SERVER['docroot'] = path
except:
SERVER['docroot'] = None
class InverseFilter(logging.Filter):
def filter(self, record):
return not logging.Filter.filter(self, record)
def start_logging():
observer = log.PythonLoggingObserver()
observer.start()
for logtype, config in LOGGING.iteritems():
if logtype == "raven":
from raven.handlers.logging import SentryHandler
lvl = getattr(logging, config.get('level', 'info').upper())
handler = SentryHandler(config["dsn"])
handler.setLevel(lvl)
# don't try to log sentry errors with sentry
handler.addFilter(InverseFilter('sentry'))
logging.getLogger().addHandler(handler)
print "Starting sentry logging [%s] with destination %s"% (
config.get('level', 'info').upper(), config["dsn"])
elif logtype == 'console':
console = logging.StreamHandler()
lvl = getattr(logging, config.get('level', 'info').upper())
console.setLevel(lvl)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
print "Starting console logging [%s]" % config.get('level', 'info').upper()
|
SoftwareDefinedBuildings/smap
|
python/smap/smapconf.py
|
Python
|
bsd-2-clause
| 3,347
| 0.008664
|
# -*- coding: utf-8 -*-
'''
Created on 17/2/16.
@author: love
'''
import paho.mqtt.client as mqtt
import json
import ssl
def on_connect(client, userdata, flags, rc):
print("Connected with result code %d"%rc)
client.publish("Login/HD_Login/1", json.dumps({"userName": user, "passWord": "Hello,anyone!"}),qos=0,retain=False)
def on_message(client, userdata, msg):
print ('---------------')
print ("topic :"+msg.topic)
print ("payload :"+msg.payload)
client.subscribe([("chat",2),("aaa",2)])
client.unsubscribe(["chat"])
#client.publish("login/addUser", json.dumps({"user": user, "say": "Hello,anyone!"}),qos=2,retain=False)
#print(msg.topic+":"+str(msg.payload.decode()))
#print(msg.topic+":"+msg.payload.decode())
#payload = json.loads(msg.payload.decode())
#print(payload.get("user")+":"+payload.get("say"))
def mylog(self,userdata,level, buf):
print buf
if __name__ == '__main__':
client = mqtt.Client(protocol=mqtt.MQTTv31)
    client.username_pw_set("admin", "password") # Must be set, otherwise "Connected with result code 4" is returned
client.on_connect = on_connect
client.on_message = on_message
    # Connecting to the test server requires TLS; Python's TLS support is fairly limited.
    # A certificate is needed. Here we use the one provided at https://curl.haxx.se/docs/caextract.html
HOST = "mqant.com"
# client.tls_set(ca_certs="caextract.pem", certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED,
# tls_version=ssl.PROTOCOL_TLSv1, ciphers=None)
client.connect(HOST, 3563, 60)
#client.loop_forever()
    user = raw_input("Please enter a username: ")
client.user_data_set(user)
client.loop_start()
while True:
        s = raw_input("First type 'join' to join the room, then type any chat message:\n")
if s:
if s=="join":
client.publish("Chat/HD_JoinChat/2", json.dumps({"roomName": "mqant"}),qos=0,retain=False)
elif s=="start":
client.publish("Master/HD_Start_Process/2", json.dumps({"ProcessID": "001"}),qos=0,retain=False)
elif s=="stop":
client.publish("Master/HD_Stop_Process/2", json.dumps({"ProcessID": "001"}),qos=0,retain=False)
else:
client.publish("Chat/HD_Say/2", json.dumps({"roomName": "mqant","from":user,"target":"*","content": s}),qos=0,retain=False)
|
liangdas/mqantserver
|
client/mqtt_chat_client.py
|
Python
|
apache-2.0
| 2,416
| 0.021183
|