| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
WQuanfeng/wagtail | wagtail/wagtailadmin/menu.py | Python | bsd-3-clause | 4,777 | 0.002303
from __future__ import unicode_literals
from django.forms import MediaDefiningClass, Media
from django.forms.utils import flatatt
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from django.utils.six import text_type
from django.utils.six import with_metaclass
from wagtail.utils.compat import render_to_string
from wagtail.wagtailcore import hooks
class MenuItem(with_metaclass(MediaDefiningClass)):
template = 'wagtailadmin/shared/menu_item.html'
def __init__(self, label, url, name=None, classnames='', attrs=None, order=1000):
self.label = label
self.url = url
self.classnames = classnames
self.name = (name or slugify(text_type(label)))
self.order = order
if attrs:
self.attr_string = flatatt(attrs)
else:
self.attr_string = ""
def is_shown(self, request):
"""
Whether this menu item should be shown for the given request; permission
checks etc should go here. By default, menu items are shown all the time
"""
return True
def is_active(self, request):
return request.path.startswith(self.url)
def render_html(self, request):
return render_to_string(self.template, {
'name': self.name,
'url': self.url,
'classnames': self.classnames,
'attr_string': self.attr_string,
'label': self.label,
'active': self.is_active(request)
}, request=request)
class Menu(object):
def __init__(self, register_hook_name, construct_hook_name=None):
self.register_hook_name = register_hook_name
self.construct_hook_name = construct_hook_name
# _registered_menu_items will be populated on first access to the
# registered_menu_items property. We can't populate it in __init__ because
# we can't rely on all hooks modules to have been imported at the point that
# we create the admin_menu and settings_menu instances
self._registered_menu_items = None
@property
def registered_menu_items(self):
if self._registered_menu_items is None:
self._registered_menu_items = [fn() for fn in hooks.get_hooks(self.register_hook_name)]
return self._registered_menu_items
def menu_items_for_request(self, request):
return [item for item in self.registered_menu_items if item.is_shown(request)]
def active_menu_items(self, request):
return [item for item in self.menu_items_for_request(request) if item.is_active(request)]
@property
def media(self):
media = Media()
for item in self.registered_menu_items:
media += item.media
return media
def render_html(self, request):
menu_items = self.menu_items_for_request(request)
# provide a hook for modifying the menu, if construct_hook_name has been set
if self.construct_hook_name:
for fn in hooks.get_hooks(self.construct_hook_name):
fn(request, menu_items)
rendered_menu_items = []
for item in sorted(menu_items, key=lambda i: i.order):
        try:
            rendered_menu_items.append(item.render_html(request))
        except TypeError:
            # fallback for older render_html methods that don't accept a request arg
            rendered_menu_items.append(item.render_html())
return mark_safe(''.join(rendered_menu_items))
class SubmenuMenuItem(MenuItem):
    """A MenuItem which wraps an inner Menu object"""
    template = 'wagtailadmin/shared/menu_submenu_item.html'
def __init__(self, label, menu, **kwargs):
self.menu = menu
super(SubmenuMenuItem, self).__init__(label, '#', **kwargs)
@property
def media(self):
return Media(js=['wagtailadmin/js/submenu.js']) + self.menu.media
def is_shown(self, request):
# show the submenu if one or more of its children is shown
return bool(self.menu.menu_items_for_request(request))
def is_active(self, request):
return bool(self.menu.active_menu_items(request))
def render_html(self, request):
return render_to_string(self.template, {
'name': self.name,
            'url': self.url,
'classnames': self.classnames,
'attr_string': self.attr_string,
'menu_html': self.menu.render_html(request),
'label': self.label,
'request': request,
'active': self.is_active(request)
}, request=request)
admin_menu = Menu(register_hook_name='register_admin_menu_item', construct_hook_name='construct_main_menu')
settings_menu = Menu(register_hook_name='register_settings_menu_item')
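# A minimal sketch (not part of this file) of how the hook names above are used:
# a function registered under 'register_admin_menu_item' must return a MenuItem,
# matching the fn() calls in Menu.registered_menu_items. The label and URL here
# are hypothetical.
#
#   @hooks.register('register_admin_menu_item')
#   def register_example_menu_item():
#       return MenuItem('Example', '/admin/example/', order=10000)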
atruberg/django-custom | tests/model_validation/models.py | Python | bsd-3-clause | 614 | 0
from django.db import models
class ThingItem(object):
def __init__(self, value, display):
self.value = value
self.display = display
def __iter__(self):
return (x for x in [self.value, self.display])
def __len__(self):
return 2
class Things(object):
def __iter__(self):
return (x for x in [ThingItem(1, 2), ThingItem(3, 4)])
class ThingWithIterableChoices(models.Model):
# Testing choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
thing = models.CharField(max_length=100, blank=True, choices=Things())
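# For comparison, a hedged sketch (hypothetical model, not part of this file) of the
# conventional tuple-of-tuples form that the iterable of iterables above expands to,
# since each ThingItem(value, display) iterates as (value, display):
#
#   class ThingWithTupleChoices(models.Model):
#       thing = models.CharField(max_length=100, blank=True,
#                                choices=((1, 2), (3, 4)))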
hansomesong/First_LISP_measurement | LISP-Sonar.py | Python | gpl-2.0 | 9,113 | 0.006145
#!/usr/bin/python
#
# $Id: LISP-Sonar.py 18 2014-10-06 13:23:37Z ggx $
#
# -------------------------------Important Marks-------------------------------
# Surprisingly, we found that when the current Python script is executed manually,
# the dot symbol in a file path is recognized, but it is not recognized when the
# script is called via NEPI!
# Therefore, we work around this issue by explicitly replacing the dot symbol
# (meaning the current directory) in file paths with the current directory.
# We should also verify that the file paths given in the JSON file exist.
# -------------------------------End of Important Marks-------------------------
#Library import
import subprocess
import socket
import os
import sys
import time
import random
import threading
import json
import Queue
import ipaddress
import resource
from jsoncomment import JsonComment
#Custom import
from SonarPulse import Pulse, PulseTarget
#-------------------------------------------------------------------
# Variables and Setting
#
#Error Exit Value
ERR = 1
Revision = "$Revision: 18 $"
# Define Default Configuration File
# Note: avoid using the dot symbol (meaning the current directory in this context) in file
# paths, to ensure portability (we found that Python scripts called by NEPI do not
# recognize this symbol).
# Refer to https://infohost.nmt.edu/tcc/help/pubs/python/web/new-str-format.html for
# more information about Python string formatting.
CURRENTDIR = os.path.dirname(os.path.realpath(__file__))+'/' # for example : /Users/qipengsong/Documents/First_LISP_measurement
ConfigFile = '{0}LISP-Sonar-Config.json'.format(CURRENTDIR)
#-------------------------------------------------------------------
# SubRoutines
#
######
# Logs Directory & Files Verification
#
def BootstrapFilesCheck(TimeStamp):
#Check if the root log directory exists, if not create it.
itexists = os.path.isdir(LogRootDirectory)
if itexists == False :
try:
os.makedirs(LogRootDirectory)
except os.error:
print '=====> Critical Error: Creating ' + LogRootDirectory
sys.exit(ERR)
print '\tRoot Log Dir. [Created]\t: ' + LogRootDirectory
else:
print '\tRoot Log Dir. [Found]\t: ' + LogRootDirectory
#Get Date to check/create date-based directory tree
rundate = time.gmtime(TimeStamp)
DateDirectory = str(rundate.tm_year) + '/' + str(rundate.tm_mon) + '/' + str(rundate.tm_mday) +'/'
#Check if the date-based sub-directory exists, if not create it.
itexists = os.path.isdir(LogRootDirectory + DateDirectory)
if itexists == False :
try:
os.makedirs(LogRootDirectory + DateDirectory)
except os.error:
print '=====> Critical Error: Creating ' + LogRootDirectory + DateDirectory
sys.exit(ERR)
print '\tDate Directory [Created]: ' + LogRootDirectory + DateDirectory
else:
print '\tDate Directory [Found]\t: ' + LogRootDirectory + DateDirectory
return LogRootDirectory + DateDirectory
######
# Read a list from a file, shuffle its order, and return it
#
def LoadList(FILE):
try:
F = open( FILE, "r" )
except IOError:
print '=====> Critical Error:' + FILE + ' Not Found!!!'
sys.exit(ERR)
LLIST = F.read().split('\n')
F.close()
if LLIST.count('') > 0:
        # If a trailing empty line exists, remove it
LLIST.remove('')
# Randomize List so to not follow the same order at each experiment
random.shuffle(LLIST)
return LLIST
######
# Pulse Thread Class
#
class SonarThread (threading.Thread):
def __init__(self, threadID, tname, prqueue):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = tname
self.prqueue = prqueue
def run(self):
while True:
item = self.prqueue.get()
if item is None:
break # End Loop and finish thread
#print 'Thread ' + self.name + ' Working on: ' + str(item.eid) + '\n'
Evalue = Pulse(item)
if not (Evalue is None):
print '\tError \t(!)\t\t: ' + str(Evalue)
print >> sys.stderr, 'LISP-Sonar Error: ' + str(Evalue)
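# A hedged usage sketch (not in the original file) of the shutdown pattern implied by
# the None sentinel in SonarThread.run() above: once all work has been queued, putting
# one None per worker unblocks and terminates each thread.
#
#   for _ in threads:
#       PulseRequestQueue.put(None)
#   for t in threads:
#       t.join()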
#-------------------------------------------------------------------
# Main
#
TimeStamp = int(time.time())
print 'LISP-Sonar \t\t\t: ' + Revision
print '\tRun \t\t\t: '+ time.strftime("%d.%m.%Y %H:%M:%S")
# Identify Machine and Date to Mark Logs
HOST = socket.gethostname()
print '\tHost Name \t\t: ' + HOST
# Read Configuration File
if (len(sys.argv) > 2):
print '=====> Exiting! Too many arguments... \n'
sys.exit(ERR)
if (len(sys.argv) == 2):
#Always take the first argument as configuration file
ConfigFile = str(sys.argv[1])
try:
JsonFile = open(ConfigFile)
except:
print '=====> Exiting! Error opening configuration file: '+ConfigFile+'\n'
sys.exit(ERR)
Cfg = json.load(JsonFile)
JsonFile.close()
try:
# Remember to replace "CURRENTDIR" with real current directory path
# for example, for item "DirsConfig"
# "DirsConfig":
# {
# "LogRootDirectory":"CURRENTDIR/SonarOutput/",
# "MRListDirectory":"CURRENTDIR",
# "MRListFile":"MR-Current-List.txt",
# "EIDListDirectory":"CURRENTDIR",
# "EIDListFile":"EID-Current-List.txt"
# },
# Replace "CURRENTDIR" with variable CURRENTDIR defined at the beginning
LogRootDirectory = Cfg["DirsConfig"]["LogRootDirectory"].replace("$CURRENTDIR", CURRENTDIR)
MRListDirectory = Cfg["DirsConfig"]["MRListDirectory"].replace("$CURRENTDIR", CURRENTDIR)
MRListFile = Cfg["DirsConfig"]["MRListFile"]
    EIDListDirectory = Cfg["DirsConfig"]["EIDListDirectory"].replace("$CURRENTDIR", CURRENTDIR)
EIDListFile = Cfg["DirsConfig"]["EIDListFile"]
SpawnTimeGap = Cfg["ThreadSpawn"]["TimeGap"]
    SpawnRandomization = Cfg["ThreadSpawn"]["Randomization"]
SpawnMaxThreads = Cfg["ThreadSpawn"]["MaxThreads"]
LIGRequestTimeOut = Cfg["Lig"]["TimeOut"]
LIGMaxRetries = Cfg["Lig"]["MaxTries"]
LIGSrcAddr = Cfg["Lig"]["SourceAddress"]
except KeyError:
print '=====> Exiting! Configuration Error for '+str(sys.exc_value)+' in file '+ConfigFile+'\n'
sys.exit(ERR)
# Final directory where results of this instance will be written
InstanceDirectory = BootstrapFilesCheck(TimeStamp)
#Load and shuffle list of Map-Resolvers
MRList = LoadList(MRListDirectory + MRListFile)
print '\tMR List File \t\t: ' + MRListDirectory + MRListFile
print '\tMR Loaded \t\t: ' + str(len(MRList))
#Load and shuffle list of EID to lookup
EIDList = LoadList(EIDListDirectory + EIDListFile)
print '\tEID List File \t\t: ' + EIDListDirectory + EIDListFile
print '\tEID Loaded \t\t: ' + str(len(EIDList))
# Check Valid Source Address
if (LIGSrcAddr != "None"):
try:
LIGSrcIP = ipaddress.ip_address(LIGSrcAddr)
except ValueError:
print 'Not Valid Source Address: ' + LIGSrcAddr
sys.exit(ERR)
else:
LIGSrcIP = None
print '\tQuery Source Address \t: ' + str(LIGSrcIP)
# Spawn sonar threads
threads = []
threadID = 1
resource.setrlimit(resource.RLIMIT_NOFILE,(SpawnMaxThreads*4+256, resource.getrlimit(resource.RLIMIT_NOFILE)[1]))
PulseRequestQueue = Queue.Queue(SpawnMaxThreads)
for t in range(SpawnMaxThreads):
# Create the pool of threads
tName = 'Sonar Thread ' + `threadID`
thread = SonarThread(threadID, tName, PulseRequestQueue)
thread.start()
threads.append(thread)
threadID += 1
print '\tThreads [Now Working]\t: ' + str(SpawnMaxThreads) + ' [' + str(SpawnTimeGap) + ' +/- ' + str(SpawnRandomization) + ']'
for EID in EIDList:
for MR in MRList:
# Validate Addresses
try:
EIDIP = ipaddress.ip_address(EID)
except ValueError:
print 'Not Valid EID address: ' + str(EID)
print >> sys.stderr, 'Not Valid EID address: ' + str(EID)
continue
try:
MRIP = ipaddress.ip_address(MR)
except ValueError:
print 'Not Valid MR address: ' + str(MR)
print >> sys.stderr, 'Not Valid MR address: ' + str(MR)
continue
# Put Metadata for
vorwerkc/pymatgen | pymatgen/io/abinit/tests/test_abiobjects.py | Python | mit | 7,463 | 0.000804
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import warnings

import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV, bohr_to_ang
from pymatgen.io.abinit.abiobjects import *
from pymatgen.util.testing import PymatgenTest
class LatticeFromAbivarsTest(PymatgenTest):
def test_rprim_acell(self):
l1 = lattice_from_abivars(acell=3 * [10], rprim=np.eye(3))
self.assertAlmostEqual(l1.volume, bohr_to_ang ** 3 * 1000)
assert l1.angles == (90, 90, 90)
l2 = lattice_from_abivars(acell=3 * [10], angdeg=(90, 90, 90))
assert l1 == l2
l2 = lattice_from_abivars(acell=3 * [8], angdeg=(60, 60, 60))
abi_rprimd = (
np.reshape(
[
4.6188022,
0.0000000,
6.5319726,
-2.3094011,
4.0000000,
6.5319726,
-2.3094011,
-4.0000000,
6.5319726,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l2.matrix, abi_rprimd)
l3 = lattice_from_abivars(acell=[3, 6, 9], angdeg=(30, 40, 50))
abi_rprimd = (
np.reshape(
[
3.0000000,
0.0000000,
0.0000000,
3.8567257,
4.5962667,
0.0000000,
6.8944000,
4.3895544,
3.7681642,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l3.matrix, abi_rprimd)
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(90, 90, 90), rprim=np.eye(3))
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(-90, 90, 90))
def test_znucl_typat(self):
"""Test the order of typat and znucl in the Abinit input and enforce_typat, enforce_znucl."""
# Ga Ga1 1 0.33333333333333 0.666666666666667 0.500880 1.0
# Ga Ga2 1 0.66666666666667 0.333333333333333 0.000880 1.0
# N N3 1 0.333333333333333 0.666666666666667 0.124120 1.0
# N N4 1 0.666666666666667 0.333333333333333 0.624120 1.0
gan = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "abinit", "gan.cif"))
# By default, znucl is filled using the first new type found in sites.
def_vars = structure_to_abivars(gan)
def_znucl = def_vars["znucl"]
self.assertArrayEqual(def_znucl, [31, 7])
def_typat = def_vars["typat"]
self.assertArrayEqual(def_typat, [1, 1, 2, 2])
# But it's possible to enforce a particular value of typat and znucl.
enforce_znucl = [7, 31]
enforce_typat = [2, 2, 1, 1]
enf_vars = structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=enforce_typat)
self.assertArrayEqual(enf_vars["znucl"], enforce_znucl)
self.assertArrayEqual(enf_vars["typat"], enforce_typat)
self.assertArrayEqual(def_vars["xred"], enf_vars["xred"])
assert [s.symbol for s in species_by_znucl(gan)] == ["Ga", "N"]
for itype1, itype2 in zip(def_typat, enforce_typat):
assert def_znucl[itype1 - 1] == enforce_znucl[itype2 - 1]
with self.assertRaises(Exception):
structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=None)
class SpinModeTest(PymatgenTest):
def test_base(self):
polarized = SpinMode.as_spinmode("polarized")
other_polarized = SpinMode.as_spinmode("polarized")
unpolarized = SpinMode.as_spinmode("unpolarized")
polarized.to_abivars()
self.assertTrue(polarized is other_polarized)
self.assertTrue(polarized == other_polarized)
self.assertTrue(polarized != unpolarized)
# Test pickle
self.serialize_with_pickle(polarized)
# Test dict methods
self.assertMSONable(polarized)
self.assertMSONable(unpolarized)
class SmearingTest(PymatgenTest):
def test_base(self):
fd1ev = Smearing.as_smearing("fermi_dirac:1 eV")
fd1ev.to_abivars()
self.assertTrue(fd1ev)
same_fd = Smearing.as_smearing("fermi_dirac:" + str(1.0 / Ha_to_eV))
self.assertTrue(same_fd == fd1ev)
nosmear = Smearing.nosmearing()
assert nosmear == Smearing.as_smearing("nosmearing")
self.assertFalse(nosmear)
self.assertTrue(nosmear != fd1ev)
self.assertMSONable(nosmear)
new_fd1ev = Smearing.from_dict(fd1ev.as_dict())
self.assertTrue(new_fd1ev == fd1ev)
# Test pickle
self.serialize_with_pickle(fd1ev)
# Test dict methods
self.assertMSONable(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
def test_base(self):
algo = ElectronsAlgorithm(nstep=70)
abivars = algo.to_abivars()
# Test pickle
self.serialize_with_pickle(algo)
# Test dict methods
self.assertMSONable(algo)
class ElectronsTest(PymatgenTest):
def test_base(self):
default_electrons = Electrons()
self.assertTrue(default_electrons.nsppol == 2)
self.assertTrue(default_electrons.nspinor == 1)
self.assertTrue(default_electrons.nspden == 2)
abivars = default_electrons.to_abivars()
# new = Electron.from_dict(default_electrons.as_dict())
# Test pickle
self.serialize_with_pickle(default_electrons, test_eq=False)
custom_electrons = Electrons(
spin_mode="unpolarized",
smearing="marzari4:0.2 eV",
algorithm=ElectronsAlgorithm(nstep=70),
nband=10,
charge=1.0,
comment="Test comment",
)
# Test dict methods
self.assertMSONable(custom_electrons)
class KSamplingTest(PymatgenTest):
def test_base(self):
monkhorst = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)
gamma_centered = KSampling.gamma_centered((3, 3, 3), False, False)
monkhorst.to_abivars()
# Test dict methods
self.assertMSONable(monkhorst)
self.assertMSONable(gamma_centered)
class RelaxationTest(PymatgenTest):
def test_base(self):
atoms_and_cell = RelaxationMethod.atoms_and_cell()
atoms_only = RelaxationMethod.atoms_only()
atoms_and_cell.to_abivars()
# Test dict methods
self.assertMSONable(atoms_and_cell)
self.assertMSONable(atoms_only)
class PPModelTest(PymatgenTest):
def test_base(self):
godby = PPModel.as_ppmodel("godby:12 eV")
# print(godby)
# print(repr(godby))
godby.to_abivars()
self.assertTrue(godby)
same_godby = PPModel.as_ppmodel("godby:" + str(12.0 / Ha_to_eV))
self.assertTrue(same_godby == godby)
noppm = PPModel.get_noppmodel()
self.assertFalse(noppm)
self.assertTrue(noppm != godby)
new_godby = PPModel.from_dict(godby.as_dict())
self.assertTrue(new_godby == godby)
# Test pickle
self.serialize_with_pickle(godby)
# Test dict methods
self.assertMSONable(godby)
chienlieu2017/it_management | odoo/addons/point_of_sale/__manifest__.py | Python | gpl-3.0 | 2,501 | 0.0012
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Point of Sale',
'version': '1.0.1',
'category': 'Point Of Sale',
'sequence': 20,
    'summary': 'Touchscreen Interface for Shops',
'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
'depends': ['stock_account', 'barcodes'],
'data': [
'security/point_of_sale_security.xml',
'security/ir.model.access.csv',
'data/default_barcode_patterns.xml',
'wizard/pos_box.xml',
'wizard/pos_details.xml',
'wizard/pos_discount.xml',
'wizard/pos_open_statement.xml',
'wizard/pos_payment.xml',
'views/pos_templates.xml',
'views/point_of_sale_template.xml',
'views/point_of_sale_report.xml',
'views/point_of_sale_view.xml',
'views/pos_order_view.xml',
'views/product_view.xml',
'views/pos_category_view.xml',
'views/account_journal_view.xml',
'views/pos_config_view.xml',
'views/pos_session_view.xml',
'views/point_of_sale_sequence.xml',
'data/point_of_sale_data.xml',
'views/pos_order_report_view.xml',
'views/account_statement_view.xml',
'views/account_statement_report.xml',
'views/res_users_view.xml',
'views/res_partner_view.xml',
'views/res_config_view.xml',
'views/report_statement.xml',
'views/report_userlabel.xml',
'views/report_saledetails.xml',
'views/point_of_sale.xml',
'views/point_of_sale_dashboard.xml',
],
'demo': [
'data/point_of_sale_demo.xml',
],
'installable': True,
'application': True,
'qweb': ['static/src/xml/pos.xml'],
'website': 'https://www.odoo.com/page/point-of-sale',
}
zzz14/LOST-FOUND | wechat/wrapper.py | Python | gpl-3.0 | 10,069 | 0.002582
# -*- coding: utf-8 -*-
#
import datetime
import hashlib
import json
import logging
import random
import string
import time
import urllib.request
import xml.etree.ElementTree as ET
from LostAndFound.settings import WECHAT_TOKEN, WECHAT_APPID, WECHAT_SECRET
from django.http import Http404, HttpResponse
from django.template.loader import get_template
from LostAndFound import settings
from codex.baseview import BaseView
from wechat.models import Lost, Found, User
__author__ = "Epsirom"
class WeChatHandler(object):
logger = logging.getLogger('WeChat')
def __init__(self, view, msg, user):
"""
:type view: WeChatView
:type msg: dict
:type user: User or None
"""
self.input = msg
self.user = user
self.view = view
def check(self):
raise NotImplementedError('You should implement check() in sub-class of WeChatHandler')
def handle(self):
raise NotImplementedError('You should implement handle() in sub-class of WeChatHandler')
def get_context(self, **extras):
return dict(
FromUserName=self.input['ToUserName'],
ToUserName=self.input['FromUserName'],
**extras
)
def reply_text(self, content):
return get_template('text.xml').render(self.get_context(
Content=content
))
def reply_news(self, articles):
if len(articles) > 10:
self.logger.warn('Reply with %d articles, keep only 10', len(articles))
return get_template('news.xml').render(self.get_context(
Articles=articles[:10]
))
def reply_single_news(self, article):
return self.reply_news([article])
def get_message(self, name, **data):
if name.endswith('.html'):
name = name[: -5]
        return get_template('messages/' + name + '.html').render(dict(
handler=self, user=self.user, **data
))
def is_msg_type(self, check_type):
return self.input['MsgType'] == check_type
def is_text(self, *texts):
return self.is_msg_type('text') and (self.input['Content'].lower() in texts)
def is_event_click(self, *event_keys):
return self.is_msg_type('event') and (self.input['Event'] == 'CLICK') and (self.input['EventKey'] in event_keys)
def is_event(self, *events):
return self.is_msg_type('event') and (self.input['Event'] in events)
def is_text_command(self, *commands):
return self.is_msg_type('text') and ((self.input['Content'].split() or [None])[0] in commands)
def url_help(self):
return settings.get_url('u/help')
def url_lost_list(self):
return settings.get_url('u/lost/list', {'user': self.user.open_id})
def url_lost_new(self):
return settings.get_url('u/lost/new', {'user': self.user.open_id})
def url_found_list(self):
return settings.get_url('u/found/list', {'user': self.user.open_id})
def url_mine(self):
        return settings.get_url('u/mine', {'user': self.user.open_id})
class WeChatEmptyHandler(WeChatHandler):
def check(self):
return True
def handle(self):
return self.reply_text('The server is busy')
class WeChatError(Exception):
def __init__(self, errcode, errmsg, *args, **kwargs):
super(WeChatError, self).__init__(errmsg, *args, **kwargs)
self.errcode = errcode
self.errmsg = errmsg
def __repr__(self):
return '[errcode=%d] %s' % (self.errcode, self.errmsg)
class Sign:
def __init__(self, jsapi_ticket, url):
self.ret = {
'jsapi_ticket': jsapi_ticket,
'nonceStr': self.__create_nonce_str(),
'timestamp': self.__create_timestamp(),
'url': url
}
def __create_nonce_str(self):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
return int(time.time())
def sign(self):
string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
print(string)
self.ret['signature'] = hashlib.sha1(string.encode('utf-8')).hexdigest()
return self.ret
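# A hedged usage sketch (hypothetical ticket and URL, not in the original file) of
# the Sign class above; sign() returns the input fields plus a 'signature' key
# suitable for the JS-SDK wx.config:
#
#   ret = Sign('jsapi-ticket-value', 'https://example.com/page').sign()
#   assert set(ret) == {'jsapi_ticket', 'nonceStr', 'timestamp', 'url', 'signature'}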
class WeChatLib(object):
logger = logging.getLogger('wechatlib')
access_token = ''
access_token_expire = datetime.datetime.fromtimestamp(0)
jsapi_ticket = ''
jsapi_ticket_expire = datetime.datetime.fromtimestamp(0)
token = WECHAT_TOKEN
appid = WECHAT_APPID
secret = WECHAT_SECRET
def __init__(self, token, appid, secret):
super(WeChatLib, self).__init__()
self.token = token
self.appid = appid
self.secret = secret
def check_signature(self, signature, timestamp, nonce):
tmp_list = sorted([self.token, timestamp, nonce])
tmpstr = hashlib.sha1(''.join(tmp_list).encode('utf-8')).hexdigest()
return tmpstr == signature
@classmethod
def _http_get(cls, url):
req = urllib.request.Request(url=url)
res_data = urllib.request.urlopen(req)
res = res_data.read()
return res.decode()
@classmethod
def _http_post(cls, url, data):
req = urllib.request.Request(
url=url, data=data if isinstance(data, bytes) else data.encode()
)
res_data = urllib.request.urlopen(req)
res = res_data.read()
return res.decode()
@classmethod
def _http_post_dict(cls, url, data):
return cls._http_post(url, json.dumps(data, ensure_ascii=False))
@classmethod
def get_wechat_access_token(cls):
if datetime.datetime.now() >= cls.access_token_expire:
print("appid=%s secret=%s" %(cls.appid, cls.secret))
res = cls._http_get(
'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s' % (
cls.appid, cls.secret
)
)
rjson = json.loads(res)
if rjson.get('errcode'):
raise WeChatError(rjson['errcode'], rjson['errmsg'])
cls.access_token = rjson['access_token']
cls.access_token_expire = datetime.datetime.now() + datetime.timedelta(seconds=rjson['expires_in'] - 300)
cls.logger.info('Got access token %s', cls.access_token)
return cls.access_token
@classmethod
def get_wechat_jsapi_ticket(cls):
if datetime.datetime.now() >= cls.jsapi_ticket_expire:
at = cls.get_wechat_access_token()
print("access token=%s" %(at))
res = cls._http_get(
'https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token=%s&type=jsapi' % (at)
)
rjson = json.loads(res)
if rjson.get('errcode'):
raise WeChatError(rjson['errcode'], rjson['errmsg'])
cls.jsapi_ticket = rjson['ticket']
cls.jsapi_ticket_expire = datetime.datetime.now() + datetime.timedelta(seconds=rjson['expires_in'] - 300)
cls.logger.info('Got jsapi ticket %s', cls.jsapi_ticket)
return cls.jsapi_ticket
@classmethod
def get_wechat_wx_config(cls, url):
sign = Sign(cls.get_wechat_jsapi_ticket(), url)
config = sign.sign()
wx_config = {
'appId': settings.WECHAT_APPID,
'timestamp': config['timestamp'],
'nonceStr': config['nonceStr'],
'signature': config['signature']
}
return wx_config
def get_wechat_menu(self):
res = self._http_get(
'https://api.weixin.qq.com/cgi-bin/menu/get?access_token=%s' % (
self.get_wechat_access_token()
)
)
rjson = json.loads(res)
return rjson.get('menu', {}).get('button', [])
def set_wechat_menu(self, data):
res = self._http_post_dict(
'https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s' % (
self.get_wechat_access_token()
), data
)
rjson = json.loads(res)
if rjson.get('errcode'):
raise WeChatError(rjson['errcode'], rjson['errmsg'])
c
felix-dumit/campusbot | yowsup2/yowsup/layers/protocol_ib/layer.py | Python | mit | 861 | 0.002323
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import *
class YowIbProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"ib": (self.recvI
|
b, self.sendIb),
"iq": (None, self.sendIb)
}
super(YowIbProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Ib Layer"
def sendIb(self, entity):
        if entity.__class__ == CleanIqProtocolEntity:
self.toLower(entity.toProtocolTreeNode())
def recvIb(self, node):
if node.getChild("dirty"):
self.toUpper(DirtyIbProtocolEntity.fromProtocolTreeNode(node))
elif node.getChild("offline"):
self.toUpper(OfflineIbProtocolEntity.fromProtocolTreeNode(node))
else:
raise ValueError("Unkown ib node %s" % node)
nesdis/djongo | tests/django_tests/tests/v22/tests/admin_scripts/app_waiting_migration/migrations/0001_initial.py | Python | agpl-3.0 | 434 | 0.002304
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
        migrations.CreateModel(
            name='Bar',
            fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
]
johntellsall/shotglass | dec/test_release.py | Python | mit | 380 | 0
import datetime
import release
def test_release():
rel = release.Release("mysql-3.23.22-beta", "1234-05-06")
print(vars(rel))
assert vars(rel) == {
"raw_label": "mysql-3.23.22-beta",
"raw_date": "1234-05-06",
"majormin": "3.23",
"pre": "mysql-",
"pos
|
t": ".22-beta",
"date": datetime.datetime(1234, 5, 6, 0, 0),
}
lycheng/leetcode | others/count_primes.py | Python | mit | 488 | 0
# -*- coding: utf-8 -*-
class Solution(object):
    ''' https://leetcode.com/problems/count-primes/
    '''
def countPrimes(self, n):
if n <= 2:
return 0
is_prime = [True] * n
ret = 0
for i in range(2, n):
if not is_prime[i]:
continue
ret += 1
for m in range(2, n):
if i * m >= n:
continue
is_prime[i*m] = False
return ret
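# A small sanity sketch (not in the original file): the primes below 10 are 2, 3, 5, 7.
#
#   assert Solution().countPrimes(10) == 4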
cornell-brg/pymtl | pymtl/tools/integration/systemc_tests/sequential/RegIncrSC.py | Python | bsd-3-clause | 446 | 0.049327
#=======================================================================
# RegIncrSC.py
#=======================================================================
from pymtl import *
class RegIncrSC( SystemCModel ):
sclinetrace = True
def __init__( s ):
s.in_ = InPort ( Bits(32) )
s.out = OutPort( Bits(32) )
s.set_ports({
"clk" : s.clk,
"rst" : s.reset,
"in_" : s.in_
|
,
"out" : s.out,
})
nacc/autotest | client/tests/btreplay/btreplay.py | Python | gpl-2.0 | 4,454 | 0.003368
import time, os
from autotest.client import test, os_dep, utils
from autotest.client.shared import error
class btreplay(test.test):
version = 1
# http://brick.kernel.dk/snaps/blktrace-git-latest.tar.gz
def setup(self, tarball = 'blktrace-git-latest.tar.gz'):
tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
utils.extract_tarball_to_dir(tarball, self.srcdir)
self.job.setup_dep(['libaio'])
libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
cflags = '-I ' + self.autodir + '/deps/libaio/include'
var_libs = 'LIBS="' + libs + '"'
var_cflags = 'CFLAGS="' + cflags + '"'
self.make_flags = var_libs + ' ' + var_cflags
os.chdir(self.srcdir)
utils.system('patch -p1 < ../Makefile.patch')
utils.system(self.make_flags + ' make')
def initialize(self):
self.job.require_gcc()
self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
self.results = []
def run_once(self, dev="", devices="", extra_args='', tmpdir=None):
# @dev: The device against which the trace will be replayed.
# e.g. "sdb" or "md_d1"
# @devices: A space-separated list of the underlying devices
# which make up dev, e.g. "sdb sdc". You only need to set
# devices if dev is an MD, LVM, or similar device;
# otherwise leave it as an empty string.
if not tmpdir:
tmpdir = self.tmpdir
os.chdir(self.srcdir)
alldevs = "-d /dev/" + dev
alldnames = dev
for d in devices.split():
alldevs += " -d /dev/" + d
alldnames += " " + d
# convert the trace (assumed to be in this test's base
# directory) into btreplay's required format
#
# TODO: The test currently halts here as there is no trace in the
# test's base directory.
cmd = "./btreplay/btrecord -d .. -D %s %s" % (tmpdir, dev)
self.results.append(utils.system_output(cmd, retain_output=True))
# time a replay that omits "thinktime" between requests
# (by use of the -N flag)
cmd = self.ldlib + " /usr/bin/time ./btreplay/btreplay -d "+\
tmpdir+" -N -W "+dev+" "+extra_args+" 2>&1"
self.results.append(utils.system_output(cmd, retain_output=True))
# trace a replay that reproduces inter-request delays, and
# analyse the trace with btt to determine the average request
# completion latency
utils.system("./blktrace -D %s %s >/dev/null &" % (tmpdir, alldevs))
cmd = self.ldlib + " ./btreplay/btreplay -d %s -W %s %s" %\
(tmpdir, dev, extra_args)
self.results.append(utils.system_output(cmd, retain_output=True))
utils.system("killall -INT blktrace")
# wait until blktrace is really done
slept = 0.0
while utils.system("ps -C blktrace > /dev/null",
ignore_status=True) == 0:
time.sleep(0.1)
slept += 0.1
if slept > 30.0:
utils.system("killall -9 blktrace")
raise error.TestError("blktrace failed to exit in 30 seconds")
utils.system("./blkparse -q -D %s -d %s/trace.bin -O %s >/dev/null" %
(tmpdir, tmpdir, alldnames))
cmd = "./btt/btt -i %s/trace.bin" % tmpdir
self.results.append(utils.system_output(cmd, retain_output=True))
def postprocess(self):
for n in range(len(self.results)):
if self.results[n].strip() == "==================== All Devices ====================":
words = self.results[n-2].split()
                s = words[1].strip('sytem').split(':')
e = words[2].strip('elapsd').split(':')
break
systime = 0.0
for n in range(len(s)):
i = (len(s)-1) - n
systime += float(s[i]) * (60**n)
elapsed = 0.0
for n in range(len(e)):
i = (len(e)-1) - n
elapsed += float(e[i]) * (60**n)
q2c = 0.0
for line in self.results:
            words = line.split()
if len(words) < 3:
continue
if words[0] == 'Q2C':
q2c = float(words[2])
break
self.write_perf_keyval({'time':elapsed, 'systime':systime,
'avg_q2c_latency':q2c})
lidan-fnst/samba | source4/torture/drs/python/replica_sync.py | Python | gpl-3.0 | 36,840 | 0.004642
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Tests various schema replication scenarios
#
# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Usage:
# export DC1=dc1_dns_name
# export DC2=dc2_dns_name
# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN replica_sync -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
#
import drs_base
import samba.tests
import time
import ldb
from ldb import (
SCOPE_BASE, LdbError, ERR_NO_SUCH_OBJECT)
class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
"""Intended as a black box test case for DsReplicaSync
implementation. It should test the behavior of this
case in cases when inbound replication is disabled"""
def setUp(self):
super(DrsReplicaSyncTestCase, self).setUp()
self.ou1 = None
self.ou2 = None
def tearDown(self):
self._cleanup_object(self.ou1)
self._cleanup_object(self.ou2)
# re-enable replication
self._enable_inbound_repl(self.dnsname_dc1)
self._enable_inbound_repl(self.dnsname_dc2)
super(DrsReplicaSyncTestCase, self).tearDown()
def _cleanup_object(self, guid):
"""Cleans up a test object, if it still exists"""
if guid is not None:
try:
self.ldb_dc2.delete('<GUID=%s>' % guid, ["tree_delete:1"])
except LdbError, (num, _):
self.assertEquals(num, ERR_NO_SUCH_OBJECT)
try:
self.ldb_dc1.delete('<GUID=%s>' % guid, ["tree_delete:1"])
except LdbError, (num, _):
self.assertEquals(num, ERR_NO_SUCH_OBJECT)
def test_ReplEnabled(self):
"""Tests we can replicate when replication is enabled"""
self._enable_inbound_repl(self.dnsname_dc1)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=False)
def test_ReplDisabled(self):
"""Tests we cann't replicate when replication is disabled"""
self._disable_inbound_repl(self.dnsname_dc1)
ccache_name = self.get_creds_ccache_name()
# Tunnel the command line credentials down to the
# subcommand to avoid a new kinit
cmdline_auth = "--krb5-ccache=%s" % ccache_name
# bin/samba-tool drs <drs_command> <cmdline_auth>
cmd_list = ["drs", "replicate", cmdline_auth]
nc_dn = self.domain_dn
# bin/samba-tool drs replicate <Dest_DC_NAME> <Src_DC_NAME> <Naming Context>
cmd_list += [self.dnsname_dc1, self.dnsname_dc2, nc_dn]
(result, out, err) = self.runsubcmd(*cmd_list)
self.assertCmdFail(result)
self.assertTrue('WERR_DS_DRA_SINK_DISABLED' in err)
def test_ReplDisabledForced(self):
"""Tests we can force replicate when replication is disabled"""
self._disable_inbound_repl(self.dnsname_dc1)
out = self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
def test_ReplLocal(self):
"""Tests we can replicate direct to the local db"""
self._enable_inbound_repl(self.dnsname_dc1)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=False, local=True, full_sync=True)
def _create_ou(self, samdb, name):
ldif = """
dn: %s,%s
objectClass: organizationalUnit
""" % (name, self.domain_dn)
samdb.add_ldif(ldif)
res = samdb.search(base="%s,%s" % (name, self.domain_dn),
scope=SCOPE_BASE, attrs=["objectGUID"])
return self._GUID_string(res[0]["objectGUID"][0])
def _check_deleted(self, sam_ldb, guid):
# search the user by guid as it may be deleted
res = sam_ldb.search(base='<GUID=%s>' % guid,
controls=["show_deleted:1"],
attrs=["isDeleted", "objectCategory", "ou"])
self.assertEquals(len(res), 1)
ou_cur = res[0]
# Deleted Object base DN
dodn = self._deleted_objects_dn(sam_ldb)
# now check properties of the user
name_cur = ou_cur["ou"][0]
self.assertEquals(ou_cur["isDeleted"][0],"TRUE")
self.assertTrue(not("objectCategory" in ou_cur))
self.assertTrue(dodn in str(ou_cur["dn"]),
"OU %s is deleted but it is not located under %s!" % (name_cur, dodn))
def test_ReplConflictsFullSync(self):
"""Tests that objects created in conflict become conflict DNs (honour full sync override)"""
# First confirm local replication (so when we test against windows, this fails fast without creating objects)
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, local=True, forced=True, full_sync=True)
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC1 object created first
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Full Sync")
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Full Sync")
self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, local=True, forced=True, full_sync=True)
        # Check that DC2 got the DC1 object, and OU1 was made into a conflict
        res1 = self.ldb_dc2.search(base="<GUID=%s>" % self.ou1,
scope=SCOPE_BASE, attrs=["name"])
res2 = self.ldb_dc2.search(base="<GUID=%s>" % self.ou2,
scope=SCOPE_BASE, attrs=["name"])
print res1[0]["name"][0]
print res2[0]["name"][0]
self.assertFalse('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res1[0].dn))
self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res2[0].dn))
self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
# Delete both objects by GUID on DC2
self.ldb_dc2.delete('<GUID=%s>' % self.ou1)
self.ldb_dc2.delete('<GUID=%s>' % self.ou2)
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=True)
self._check_deleted(self.ldb_dc1, self.ou1)
self._check_deleted(self.ldb_dc1, self.ou2)
# Check deleted on DC2
self._check_deleted(self.ldb_dc2, self.ou1)
self._check_deleted(self.ldb_dc2, self.ou2)
def test_ReplConflictsRemoteWin(self):
"""Tests that objects created in conflict become conflict DNs"""
self._disable_inbound_repl(self.dnsname_dc1)
self._disable_inbound_repl(self.dnsname_dc2)
# Create conflicting objects on DC1 and DC2, with DC1 object created first
self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Conflict")
# We have to sleep to ensure that the two objects have different timestamps
time.sleep(1)
self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Conflict")
self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
        # Check that DC2 got the DC1 object, and OU1 was made into a conflict
res1 = self.ldb_dc1.search(base="<GUID=%s>" %
ktan2020/legacy-automation | win/Lib/ntpath.py | Python | mit | 18,605 | 0.002634
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of
|
pathname.
|
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
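# A hedged sanity sketch (not in the original file) of the drive-letter cases
# enumerated in the comments above:
#
#   assert join('c:', '/a') == 'c:/a'    # case 1
#   assert join('c:/', '/a') == 'c:/a'   # case 2
#   assert join('c:/a', '/b') == '/b'    # case 3
#   assert join('c:', 'd:/') == 'd:/'    # case 4
#   assert join('c:/', 'd:/') == 'd:/'   # case 5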
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
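# A hedged illustration (not in the original file) of the splits performed above:
#
#   assert splitunc('//host/mount/dir') == ('//host/mount', '/dir')
#   assert splitunc('c:/dir') == ('', 'c:/dir')  # drive letters never have a UNC part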
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and
richardcardona/learnpython-exercises | dirhelp.py | Python | apache-2.0 | 75 | 0.013333
#!/usr/bin/python
import urllib
print dir(urllib)
help(urllib.urlopen)
ilpianista/ansible | test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py | Python | gpl-3.0 | 6,304 | 0.001904
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mariadbdatabase_info
version_added: "2.9"
short_description: Get Azure MariaDB Database facts
description:
- Get facts of MariaDB Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
type: str
server_name:
description:
- The name of the server.
required: True
type: str
name:
description:
- The name of the database.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
- Matti Ranta (@techknowlogick)
'''
EXAMPLES = '''
- name: Get instance of MariaDB Database
azure_rm_mariadbdatabase_info:
resource_group: myResourceGroup
server_name: server_name
name: database_name
- name: List instances of MariaDB Database
azure_rm_mariadbdatabase_info:
resource_group: myResourceGroup
server_name: server_name
'''
RETURN = '''
databases:
description:
- A list of dictionaries containing facts for MariaDB Databases.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
ver/databases/db1"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testrg
server_name:
description:
- Server name.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: db1
charset:
description:
- The charset of the database.
returned: always
type: str
sample: UTF8
collation:
description:
- The collation of the database.
returned: always
type: str
sample: English_United States.1252
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.resource_group = None
self.server_name = None
self.name = None
super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13')
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.resource_group is not None and
self.server_name is not None and
self.name is not None):
self.results['databases'] = self.get()
elif (self.resource_group is not None and
self.server_name is not None):
self.results['databases'] = self.list_by_server()
return self.results
def get(self):
response = None
results = []
try:
response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
server_name=self.server_name,
database_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for Databases.')
if response is not None:
results.append(self.format_item(response))
return results
def list_by_server(self):
response = None
results = []
try:
response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
server_name=self.server_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
if response is not None:
for item in response:
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'resource_group': self.resource_group,
'server_name': self.server_name,
            'name': d['name'],
'charset': d['charset'],
'collation': d['collation']
}
return d
def main():
AzureRMMariaDbDatabaseInfo()
if __name__ == '__main__':
main()
hmpf/nav | python/nav/web/messages/urls.py | Python | gpl-3.0 | 1,573 | 0.000636
#
# Copyright (C) 2013 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Django URL configuration for messages tool"""
from django.conf.urls import url
from nav.web.messages import views
from nav.web.messages.feeds import ActiveMessagesFeed
urlpatterns = [
url(r'^$', views.redirect_to_active),
url(r'^active/$', views.active, name='messages-home'),
url(r'^create/$', views.save, name='messages-create'),
url(r'^edit/(?P<message_id>\d+)$', views.save, name='messages-edit'),
url(r'^active/$', views.active, name='messages-active'),
url(r'^scheduled/$', views.planned, name='messages-planned'),
url(r'^archive/$', views.historic, name='messages-historic'),
url(r'^view/(?P<message_id>\d+)$', views.view, name='messages-view'),
url(r'^expire/(?P<message_id>\d+)$', views.expire, name='messages-expire'),
url(r'^followup/(?P<message_id>\d+)$', views.followup, name='messages-followup'),
    url(r'^rss/$', ActiveMessagesFeed(), name='messages-rss'),
]
jpflori/mpir | yasm/tools/python-yasm/pyxelator/node.py | Python | gpl-3.0 | 8,966 | 0.019295
#!/usr/bin/env python
""" cdecl.py - parse c declarations
(c) 2002, 2003, 2004, 2005 Simon Burton <simon@arrowtheory.com>
Released under GNU LGPL license.
version 0.xx
"""
import string
class Node(list):
" A node in a parse tree "
def __init__(self,*items,**kw):
list.__init__( self, items )
self.lock1 = 0 # these two should be properties (simplifies serializing)
self.lock2 = 0
self.verbose = 0
for key in kw.keys():
self.__dict__[key] = kw[key]
def __str__(self):
attrs = []
for item in self:
if isinstance(item,Node):
attrs.append( str(item) )
else:
attrs.append( repr(item) )
attrs = ','.join(attrs)
return "%s(%s)"%(self.__class__.__name__,attrs)
def safe_repr( self, tank ):
tank[ str(self) ] = None
attrs = []
for item in self:
if isinstance(item,Node):
attrs.append( item.safe_repr(tank) ) # can we use repr here ?
else:
attrs.append( repr(item) )
# this is the dangerous bit:
for key, val in self.__dict__.items():
if isinstance(val,Node):
if str(val) not in tank:
attrs.append( '%s=%s'%(key,val.safe_repr(tank)) )
else:
attrs.append( '%s=%s'%(key,repr(val)) )
attrs = ','.join(attrs)
return "%s(%s)"%(self.__class__.__name__,attrs)
def __repr__(self):
#attrs = ','.join( [repr(item) for item in self] + \
# [ '%s=%s'%(key,repr(val)) for key,val in self.__dict__.items() ] )
#return "%s%s"%(self.__class__.__name__,tuple(attrs))
return self.safe_repr({})
def __eq__(self,other):
if not isinstance(other,Node):
return 0
if len(self)!=len(other):
return 0
for i in range(len(self)):
if not self[i]==other[i]:
return 0
return 1
def __ne__(self,other):
return not self==other
def filter(self,cls):
return [x for x in self if isinstance(x,cls)]
#return filter( lambda x:isinstance(x,cls), self )
def deepfilter(self,cls):
" bottom-up "
return [x for x in self.nodes() if isinstance(x,cls)]
def find(self,cls):
for x in self:
if isinstance(x,cls):
return x
return None
def deepfind(self,cls):
" bottom-up isinstance search "
for x in self:
if isinstance(x,Node):
if isinstance(x,cls):
return x
node = x.deepfind(cls)
if node is not None:
return node
if isinstance(self,cls):
return self
return None
def leaves(self):
for i in self:
if isinstance( i, Node ):
for j in i.leaves():
yield j
else:
yield i
def nodes(self):
" bottom-up iteration "
for i in self:
if isinstance( i, Node ):
for j in i.nodes():
yield j
yield self
def deeplen(self):
i=0
if not self.lock2:
self.lock2=1
for item in self:
i+=1
if isinstance(item,Node):
i+=item.deeplen()
self.lock2=0
else:
i+=1
return i
def deepstr(self,level=0,comment=False,nl='\n',indent=' '):
if self.deeplen() < 4:
nl = ""; indent = ""
#else:
#nl="\n"; indent = " "
s = []
if not self.lock1:
self.lock1=1
for item in self:
if isinstance(item,Node):
s.append( indent*(level+1)+item.deepstr(level+1,False,nl,indent) )
else:
s.append( indent*(level+1)+repr(item) )
self.lock1=0
else:
for item in self:
if isinstance(item,Node):
s.append( indent*(level+1)+"<recursion...>" )
else:
s.append( indent*(level+1)+"%s"%repr(item) )
s = "%s(%s)"%(self.__class__.__name__,nl+string.join(s,","+nl))
if comment:
s = '#' + s.replace('\n','\n#')
return s
def clone(self):
items = []
for item in self:
if isinstance(item,Node):
item = item.clone()
items.append(item)
# we skip any attributes...
return self.__class__(*items)
def fastclone(self):
# XX is it faster ???
#print "clone"
nodes = [self]
idxs = [0]
itemss = [ [] ]
while nodes:
assert len(nodes)==len(idxs)==len(itemss)
node = nodes[-1]
items = itemss[-1]
assert idxs[-1] == len(items)
while idxs[-1]==len(node):
# pop
_node = node.__class__( *items )
_node.__dict__.update( node.__dict__ )
nodes.pop(-1)
idxs.pop(-1)
itemss.pop(-1)
if not nodes:
#for node0 in self.nodes():
#for node1 in _node.nodes():
#assert node0 is not node1
#assert _node == self
return _node # Done !!
node = nodes[-1]
items = itemss[-1]
items.append(_node) # set
idxs[-1] += 1
assert idxs[-1] == len(items)
#assert idxs[-1] < len(node), str( (node,nodes,idxs,itemss) )
_node = node[ idxs[-1] ]
# while idxs[-1]<len(node):
if isinstance(_node,Node):
# push
nodes.append( _node )
idxs.append( 0 )
itemss.append( [] )
else:
# next
items.append(_node)
idxs[-1] += 1
assert idxs[-1] == len(items)
def expose(self,cls):
' expose children of any <cls> instance '
# children first
for x in self:
if isinstance(x,Node):
x.expose(cls)
# now the tricky bit
i=0
while i < len(self):
if isinstance(self[i],cls):
node=self.pop(i)
for x in node:
assert not isinstance(x,cls)
# pass on some attributes
if hasattr(node,'lines') and not hasattr(x,'lines'):
x.lines=node.lines
if hasattr(node,'file') and not hasattr(x,'file'):
x.file=node.file
self.insert(i,x) # expose
i=i+1
assert i<=len(self)
else:
i=i+1
def get_parent( self, item ): # XX 25% CPU time here XX
assert self != item
if item in self:
return self
for child in self:
if isinstance(child, Node):
parent = child.get_parent(item)
if parent is not None:
return parent
return None
def expose_node( self, item ):
assert self != item
parent = self.get_parent(item)
idx = parent.index( item )
parent[idx:idx+1] = item[:]
def delete(self,cls):
' delete any <cls> subtree '
for x in self:
if isinstance(x,Node):
x.delete(cls)
# now the tricky bit
i=0
while i < len(self):
if isinstance(self[i],cls):
self.pop(i)
else:
i=i+1
def deeprm(self,item):
' remove any items matching <item> '
for x in self:
if isinstance(x,Node):
x.deeprm(item)
# now the tricky bit
i=0
while i < len(self):
if self[i] == item:
self.pop(i)
            else:
                i=i+1
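# Editorial usage sketch (not in the original file): build a tiny tree and
# exercise the traversal helpers defined above.
if __name__ == '__main__':
    tree = Node(Node('a', 'b'), 'c')
    assert list(tree.leaves()) == ['a', 'b', 'c']
    assert len(tree.deepfilter(Node)) == 2  # the inner node plus the tree itself
    assert tree.clone() == tree             # clone() copies items but skips attributes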
|
jfoote/vulture
|
vlib/analyzers/reproducibility.py
|
Python
|
mit
| 2,072
| 0.005309
|
import json, logging, os, re, subprocess, shlex
from tools import get_category_by_status
log = logging.getLogger()
meta_files = ['Disassembly', 'Stacktrace', 'Registers',
'SegvAnalysis', 'ProcMaps', "BootLog" , "CoreDump",
"BootDmesg", "syslog", "UbiquityDebug.gz", "Casper.gz",
"UbiquityPartman.gz", "UbiquityDm.gz", "GdmLog", "XorgLog"
"log", "Log"]
def get(metadata, bugdir):
indicators = {}
# look for file arg; this needs work TODO
cmdline = None
uri = None
for line in metadata['description'].splitlines():
if "proccmdline" in line.lower():
cmdline = ":".join(line.split(":")[1:]).strip()
try:
toks = shlex.split(cmdline)
except ValueError as e:
log.error("error while parsing cmdline: %s" % cmdline)
log.exception(e)
continue
if len(toks) > 1:
if ("//" in toks[-1]) or ("." in toks[-1]):
uri = toks[-1].strip()
indicators['cmdline'] = cmdline
indicators['cmdline_uri'] = uri
# look for interesting attachments; ugly
interesting_files = []
for f in os.listdir(bugdir):
fpath = os.path.join(bugdir, f)
if not os.path.isfile(fpath):
continue
for fn in meta_files:
if fn.lower() in f.lower():
break
else:
            # no break in loop above, i.e. still interested
            out = subprocess.check_output(["file", fpath])
ftype = out.split(":")[-1]
if ftype.strip() == "empty":
continue
for tstr in ["ASCII", "text", "core file"]:
if tstr in ftype:
break
else:
# only runs if we didn't break, i.e., this might be interesting
interesting_files.append(f)
indicators['files'] = interesting_files
# TODO: look for recv, etc. in stacks (shoudl this be in exploitability maybe (remote?))
return indicators
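# Editorial note: the attachment filter above leans on Python's for/else --
# the else suite runs only when the loop finishes without `break`. The same
# pattern in isolation (names are illustrative):
#
#     for pattern in blocklist:
#         if pattern in name:
#             break
#     else:
#         keep(name)  # reached only when no pattern matched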
|
chouseknecht/ansible-container
|
container/k8s/base_config.py
|
Python
|
lgpl-3.0
| 932
| 0.001073
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ..config import BaseAnsibleContainerConfig
from ..utils.visibility import getLogger
logger = getLogger(__name__)
class K8sBaseConfig(BaseAnsibleContainerConfig):
@property
def image_namespace(self):
namespace = self.project_name
if self._config.get('settings', {}).get('k8s_namespace', {}).get('name'):
namespace = self._config['settings']['k8s_namespace']['name']
return namespace
def set_env(self, env):
super(K8sBaseConfig, self).set_env(env)
if self._config.get('volumes'):
for vol_key in self._config['volumes']:
# Remove settings not meant for this engine
for engine_name in self.remove_engines:
if engine_name in self._config['volumes'][vol_key]:
del self._config['volumes'][vol_key][engine_name]
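# Editorial sketch of the config shape the property above reads (keys mirror
# the lookups; the value is hypothetical):
#
#     self._config = {
#         'settings': {'k8s_namespace': {'name': 'my-namespace'}},
#     }
#
# With that override present image_namespace returns 'my-namespace';
# otherwise it falls back to self.project_name.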
|
rldotai/deepy
|
deepy/layers/recurrent.py
|
Python
|
mit
| 6,446
| 0.003258
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import NeuralLayer
from deepy.utils import build_activation, FLOATX
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
OUTPUT_TYPES = ["sequence", "one"]
INPUT_TYPES = ["sequence", "one"]
class RNN(NeuralLayer):
"""
Recurrent neural network layer.
"""
def __init__(self, hidden_size, input_type="sequence", output_type="sequence", vector_core=None,
hidden_activation="tanh", hidden_init=None, input_init=None, steps=None,
persistent_state=False, reset_state_for_input=None, batch_size=None,
go_backwards=False, mask=None, second_input_size=None, second_input=None):
super(RNN, self).__init__("rnn")
self._hidden_size = hidden_size
self.output_dim = self._hidden_size
self._input_type = input_type
self._output_type = output_type
self._hidden_activation = hidden_activation
self._hidden_init = hidden_init
        self._vector_core = vector_core
self._input_init = input_init
self.persistent_state = persistent_state
self.reset_state_for_input = reset_state_for_input
self.batch_size = batch_size
self._steps = steps
self._go_backwards = go_backwards
self._mask = mask.dimshuffle((1,0)) if mask else None
self._second_input_size = second_input_size
self._second_input = second_input
self._sequence_map = OrderedDict()
        if input_type not in INPUT_TYPES:
raise Exception("Input type of RNN is wrong: %s" % input_type)
if output_type not in OUTPUT_TYPES:
raise Exception("Output type of RNN is wrong: %s" % output_type)
if self.persistent_state and not self.batch_size:
raise Exception("Batch size must be set for persistent state mode")
if mask and input_type == "one":
raise Exception("Mask only works with sequence input")
def _hidden_preact(self, h):
return T.dot(h, self.W_h) if not self._vector_core else h * self.W_h
def step(self, *vars):
# Parse sequence
sequence_map = dict(zip(self._sequence_map.keys(), vars[:len(self._sequence_map)]))
if self._input_type == "sequence":
x = sequence_map["x"]
h = vars[-1]
# Reset part of the state on condition
if self.reset_state_for_input != None:
h = h * T.neq(x[:, self.reset_state_for_input], 1).dimshuffle(0, 'x')
# RNN core step
z = x + self._hidden_preact(h) + self.B_h
else:
h = vars[-1]
z = self._hidden_preact(h) + self.B_h
# Second input
if "second_input" in sequence_map:
z += sequence_map["second_input"]
new_h = self._hidden_act(z)
# Apply mask
if "mask" in sequence_map:
mask = sequence_map["mask"].dimshuffle(0, 'x')
new_h = mask * new_h + (1 - mask) * h
return new_h
def produce_input_sequences(self, x, mask=None, second_input=None):
self._sequence_map.clear()
if self._input_type == "sequence":
self._sequence_map["x"] = T.dot(x, self.W_i)
# Mask
if mask:
# (batch)
self._sequence_map["mask"] = mask
elif self._mask:
# (time, batch)
self._sequence_map["mask"] = self._mask
# Second input
if second_input:
self._sequence_map["second_input"] = T.dot(second_input, self.W_i2)
elif self._second_input:
self._sequence_map["second_input"] = T.dot(self._second_input, self.W_i2)
return self._sequence_map.values()
def produce_initial_states(self, x):
h0 = T.alloc(np.cast[FLOATX](0.), x.shape[0], self._hidden_size)
if self._input_type == "sequence":
if self.persistent_state:
h0 = self.state
else:
h0 = x
return [h0]
def output(self, x):
if self._input_type == "sequence":
# Move middle dimension to left-most position
# (sequence, batch, value)
sequences = self.produce_input_sequences(x.dimshuffle((1,0,2)))
else:
sequences = self.produce_input_sequences(None)
step_outputs = self.produce_initial_states(x)
hiddens, _ = theano.scan(self.step, sequences=sequences, outputs_info=step_outputs,
n_steps=self._steps, go_backwards=self._go_backwards)
# Save persistent state
if self.persistent_state:
self.register_updates((self.state, hiddens[-1]))
if self._output_type == "one":
return hiddens[-1]
elif self._output_type == "sequence":
return hiddens.dimshuffle((1,0,2))
def setup(self):
if self._input_type == "one" and self.input_dim != self._hidden_size:
raise Exception("For RNN receives one vector as input, "
"the hidden size should be same as last output dimension.")
self._setup_params()
self._setup_functions()
def _setup_functions(self):
self._hidden_act = build_activation(self._hidden_activation)
def _setup_params(self):
if not self._vector_core:
self.W_h = self.create_weight(self._hidden_size, self._hidden_size, suffix="h", initializer=self._hidden_init)
else:
self.W_h = self.create_bias(self._hidden_size, suffix="h")
self.W_h.set_value(self.W_h.get_value() + self._vector_core)
self.B_h = self.create_bias(self._hidden_size, suffix="h")
self.register_parameters(self.W_h, self.B_h)
if self.persistent_state:
self.state = self.create_matrix(self.batch_size, self._hidden_size, "rnn_state")
self.register_free_parameters(self.state)
else:
self.state = None
if self._input_type == "sequence":
self.W_i = self.create_weight(self.input_dim, self._hidden_size, suffix="i", initializer=self._input_init)
self.register_parameters(self.W_i)
if self._second_input_size:
self.W_i2 = self.create_weight(self._second_input_size, self._hidden_size, suffix="i2", initializer=self._input_init)
self.register_parameters(self.W_i2)
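# Editorial sketch (not part of deepy): the recurrence that step() computes,
# written in plain numpy for a single sequence. W_i, W_h and B_h mirror the
# parameters created in _setup_params(); vector_core, masking and the second
# input are omitted.
#
#     import numpy as np
#
#     def rnn_forward(xs, W_i, W_h, B_h):
#         h = np.zeros(W_h.shape[0])
#         for x in xs:  # xs has shape (time, input_dim)
#             h = np.tanh(x.dot(W_i) + h.dot(W_h) + B_h)
#         return h      # final hidden state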
|
philanthropy-u/edx-platform
|
common/djangoapps/util/testing.py
|
Python
|
agpl-3.0
| 6,042
| 0.001159
|
"""
Utility Mixins for unit tests
"""
import json
import sys
from django.conf import settings
from django.urls import clear_url_caches, resolve
from django.test import TestCase
from mock import patch
from util.db import CommitOnSuccessManager, OuterAtomic
class UrlResetMixin(object):
"""Mixin to reset urls.py before and after a test
Django memoizes the function that reads the urls module (whatever module
urlconf names). The module itself is also stored by python in sys.modules.
To fully reload it, we need to reload the python module, and also clear django's
cache of the parsed urls.
However, the order in which we do this doesn't matter, because neither one will
get reloaded until the next request
Doing this is expensive, so it should only be added to tests that modify settings
that affect the contents of urls.py
"""
URLCONF_MODULES = None
def reset_urls(self, urlconf_modules=None):
"""Reset `urls.py` for a set of Django apps."""
if urlconf_modules is None:
urlconf_modules = [settings.ROOT_URLCONF]
if self.URLCONF_MODULES is not None:
urlconf_modules.extend(self.URLCONF_MODULES)
for urlconf in urlconf_modules:
if urlconf in sys.modules:
reload(sys.modules[urlconf])
clear_url_caches()
# Resolve a URL so that the new urlconf gets loaded
resolve('/')
def setUp(self):
"""Reset Django urls before tests and after tests
If you need to reset `urls.py` from a particular Django app (or apps),
specify these modules by setting the URLCONF_MODULES class attribute.
Examples:
# Reload only the root urls.py
URLCONF_MODULES = None
# Reload urls from my_app
            URLCONF_MODULES = ['myapp.urls']
            # Reload urls from my_app and another_app
            URLCONF_MODULES = ['myapp.urls', 'another_app.urls']
"""
super(UrlResetMixin, self).setUp()
self.reset_urls()
self.addCleanup(self.reset_urls)
class EventTestMixin(object):
"""
Generic mixin for verifying that events were emitted during a test.
"""
def setUp(self, tracker):
super(EventTestMixin, self).setUp()
patcher = patch(tracker)
self.mock_tracker = patcher.start()
self.addCleanup(patcher.stop)
def assert_no_events_were_emitted(self):
"""
Ensures no events were emitted since the last event related assertion.
"""
self.assertFalse(self.mock_tracker.emit.called) # pylint: disable=maybe-no-member
def assert_event_emitted(self, event_name, **kwargs):
"""
Verify that an event was emitted with the given parameters.
"""
self.mock_tracker.emit.assert_any_call( # pylint: disable=maybe-no-member
event_name,
kwargs
)
def assert_event_emission_count(self, event_name, expected_count):
"""
Verify that the event with the given name was emitted
a specific number of times.
"""
actual_count = 0
for call_args in self.mock_tracker.emit.call_args_list:
if call_args[0][0] == event_name:
                actual_count += 1
self.assertEqual(actual_count, expected_count)
def reset_tracker(self):
"""
Reset the mock tracker in order to forget about old events.
"""
self.mock_tracker.reset_mock()
    def get_latest_call_args(self):
"""
Return the arguments of the latest call to emit.
"""
return self.mock_tracker.emit.call_args[0]
class PatchMediaTypeMixin(object):
"""
Generic mixin for verifying unsupported media type in PATCH
"""
def test_patch_unsupported_media_type(self):
response = self.client.patch(
self.url,
json.dumps({}),
content_type=self.unsupported_media_type
)
self.assertEqual(response.status_code, 415)
def patch_testcase():
"""
Disable commit_on_success decorators for tests in TestCase subclasses.
Since tests in TestCase classes are wrapped in an atomic block, we
cannot use transaction.commit() or transaction.rollback().
https://docs.djangoproject.com/en/1.8/topics/testing/tools/#django.test.TransactionTestCase
"""
def enter_atomics_wrapper(wrapped_func):
"""
Wrapper for TestCase._enter_atomics
"""
wrapped_func = wrapped_func.__func__
def _wrapper(*args, **kwargs):
"""
Method that performs atomic-entering accounting.
"""
CommitOnSuccessManager.ENABLED = False
OuterAtomic.ALLOW_NESTED = True
if not hasattr(OuterAtomic, 'atomic_for_testcase_calls'):
OuterAtomic.atomic_for_testcase_calls = 0
OuterAtomic.atomic_for_testcase_calls += 1
return wrapped_func(*args, **kwargs)
return classmethod(_wrapper)
def rollback_atomics_wrapper(wrapped_func):
"""
Wrapper for TestCase._rollback_atomics
"""
wrapped_func = wrapped_func.__func__
def _wrapper(*args, **kwargs):
"""
Method that performs atomic-rollback accounting.
"""
CommitOnSuccessManager.ENABLED = True
OuterAtomic.ALLOW_NESTED = False
OuterAtomic.atomic_for_testcase_calls -= 1
return wrapped_func(*args, **kwargs)
return classmethod(_wrapper)
# pylint: disable=protected-access
TestCase._enter_atomics = enter_atomics_wrapper(TestCase._enter_atomics)
TestCase._rollback_atomics = rollback_atomics_wrapper(TestCase._rollback_atomics)
def patch_sessions():
"""
Override the Test Client's session and login to support safe cookies.
"""
from openedx.core.djangoapps.safe_sessions.testing import safe_cookie_test_session_patch
safe_cookie_test_session_patch()
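# Editorial usage sketch (the test class and tracker path are hypothetical):
#
#     class MyViewTest(EventTestMixin, TestCase):
#         def setUp(self):
#             super(MyViewTest, self).setUp('myapp.views.tracker')
#
#         def test_event(self):
#             ...  # exercise a view that emits events
#             self.assert_event_emitted('myapp.some.event', foo='bar')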
|
Yayg/rift
|
tests/Acceptance_Tests/main.py
|
Python
|
mit
| 97
| 0
|
#! /usr/bin/env python2
import rift
rift.init("main.so")
print(rift.call(lib.main, rift.c_int))
|
greggy/pylessons
|
exam12.py
|
Python
|
lgpl-2.1
| 2,149
| 0.005573
|
# -*- coding: utf-8 -*-
# --------------------------------------------------
# Task 1
# --------------------------------------------------
"""
Напишите функцию-генератор, которая будет принимать
последовательность, где каждый элемент кортеж с двумя
значениями (длинна катетов треугольника) и возвращать
длинну гипотенузы.
"""
l = [(8, 4), (5, 7), (9, 2), (5, 4), (6, 4)]
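# A possible solution sketch for Task 1 (editorial; not part of the exam):
#
#     import math
#
#     def hypotenuses(pairs):
#         for a, b in pairs:
#             yield math.hypot(a, b)
#
#     list(hypotenuses(l))  # hypotenuse for each tuple in l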
# --------------------------------------------------
# Task 2
# --------------------------------------------------
"""
Напишите генератор-выражение, которое будет вычислять
и возвращать длинну окружности. Каждый элемент является
радиусом.
"""
l = [7, 9.06, 44, 21.3, 6, 10.00001, 53]
# --------------------------------------------------
# Task 3
# --------------------------------------------------
"""
Напишите пример реализации встроенной функции filter.
"""
def myfilter1(fun, l):
pass
# --------------------------------------------------
# Task 4
# --------------------------------------------------
"""
Напишите пример реализации встроенной функции reduce.
"""
def myreduce1(fun, l):
pass
# --------------------------------------------------
# Task 5
# --------------------------------------------------
"""
Перепишите функции из задач 3 и 4 так, чтобы они
стали генераторами.
"""
def myfilter2(fun, l):
pass
def myreduce21(fun, l):
pass
# --------------------------------------------------
# Task 6
# --------------------------------------------------
"""
Перепишите вашу реализацию функций filter и map из
урока так, чтоб вторым аргументом принималось любое
количество последовательностей.
"""
|
pre-commit/pre-commit
|
pre_commit/languages/coursier.py
|
Python
|
mit
| 2,157
| 0
|
from __future__ import annotations
import contextlib
import os
from typing import Generator
from typing import Sequence
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import PatchesT
from pre_commit.envcontext import Var
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import clean_path_on_failure
ENVIRONMENT_DIR = 'coursier'
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None: # pragma: win32 no cover
helpers.assert_version_default('coursier', version)
helpers.assert_no_additional_deps('coursier', additional_dependencies)
envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))
channel = prefix.path('.pre-commit-channel')
with clean_path_on_failure(envdir):
for app_descriptor in os.listdir(channel):
_, app_file = os.path.split(app_descriptor)
app, _ = os.path.splitext(app_file)
helpers.run_setup_cmd(
prefix,
(
'cs',
'install',
'--default-channels=false',
f'--channel={channel}',
app,
f'--dir={envdir}',
),
)
def get_env_patch(target_dir: str) -> PatchesT: # pragma: win32 no cover
return (
('PATH', (target_dir, os.pathsep, Var('PATH'))),
)
@contextlib.contextmanager
def in_env(
prefix: Prefix,
) -> Generator[None, None, None]: # pragma: win32 no cover
target_dir = prefix.path(
helpers.environment_dir(ENVIRONMENT_DIR, get_default_version()),
)
with envcontext(get_env_patch(target_dir)):
yield
def run_hook(
hook: Hook,
file_args: Sequence[str],
color: bool,
) -> tuple[int, bytes]: # pragma: win32 no cover
with in_env(hook.prefix):
return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
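# Editorial note: get_env_patch() above prepends the coursier env dir to
# PATH, so inside in_env() the `cs`-installed launchers win name resolution;
# conceptually PATH becomes '<envdir>' + os.pathsep + <previous PATH>.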
|
ColumbiaCMB/kid_readout
|
kid_readout/equipment/record_all_old.py
|
Python
|
bsd-2-clause
| 2,287
| 0.006996
|
import time
import threading
import logging
import serial
import io
import sim900
import sys
if __name__ == "__main__":
#this is a bad file for recording the diode temps and voltages
#eventually it will be merged with recording the resistance bridges
#and actually use the sim900 file functions
#create an instance of the sim900 commands
sim = sim900.sim900()
#main function to records temps
try:
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = "/home/heather/SRS/%s.txt" % timestr
f = open(filename, 'w+')
# The column headers for rox 3 were the opposite of the written data until 2014-10-10:
f.write("time, diode ch1 temp, dio ch 2 temp, dio 3 temp, dio 4 temp, dio 1 volts, dio 2 volts, dio 3 volts, dio 4 volts, rox 1 temp, rox 1 res, rox 2 temp, rox 2 res, rox 3 temp, rox 3 res\n")
while 1:
#get diode info
sim.connect_sim922()
dio_temps = sim.get_sim922_temp()
dio_temps = dio_temps.rstrip()
time.sleep(1)
dio_volts = sim.get_sim922_volts()
dio_volts = dio_volts.rstrip()
sim.close_sim922()
print "diode"
time.sleep(1)
#get rox1 info
sim.connect_sim921_1()
            rox1_res = sim.get_resistance()
rox1_temp = sim.get_temp()
sim.close_sim921_1()
print "rox1"
time.sleep(1)
sim.connect_sim921()
rox2_res = sim.get_resistance()
            rox2_temp = sim.get_temp()
sim.close_sim921()
#get rox3 info
sim.connect_sim921_6()
rox3_res = sim.get_resistance()
rox3_temp = sim.get_temp()
sim.close_sim921_6()
print "rox2"
time.sleep(1)
#write it all to file
current_time = time.strftime("%Y%m%d-%H%M%S")
f.write("%s, %s, %s, %s, %s, %s, %s, %s, %s\n" % (current_time, dio_temps, dio_volts, rox1_temp, rox1_res, rox2_temp, rox2_res, rox3_temp, rox3_res))
f.flush()
except KeyboardInterrupt:
f.close()
print "done writing"
sim.close_sim922()
sim.close_sim900()
print "ports closed"
|
igboyes/virtool
|
tests/subtractions/test_files.py
|
Python
|
mit
| 775
| 0
|
import virtool.subtractions.files
from sqlalchemy import select
from virtool.subtractions.models import SubtractionFile
async def test_create_subtraction_files(snapshot, tmp_path, pg, pg_session):
test_dir = tmp_path / "subtractions" / "foo"
test_dir.mkdir(parents=True)
test_dir.joinpath("subtraction.fa.gz").write_text("FASTA file")
test_dir.joinpath("subtractio
|
n.1.bt2").write_text("Bowtie2 file")
subtraction_files = ["subtraction.fa.gz", "subtraction.1.bt2"]
await virtool.subtractions.files.create_subtraction_files(
pg, "foo", subtraction_files, test_dir
)
rows = list()
async with pg_session as session:
assert (
await session.execute(select(SubtractionFile))
).scalars().all() == snapshot
|
plotly/python-api
|
packages/python/plotly/plotly/validators/layout/_violinmode.py
|
Python
|
mit
| 516
| 0.001938
|
import _plotly_utils.basevalidators
class ViolinmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    def __init__(self, plotly_name="violinmode", parent_name="layout", **kwargs):
super(ViolinmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["group", "overlay"]),
**kwargs
)
|
dwwkelly/configs
|
srv/salt/laptop/config/ipython/profile_default/ipython_qtconsole_config.py
|
Python
|
gpl-2.0
| 23,719
| 0.003289
|
# Configuration file for ipython-qtconsole.
c = get_config()
#------------------------------------------------------------------------------
# IPythonQtConsoleApp configuration
#------------------------------------------------------------------------------
# IPythonQtConsoleApp will inherit config from: BaseIPythonApplication,
# Application, IPythonConsoleApp, ConnectionFileMixin
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPythonQtConsoleApp.ip = '127.0.0.1'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPythonQtConsoleApp.verbose_crash = False
# Start the console window maximized.
# c.IPythonQtConsoleApp.maximize = False
# The date format used by logging formatters for %(asctime)s
# c.IPythonQtConsoleApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPythonQtConsoleApp.shell_port = 0
# The SSH server to use to connect to the kernel.
# c.IPythonQtConsoleApp.sshserver = ''
# set the stdin (DEALER) port [default: random]
# c.IPythonQtConsoleApp.stdin_port = 0
# Set the log level by value or name.
# c.IPythonQtConsoleApp.log_level = 30
# Path to the ssh key to use for logging in to the ssh server.
# c.IPythonQtConsoleApp.sshkey = ''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPythonQtConsoleApp.extra_config_file = u''
# Whether to create profile dir if it doesn't exist
# c.IPythonQtConsoleApp.auto_create = False
# path to a custom CSS stylesheet
# c.IPythonQtConsoleApp.stylesheet = ''
# set the heartbeat port [default: random]
# c.IPythonQtConsoleApp.hb_port = 0
# Whether to overwrite existing config files when copying
# c.IPythonQtConsoleApp.overwrite = False
# set the iopub (PUB) port [default: random]
# c.IPythonQtConsoleApp.iopub_port = 0
# The IPython profile to use.
# c.IPythonQtConsoleApp.profile = u'default'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security-
# dir of the current profile, but can be specified by absolute path.
# c.IPythonQtConsoleApp.connection_file = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.IPythonQtConsoleApp.confirm_exit = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPythonQtConsoleApp.ipython_dir = u'/home/devin/.config/ipython'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPythonQtConsoleApp.copy_config_files = False
# Connect to an already running kernel
# c.IPythonQtConsoleApp.existing = ''
# Use a plaintext widget instead of rich text (plain can't print/save).
# c.IPythonQtConsoleApp.plain = False
# Start the console window with the menu bar hidden.
# c.IPythonQtConsoleApp.hide_menubar = False
# The Logging format template
# c.IPythonQtConsoleApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#
# c.IPythonQtConsoleApp.transport = 'tcp'
#------------------------------------------------------------------------------
# IPythonWidget configuration
#------------------------------------------------------------------------------
# A FrontendWidget for an IPython kernel.
# IPythonWidget will inherit config from: FrontendWidget, HistoryConsoleWidget,
# ConsoleWidget
# The type of completer to use. Valid values are:
#
# 'plain' : Show the available completion as a text list
# Below the editing area.
# 'droplist': Show the completion in a drop down list navigable
# by the arrow keys, and from which you can select
# completion by pressing Return.
# 'ncurses' : Show the completion as a text list which is navigable by
# `tab` and arrow keys.
# c.IPythonWidget.gui_completion = 'ncurses'
# Whether to process ANSI escape codes.
# c.IPythonWidget.ansi_codes = True
# A CSS stylesheet. The stylesheet can contain classes for:
# 1. Qt: QPlainTextEdit, QFrame, QWidget, etc
# 2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
# 3. IPython: .error, .in-prompt, .out-prompt, etc
# c.IPythonWidget.style_sheet = u''
# The height of the console at start time in number of characters (will double
# with `vsplit` paging)
# c.IPythonWidget.height = 25
#
# c.IPythonWidget.out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
#
# c.IPythonWidget.input_sep = '\n'
# Whether to draw information calltips on open-parentheses.
# c.IPythonWidget.enable_calltips = True
#
# c.IPythonWidget.in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
# The width of the console at start time in number of characters (will double
# with `hsplit` paging)
# c.IPythonWidget.width = 81
# A command for invoking a system text editor. If the string contains a
# {filename} format specifier, it will be used. Otherwise, the filename will be
# appended to the end the command.
# c.IPythonWidget.editor = ''
# If not empty, use this Pygments style for syntax highlighting. Otherwise, the
# style sheet is queried for Pygments style information.
# c.IPythonWidget.syntax_style = u''
# The font family to use for the console. On OSX this defaults to Monaco, on
# Windows the default is Consolas with fallback of Courier, and on other
# platforms the default is Monospace.
# c.IPythonWidget.font_family = u''
#
# c.IPythonWidget.output_sep2 = ''
# Whether to automatically execute on syntactically complete input.
#
# If False, Shift-Enter is required to submit each execution. Disabling this is
# mainly useful for non-Python kernels, where the completion check would be
# wrong.
# c.IPythonWidget.execute_on_complete_input = True
# The maximum number of lines of text before truncation. Specifying a non-
# positive number disables text truncation (not recommended).
# c.IPythonWidget.buffer_size = 500
#
# c.IPythonWidget.history_lock = False
#
# c.IPythonWidget.banner = u''
# The type of underlying text widget to use. Valid values are 'plain', which
# specifies a QPlainTextEdit, and 'rich', which specifies a QTextEdit.
# c.IPythonWidget.kind = 'plain'
# Whether to ask for user confirmation when restarting kernel
# c.IPythonWidget.confirm_restart = True
# The font size. If unconfigured, Qt will be entrusted with the size of the
# font.
# c.IPythonWidget.font_size = 0
# The editor command to use when a specific line number is requested. The string
# should contain two format specifiers: {line} and {filename}. If this parameter
# is not specified, the line number option to the %edit magic will be ignored.
# c.IPythonWidget.editor_line = u''
# Whether to clear the console when the kernel is restarted
# c.IPythonWidget.clear_on_kernel_restart = True
# The type of paging to use. Valid values are:
#
# 'inside' : The widget pages like a traditional terminal.
# 'hsplit' : When paging is requested, the widget is split
# horizontally. The top pane contains the console, and the
# bottom pane contains the paged text.
#     'vsplit' : Similar to 'hsplit', except that a vertical splitter is
#                used.
# 'custom' : No action is taken by the widget beyond emitting a
# 'custom_page_requested(str)' signal.
# 'none' : The text is written directly to the console.
# c.IPythonWidget.paging = 'inside'
#
# c.IPythonWidget.output_sep = ''
#------------------------------------------------------------------------------
|
DESHRAJ/fjord
|
fjord/base/views.py
|
Python
|
bsd-3-clause
| 6,831
| 0
|
import logging
import socket
from functools import wraps
from django.conf import settings
from django.http import (
Http404,
HttpResponse,
HttpResponseForbidden,
HttpResponseRedirect
)
from django.shortcuts import render
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from celery.messaging import establish_connection
from elasticsearch.exceptions import ConnectionError, NotFoundError
from mobility.decorators import mobile_template
from fjord.base.models import Profile
from fjord.base.urlresolvers import reverse
from fjord.search.index import get_index, get_index_stats
log = logging.getLogger('i.services')
@mobile_template('{mobile/}new_user.html')
def new_user_view(request, template=None):
if request.user.is_anonymous():
# This is the AnonymousUser and they shouldn't be here
# so push them to the dashboard.
return HttpResponseRedirect(reverse('dashboard'))
try:
# If they have a profile, then this doesn't throw an error
# and we can let them see the new user view again, but it's
# not particularly interesting.
request.user.profile
except Profile.DoesNotExist:
# They aren't anonymous and don't have a profile, so create
# a profile for them.
#
# We could do more with this, but we're not at the moment.
Profile.objects.create(user=request.user)
next_url = request.GET.get('next', reverse('dashboard'))
if not is_safe_url(next_url):
next_url = reverse('dashboard')
return render(request, template, {
'next_url': next_url,
})
@mobile_template('{mobile/}login_failure.html')
def login_failure(request, template=None):
return render(request, template)
@mobile_template('{mobile/}csrf_failure.html')
def csrf_failure(request, reason='', template=None):
return HttpResponseForbidden(
render(request, template),
content_type='text/html'
)
def about_view(request):
template = 'about.html'
return render(request, template)
def robots_view(request):
"""Generate a robots.txt."""
template = render(request, 'robots.txt')
return HttpResponse(template, content_type='text/plain')
def contribute_view(request):
"""Generate a contribute.json."""
template = render(request, 'contribute.json')
return HttpResponse(template, content_type='application/json')
def test_memcached(host, port):
"""Connect to memcached.
:returns: True if test passed, False if test failed.
"""
    # Create the socket before the try block so `s` is always defined when
    # the finally clause closes it.
    s = socket.socket()
    try:
        s.connect((host, port))
return True
except Exception as exc:
log.critical('Failed to connect to memcached (%r): %s' %
                     ((host, port), exc))
return False
finally:
s.close()
def dev_or_authorized(func):
"""Show view for admin and developer instances, else 404"""
@wraps(func)
def _dev_or_authorized(request, *args, **kwargs):
        if (request.user.is_superuser
or settings.SHOW_STAGE_NOTICE
or settings.DEBUG):
return func(request, *args, **kwargs)
raise Http404
return _dev_or_authorized
ERROR = 'ERROR'
INFO = 'INFO'
@dev_or_authorized
@never_cache
def monitor_view(request):
"""View for services monitor."""
# Dict of infrastructure name -> list of output tuples of (INFO,
# msg) or (ERROR, msg)
status = {}
# Note: To add a new component, do your testing and then add a
# name -> list of output tuples map to status.
# Check memcached.
memcache_results = []
try:
for cache_name, cache_props in settings.CACHES.items():
result = True
backend = cache_props['BACKEND']
location = cache_props['LOCATION']
# LOCATION can be a string or a list of strings
if isinstance(location, basestring):
location = location.split(';')
if 'memcache' in backend:
for loc in location:
# TODO: this doesn't handle unix: variant
ip, port = loc.split(':')
result = test_memcached(ip, int(port))
memcache_results.append(
(INFO, '%s:%s %s' % (ip, port, result)))
if not memcache_results:
memcache_results.append((ERROR, 'memcache is not configured.'))
elif len(memcache_results) < 2:
memcache_results.append(
(ERROR, ('You should have at least 2 memcache servers. '
'You have %s.' % len(memcache_results))))
else:
memcache_results.append((INFO, 'memcached servers look good.'))
except Exception as exc:
memcache_results.append(
(ERROR, 'Exception while looking at memcached: %s' % str(exc)))
status['memcached'] = memcache_results
# Check ES.
es_results = []
try:
get_index_stats()
es_results.append(
(INFO, ('Successfully connected to ElasticSearch and index '
'exists.')))
except ConnectionError as exc:
es_results.append(
(ERROR, 'Cannot connect to ElasticSearch: %s' % str(exc)))
except NotFoundError:
es_results.append(
(ERROR, 'Index "%s" missing.' % get_index()))
except Exception as exc:
es_results.append(
(ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))
status['ElasticSearch'] = es_results
# Check RabbitMQ.
rabbitmq_results = []
try:
rabbit_conn = establish_connection(connect_timeout=2)
rabbit_conn.connect()
rabbitmq_results.append(
(INFO, 'Successfully connected to RabbitMQ.'))
except (socket.error, IOError) as exc:
rabbitmq_results.append(
(ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))
except Exception as exc:
rabbitmq_results.append(
(ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))
status['RabbitMQ'] = rabbitmq_results
status_code = 200
status_summary = {}
for component, output in status.items():
if ERROR in [item[0] for item in output]:
status_code = 500
status_summary[component] = False
else:
status_summary[component] = True
return render(request, 'services/monitor.html',
{'component_status': status,
'status_summary': status_summary},
status=status_code)
class IntentionalException(Exception):
pass
@dev_or_authorized
def throw_error(request):
"""Throw an error for testing purposes."""
raise IntentionalException("Error raised for testing purposes.")
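# Editorial usage sketch for the health check above (values are hypothetical):
#
#     test_memcached('127.0.0.1', 11211)  # True if memcached accepts the connect
#
# monitor_view() aggregates such checks and responds with HTTP 500 whenever
# any component reports an ERROR tuple.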
|
trdean/grEME
|
gr-video-sdl/python/video_sdl/__init__.py
|
Python
|
gpl-3.0
| 1,138
| 0.004394
|
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Blocks and utilities for Video SDL module
'''
# The presence of this file turns this directory into a Python package
import os
try:
from video_sdl_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from video_sdl_swig import *
|
JayvicWen/Crawler
|
3che/crawler.py
|
Python
|
mit
| 4,816
| 0.004156
|
#!/usr/bin/env python
#encoding:utf-8
import os
import sys
import requests
import MySQLdb
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
if len(sys.argv) != 4:
print 'Invalid parameters!'
exit(1)
print '=' * 60
print 'start:', sys.argv
aim_category_id = int(sys.argv[1])
start_point = (int(sys.argv[2]), int(sys.argv[3]))
immediate_download = False
base_url = 'http://www.3che.com'
session = requests.Session()
username = ''
password = ''
record = {
'category': '',
'detail_category': '',
'post_url': '',
'filename': '',
'url': ''
}
sql_cnt = 0
connection = None
cursor = None
def record_to_mysql():
global sql_cnt, connection, cursor
    if sql_cnt % 20 == 0:
if connection:
connection.commit()
connection.close()
cursor.close()
connection = MySQLdb.connect(host='', user='', passwd='', db='', port=3306, charset='utf8')
cursor = connection.cursor()
sql_cnt += 1
cursor.execute('insert into san_che(`category`, `detail_category`, `post_url`, `filename`, `url`) values (%s, %s, %s, %s, %s)',
                   (record['category'], record['detail_category'], record['post_url'], record['filename'], record['url']))
def login():
login_path = '/member.php?mod=logging&action=login&loginsubmit=yes&infloat=yes&lssubmit=yes&inajax=1'
session.post(base_url + login_path, {'username': username, 'password': password})
def enter_directory(name):
if immediate_download:
if not os.path.exists(name):
os.mkdir(name)
os.chdir(name)
def get_soup(url, parse_only=None):
text = session.get(url).text
return BeautifulSoup(text, 'lxml', parse_only=parse_only)
def download_file(url, filename):
print 'Downloading:', filename, '=>', url
record['url'] = url
record['filename'] = filename
if immediate_download:
with open(filename, 'w') as fp:
res = requests.get(url)
fp.write(res.content)
fp.close()
else:
record_to_mysql()
def crawl_file(url, filename):
try:
soup = get_soup(url, SoupStrainer(id='attachpayform'))
attach_form = soup.find('form', id='attachpayform')
link = attach_form.table.find_all('a')[-1]
except Exception as e:
print 'Error! file url:', url
else:
download_file(link['href'], filename)
# Crawl detail data of one post.
def crawl_detail(detail_category, title, detail_url):
print '-' * 100
print 'Crawling Post:', detail_category, title, '=>', detail_url
record['detail_category'] = detail_category
record['post_url'] = detail_url
# Enter detail directory.
enter_directory(detail_category)
prefix = detail_url.rsplit('/', 1)[-1].split('.', 1)[0]
enter_directory(prefix + title)
soup = get_soup(detail_url, SoupStrainer('p', {'class': 'attnm'}))
attnms = soup.find_all('p', {'class': 'attnm'})
for attnm in attnms:
url = '{0}/{1}'.format(base_url, attnm.a['href'])
crawl_file(url, attnm.a.text.strip(u'[下载]'))
# Leave detail directory.
if immediate_download:
os.chdir('../..')
# Crawl data of one category.
def crawl_category(category, list_url):
print '=' * 100
print 'Crawling category:', category, '=>', list_url
record['category'] = category
# Create corresponding directory and enter.
enter_directory(category)
cur_page_id = 0
url = list_url
while url is not None:
cur_page_id += 1
print 'Crawling page url:', url
soup = get_soup(url, SoupStrainer('span'))
xsts = soup.find_all('span', {'class': 'xst'})
if cur_page_id >= start_point[0]:
cur_in_page_id = 0
for xst in xsts:
cur_in_page_id += 1
detail = xst.find('a', {'class': 'xst'})
if cur_page_id > start_point[0] or cur_in_page_id >= start_point[1]:
crawl_detail(xst.em and xst.em.a.text or '', detail.text, detail['href'])
page_footer = soup.find('span', id='fd_page_top')
next_link = page_footer.label.next_sibling
if next_link is not None:
url = next_link['href']
else:
url = None
# Leave the directory.
if immediate_download:
os.chdir('..')
if __name__ == '__main__':
login()
# Extract categories from home page.
soup = get_soup(base_url, SoupStrainer(id='nv'))
category_lis = soup.find('div', id='nv').ul.find_all('li')
categories = map(lambda x: (x.a.text, x.a['href']), category_lis)
categories = filter(lambda x: x[1] != '/', categories)
crawl_category(categories[aim_category_id][0], categories[aim_category_id][1])
# for category in categories:
# crawl_category(category[0], category[1])
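# Editorial note -- the invocation shape implied by the sys.argv parsing at
# the top of this script (values are hypothetical): a category index followed
# by the (page, in-page) start point:
#
#     python crawler.py 2 1 1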
|
srohatgi/cloud
|
huntnet/manage.py
|
Python
|
apache-2.0
| 250
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "huntnet.settings")
    from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
edofic/ggrc-core
|
src/ggrc_risks/models/threat.py
|
Python
|
apache-2.0
| 847
| 0.004723
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc import db
from ggrc.models.mixins import CustomAttributable, BusinessObject, Timeboxed
from ggrc.models.object_document import Documentable
from ggrc.models.object_person import Personable
from ggrc.models.object_owner import Ownable
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState, track_state_for_class
class Threat(
HasObjectState, CustomAttributable, Documentable, Personable,
    Relatable, Timeboxed, Ownable, BusinessObject, db.Model):
__tablename__ = 'threats'
_aliases = {
"contact": {
"display_name": "Contact",
"filter_by": "_filter_by_contact",
},
"secondary_contact": None,
"url": "Threat URL",
}
|
mozillazg/chendian-plus
|
chendian/member/migrations/0008_auto_20150502_1013.py
|
Python
|
mit
| 697
| 0.002869
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
        ('member', '0007_auto_20150501_2124'),
]
operations = [
migrations.AddField(
model_name='member',
name='avatar',
field=models.URLField(default='https://dn-tmp.qbox.me/chendian/cat_mouse_reading.jpg', verbose_name='\u5934\u50cf', blank=True),
),
migrations.AlterField(
model_name='member',
name='description',
            field=models.TextField(default='', verbose_name='\u4e2a\u4eba\u4ecb\u7ecd', blank=True),
),
]
|
MattD830/Python-INFO1-CE9990
|
graphpaper2.py
|
Python
|
gpl-3.0
| 968
| 0.007231
|
"""
I came up with this the first try. So, that's why this is posted in duplicate.
"""
import sys
try:
columns = int(input("How many columns? "))
rows = int(input("How many rows? "))
tall = int(input("How tall should the boxes be? "))
wide = int(input("How wide should the boxes be? "))
except Exception as e:
print(e)
print("You have fail")
print("Try type valid integer")
sys.exit(1)
i = 0
j = 0
k = 0
m = 0
while j <= rows:
print("+",end="")
while k < columns:
while i < wide:
print("-",end="")
i += 1
print("+",end="")
i = 0
k += 1
print('\r')
k = 0
if j < rows:
        while m < tall:
print("|",end="")
while k < columns:
print(" "*wide,end="")
print("|",end="")
k += 1
k = 0
m += 1
print("\r")
m = 0
j += 1
sys.exit(0)
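# Editorial note -- sample output for columns=2, rows=1, tall=1, wide=3:
#
#     +---+---+
#     |   |   |
#     +---+---+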
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTranslasiSanusiMe.py
|
Python
|
bsd-3-clause
| 550
| 0.034545
|
def extractTranslasiSanusiMe(item):
'''
Parser for 'translasi.sanusi.me'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
		('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
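# Editorial usage sketch (the item is hypothetical; the helper functions come
# from the surrounding WebMirror codebase):
#
#     item = {'title': 'Some Novel - Chapter 12', 'tags': ['PRC']}
#     extractTranslasiSanusiMe(item)
#     # -> a 'translated' release message via buildReleaseMessageWithType,
#     #    or None/False when the title lacks vol/chp info or is a preview.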
|
kingvuplus/PKT-gui2
|
lib/python/Screens/CCcamInfo.py
|
Python
|
gpl-2.0
| 51,455
| 0.030609
|
# -*- coding: UTF-8 -*-
# CCcam Info by AliAbdul
from base64 import encodestring
from os import listdir, remove, rename, system, path
from enigma import eListboxPythonMultiContent, eTimer, gFont, loadPNG, RT_HALIGN_RIGHT, getDesktop
from Components.ActionMap import ActionMap, NumberActionMap
from Components.config import config, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.Label import Label
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest, MultiContentEntryPixmapAlphaBlend
from Components.ScrollLabel import ScrollLabel
from Screens.HelpMenu import HelpableScreen
#from Screens.InfoBar import InfoBar
from Screens.LocationBox import LocationBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Tools.Directories import fileExists, SCOPE_ACTIVE_SKIN, resolveFilename
from twisted.internet import reactor
from twisted.web.client import HTTPClientFactory
from urlparse import urlparse, urlunparse
#TOGGLE_SHOW = InfoBar.toggleShow
VERSION = "v2"
DATE = "21.11.2014"
CFG = "/etc/CCcam.cfg"
#############################################################
def _parse(url):
url = url.strip()
parsed = urlparse(url)
scheme = parsed[0]
path = urlunparse(('','') + parsed[2:])
host, port = parsed[1], 80
if '@' in host:
username, host = host.split('@')
if ':' in username:
username, password = username.split(':')
else:
password = ""
else:
username = ""
password = ""
if ':' in host:
host, port = host.split(':')
port = int(port)
if path == "":
path = "/"
return scheme, host, port, path, username, password
def getPage(url, contextFactory=None, *args, **kwargs):
scheme, host, port, path, username, password = _parse(url)
if username and password:
url = scheme + '://' + host + ':' + str(port) + path
basicAuth = encodestring("%s:%s" % (username, password))
authHeader = "Basic " + basicAuth.strip()
AuthHeaders = {"Authorization": authHeader}
if kwargs.has_key("headers"):
kwargs["headers"].update(AuthHeaders)
else:
kwargs["headers"] = AuthHeaders
factory = HTTPClientFactory(url, *args, **kwargs)
reactor.connectTCP(host, port, factory)
return factory.deferred
#############################################################
class HelpableNumberActionMap(NumberActionMap):
def __init__(self, parent, context, actions, prio):
alist = []
adict = {}
for (action, funchelp) in actions.iteritems():
alist.append((action, funchelp[1]))
adict[action] = funchelp[0]
NumberActionMap.__init__(self, [context], adict, prio)
parent.helpList.append((self, context, alist))
#############################################################
TranslationHelper = [
["Current time", _("Current time")],
["NodeID", _("NodeID")],
["Uptime", _("Uptime")],
["Connected clients", _("Connected clients")],
["Active clients", _("Active clients")],
["Total handled client ecm's", _("Total handled client ecm's")],
["Total handled client emm's", _("Total handled client emm's")],
["Peak load (max queued requests per workerthread)", _("Peak load (max queued requests per workerthread)")],
["card reader", _("card reader")],
["no or unknown card inserted", _("no or unknown card inserted")],
["system:", _("system:")],
["caid:", _("caid:")],
["provider:", _("provider:")],
["provid:", _("provid:")],
["using:", _("using:")],
["address:", _("address:")],
["hops:", _("hops:")],
["pid:", _("pid:")],
["share:", _("share:")],
["handled", _("handled")],
[" and", _(" and")],
["card", _("card")],
["Cardserial", _("Cardserial")],
["ecm time:", _("ecm time:")]]
def translateBlock(block):
for x in TranslationHelper:
if block.__contains__(x[0]):
block = block.replace(x[0], x[1])
return block
#############################################################
def getConfigValue(l):
list = l.split(":")
ret = ""
if len(list) > 1:
ret = (list[1]).replace("\n", "").replace("\r", "")
if ret.__contains__("#"):
idx = ret.index("#")
ret = ret[:idx]
while ret.startswith(" "):
ret = ret[1:]
while ret.endswith(" "):
ret = ret[:-1]
return ret
#############################################################
def notBlackListed(entry):
try:
f = open(config.cccaminfo.blacklist.value, "r")
content = f.read().split("\n")
f.close()
except:
content = []
ret = True
for x in content:
if x == entry:
ret = False
return ret
#############################################################
menu_list = [
_("General"),
_("Clients"),
_("Active clients"),
_("Servers"),
_("Shares"),
_("Share View"),
_("Extended Shares"),
_("Providers"),
_("Entitlements"),
_("ecm.info"),
_("Menu config"),
_("Local box"),
_("Remote box"),
_("Free memory"),
_("Switch config"),
_("About")]
#############################################################
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_on.png")):
lock_on = loadPNG(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_on.png"))
else:
lock_on = loadPNG("/usr/share/enigma2/skin_default/icons/lock_on.png")
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_off.png")):
lock_off = loadPNG(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_off.png"))
else:
lock_off = loadPNG("/usr/share/enigma2/skin_default/icons/lock_off.png")
def getConfigNameAndContent(fileName):
try:
f = open(fileName, "r")
content = f.read()
f.close()
except:
content = ""
if content.startswith("#CONFIGFILE NAME="):
content = content.replace("\r", "\n")
name = content[17:]
idx = name.index("\n")
name = name[:idx]
else:
name = fileName.replace("/var/etc/", "")
return name, content
#############################################################
class CCcamList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(25)
self.l.setFont(0, gFont("Regular", 20))
self.l.setFont(1, gFont("Regular", 32))
class CCcamShareList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(60)
self.l.setFont(0, gFont("Regular", 18))
self.l.setFont(1, gFont("Regular", 32))
class CCcamConfigList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(30)
self.l.setFont(0, gFont("Regular", 20))
self.l.setFont(1, gFont("Regular", 32))
class CCcamShareViewList(MenuList):
    def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
        self.l.setItemHeight(20)
self.l.setFont(0, gFont("Regular", 18))
self.l.setFont(1, gFont("Regular", 32))
def CCcamListEntry(name, idx):
screenwidth = getDesktop(0).size().width()
res = [name]
if idx == 10:
idx = "red"
elif idx == 11:
idx = "green"
elif idx == 12:
idx = "yellow"
elif idx == 13:
idx = "blue"
elif idx == 14:
idx = "menu"
elif idx == 15:
idx = "info"
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % str(idx))):
png = resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % str(idx))
else:
png = "/usr/share/enigma2/skin_default/buttons/key_%s.png" % str(idx)
if screenwidth and screenwidth == 1920:
if fileExists(png):
res.append(MultiContentEntryPixmapAlphaBlend(pos=(10, 3), size=(67, 48), png=loadPNG(png)))
res.append(MultiContentEntryText(pos=(90, 7), size=(900, 50), font=1, text=name))
else:
if fileExists(png):
res.append(MultiContentEntryPixmapAlphaBlend(pos=(0, 0), size=(35, 25), png=loadPNG(png)))
res.append(MultiContentEntryText(pos=(40, 3), size=(500, 25), font=0, text=name))
return res
def CCcamServerListEntry(name, color):
screenwidth = getDesktop(0).size().width()
res = [name]
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % color)):
png = resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % color)
else:
png = "/usr/share/enigma2/skin_default/buttons/key_%s.png" % color
if screenwidth and scre
|
Osmose/normandy
|
recipe-server/normandy/recipes/migrations/0034_recipe_revisions.py
|
Python
|
mpl-2.0
| 3,780
| 0.002646
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-28 13:41
# flake8: noqa
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import normandy.recipes.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('recipes', '0033_migrate_surveys'),
]
operations = [
migrations.CreateModel(
name='RecipeRevision',
fields=[
('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('comment', models.TextField()),
('name', models.CharField(max_length=255)),
('arguments_json', models.TextField(default='{}', validators=[normandy.recipes.validators.validate_json])),
('filter_expression', models.TextField()),
('action', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipe_revisions', to='recipes.Action')),
('parent', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='child', to='recipes.RecipeRevision')),
],
),
migrations.RemoveField(
model_name='approval',
name='creator',
),
migrations.RemoveField(
model_name='approvalrequest',
name='approval',
),
migrations.RemoveField(
model_name='approvalrequest',
name='creator',
),
migrations.RemoveField(
model_name='approvalrequest',
name='recipe',
),
migrations.RemoveField(
model_name='approvalrequestcomment',
name='approval_request',
),
migrations.RemoveField(
model_name='approvalrequestcomment',
name='creator',
),
migrations.AlterModelOptions(
name='recipe',
            options={'ordering': ['-enabled', '-latest_revision__updated']},
),
migrations.RemoveField(
model_name='recipe',
name='approval',
),
migrations.DeleteModel(
name='Approval',
),
migrations.DeleteModel(
name='ApprovalRequest',
),
migrations.DeleteModel(
name='ApprovalRequestComment',
),
migrations.AddField(
model_name='reciperevision',
name='recipe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='revisions', to='recipes.Recipe'),
),
migrations.AddField(
model_name='reciperevision',
name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='recipe_revisions', to=settings.AUTH_USER_MODEL)
),
migrations.AddField(
model_name='recipe',
name='latest_revision',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='latest_for_recipe', to='recipes.RecipeRevision'),
),
migrations.AlterField(
model_name='recipe',
name='action',
field=models.ForeignKey(to='recipes.Action', null=True),
),
migrations.AlterField(
model_name='recipe',
name='name',
field=models.CharField(max_length=255, unique=False, null=True),
),
]
|
OpenTreeOfLife/gcmdr
|
collect_study_ids.py
|
Python
|
bsd-2-clause
| 537
| 0.007449
|
"""
Creates a list of studies currently being used for synthesis.
"""
import re
#from stephen_desktop_conf import *
from microbes import studytreelist as microbelist
from plants import studytreelist as plantslist
from metazoa import studytreelist as metalist
from fungi import studytreelist as fungilist
studytreelist = []
studytreelist.extend(plantslist)
studytreelist.extend(metalist)
studytreelist.extend(fungilist)
studytreelist.extend(microbelist)
for i in studytreelist:
    studyid = i.split('_')[0]
    print studyid + ".json"
|
Intel-bigdata/swift
|
test/unit/common/test_client.py
|
Python
|
apache-2.0
| 15,155
| 0.000264
|
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: More tests
import socket
import unittest
from StringIO import StringIO
from urlparse import urlparse
# TODO: mock http connection class with more control over headers
from test.unit.proxy.test_server import fake_http_connect
from swift.common import client as c
class TestHttpHelpers(unittest.TestCase):
def test_quote(self):
value = 'standard string'
self.assertEquals('standard%20string', c.quote(value))
value = u'\u0075nicode string'
self.assertEquals('unicode%20string', c.quote(value))
def test_http_connection(self):
url = 'http://www.test.com'
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPConnection))
url = 'https://www.test.com'
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPSConnection))
url = 'ftp://www.test.com'
self.assertRaises(c.ClientException, c.http_connection, url)
class TestClientException(unittest.TestCase):
def test_is_exception(self):
self.assertTrue(issubclass(c.ClientException, Exception))
def test_format(self):
exc = c.ClientException('something failed')
self.assertTrue('something failed' in str(exc))
test_kwargs = (
'scheme',
'host',
'port',
'path',
'query',
'status',
'reason',
'device',
)
for value in test_kwargs:
kwargs = {
'http_%s' % value: value,
}
exc = c.ClientException('test', **kwargs)
self.assertTrue(value in str(exc))
class TestJsonImport(unittest.TestCase):
def tearDown(self):
try:
import json
except ImportError:
pass
else:
reload(json)
try:
import simplejson
except ImportError:
pass
else:
reload(simplejson)
def test_any(self):
self.assertTrue(hasattr(c, 'json_loads'))
def test_no_simplejson(self):
# break simplejson
try:
import simplejson
except ImportError:
# not installed, so we don't have to break it for these tests
pass
else:
delattr(simplejson, 'loads')
reload(c)
try:
from json import loads
except ImportError:
            # this case is tested in test_no_json
pass
else:
self.assertEquals(loads, c.json_loads)
def test_no_json(self):
# first break simplejson
try:
import simplejson
except ImportError:
# not installed, so we don't have to break it for these tests
pass
else:
delattr(simplejson, 'loads')
# then break json
try:
import json
except ImportError:
# not installed, so we don't have to break it for these tests
_orig_dumps = None
else:
# before we break json, grab a copy of the orig_dumps function
_orig_dumps = json.dumps
delattr(json, 'loads')
reload(c)
if _orig_dumps:
# basic test of swift.common.client.json_loads using json.loads
data = {
'string': 'value',
'int': 0,
'bool': True,
'none': None,
}
json_string = _orig_dumps(data)
else:
# even more basic test using a hand encoded json string
data = ['value1', 'value2']
json_string = "['value1', 'value2']"
self.assertEquals(data, c.json_loads(json_string))
self.assertRaises(AttributeError, c.json_loads, self)
class MockHttpTest(unittest.TestCase):
def setUp(self):
def fake_http_connection(*args, **kwargs):
_orig_http_connection = c.http_connection
def wrapper(url, proxy=None):
parsed, _conn = _orig_http_connection(url, proxy=proxy)
conn = fake_http_connect(*args, **kwargs)()
def request(*args, **kwargs):
return
conn.request = request
conn.has_been_read = False
_orig_read = conn.read
def read(*args, **kwargs):
conn.has_been_read = True
return _orig_read(*args, **kwargs)
conn.read = read
return parsed, conn
return wrapper
self.fake_http_connection = fake_http_connection
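        # Usage sketch: tests assign e.g.
        #   c.http_connection = self.fake_http_connection(200)
        # so that every subsequent client call sees a canned HTTP 200 response.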
def tearDown(self):
reload(c)
# TODO: following tests are placeholders, need more tests, better coverage
class TestGetAuth(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(url, None)
self.assertEquals(token, None)
class TestGetAccount(MockHttpTest):
def test_no_content(self):
c.http_connection = self.fake_http_connection(204)
value = c.get_account('http://www.test.com', 'asdf')[1]
self.assertEquals(value, [])
class TestHeadAccount(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.head_account('http://www.tests.com', 'asdf')
# TODO: Hmm. This doesn't really test too much as it uses a fake that
# always returns the same dict. I guess it "exercises" the code, so
# I'll leave it for now.
self.assertEquals(type(value), dict)
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_account,
'http://www.tests.com', 'asdf')
class TestGetContainer(MockHttpTest):
def test_no_content(self):
c.http_connection = self.fake_http_connection(204)
value = c.get_container('http://www.test.com', 'asdf', 'asdf')[1]
self.assertEquals(value, [])
class TestHeadContainer(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_container,
'http://www.test.com', 'asdf', 'asdf',
)
class TestPutContainer(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.put_container('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(value, None)
class TestDeleteContainer(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.delete_container('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(value, None)
class TestGetObject(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.get_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestHeadObject(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestPutObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
args = ('http://www.test.com'
|
epam/DLab
|
infrastructure-provisioning/src/general/scripts/os/jupyter_dataengine_create_configs.py
|
Python
|
apache-2.0
| 7,919
| 0.003283
|
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
from fabric.api import *
import argparse
import os
import sys
import time
from fabric.api import lcd
from fabric.contrib.files import exists
from fabvenv import virtualenv
from dlab.notebook_lib import *
from dlab.actions_lib import *
from dlab.fab import *
from dlab.common_lib import *
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_name', type=str, default='')
parser.add_argument('--dry_run', type=str, default='false')
parser.add_argument('--spark_version', type=str, default='')
parser.add_argument('--hadoop_version', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--spark_master', type=str, default='')
parser.add_argument('--region', type=str, default='')
parser.add_argument('--datalake_enabled', type=str, default='')
parser.add_argument('--r_enabled', type=str, default='')
args = parser.parse_args()
kernels_dir = '/home/' + args.os_user + '/.local/share/jupyter/kernels/'
cluster_dir = '/opt/' + args.cluster_name + '/'
local_jars_dir = '/opt/jars/'
spark_version = args.spark_version
hadoop_version = args.hadoop_version
scala_link = "http://www.scala-lang.org/files/archive/"
spark_link = "https://archive.apache.org/dist/spark/spark-" + spark_version + "/spark-" + spark_version + \
"-bin-hadoop" + hadoop_version + ".tgz"
def r_kernel(args):
spark_path = '/opt/{}/spark/'.format(args.cluster_name)
local('mkdir -p {}/r_{}/'.format(kernels_dir, args.cluster_name))
kernel_path = "{}/r_{}/kernel.json".format(kernels_dir, args.cluster_name)
template_file = "/tmp/{}/r_dataengine_template.json".format(args.cluster_name)
    r_version = local("R --version | awk '/version / {print $3}'", capture=True)
with open(template_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('SPARK_PATH', spark_path)
text = text.replace('SPARK_VERSION', 'Spark-' + args.spark_version)
text = text.replace('R_KERNEL_VERSION', 'R-{}'.format(str(r_version)))
text = text.replace('SPARK_ACTION', 'init()')
text = text.replace('MASTER', args.spark_master)
with open(kernel_path, 'w') as f:
f.write(text)
def toree_kernel(args):
spark_path = '/opt/' + args.cluster_name + '/spark/'
scala_version = local('scala -e "println(scala.util.Properties.versionNumberString)"', capture=True)
local('mkdir -p ' + kernels_dir + 'toree_' + args.cluster_name + '/')
local('tar zxvf /tmp/{}/toree_kernel.tar.gz -C '.format(args.cluster_name) + kernels_dir + 'toree_' + args.cluster_name + '/')
kernel_path = kernels_dir + "toree_" + args.cluster_name + "/kernel.json"
template_file = "/tmp/{}/toree_dataengine_template.json".format(args.cluster_name)
with open(template_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('SPARK_VERSION', 'Spark-' + args.spark_version)
text = text.replace('SPARK_PATH', spark_path)
text = text.replace('OS_USER', args.os_user)
text = text.replace('MASTER', args.spark_master)
text = text.replace('SCALA_VERSION', scala_version)
with open(kernel_path, 'w') as f:
f.write(text)
local('touch /tmp/{}/kernel_var.json'.format(args.cluster_name))
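    # The shell pipeline below locates the py4j zip inside the cluster's Spark
    # distribution and substitutes its path for the PY4J placeholder in the
    # kernel spec.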
local(
"PYJ=`find /opt/" + args.cluster_name +
"/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat " + kernel_path +
" | sed 's|PY4J|'$PYJ'|g' > /tmp/{}/kernel_var.json".format(args.cluster_name))
local('sudo mv /tmp/{}/kernel_var.json '.format(args.cluster_name) + kernel_path)
run_sh_path = kernels_dir + "toree_" + args.cluster_name + "/bin/run.sh"
template_sh_file = '/tmp/{}/run_template.sh'.format(args.cluster_name)
with open(template_sh_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('OS_USER', args.os_user)
with open(run_sh_path, 'w') as f:
f.write(text)
def pyspark_kernel(args):
spark_path = '/opt/' + args.cluster_name + '/spark/'
local('mkdir -p ' + kernels_dir + 'pyspark_' + args.cluster_name + '/')
kernel_path = kernels_dir + "pyspark_" + args.cluster_name + "/kernel.json"
template_file = "/tmp/{}/pyspark_dataengine_template.json".format(args.cluster_name)
with open(template_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('SPARK_VERSION', 'Spark-' + spark_version)
text = text.replace('SPARK_PATH', spark_path)
text = text.replace('PYTHON_SHORT_VERSION', '2.7')
text = text.replace('PYTHON_FULL_VERSION', '2.7')
text = text.replace('MASTER', args.spark_master)
text = text.replace('PYTHON_PATH', '/usr/bin/python2.7')
with open(kernel_path, 'w') as f:
f.write(text)
local('touch /tmp/{}/kernel_var.json'.format(args.cluster_name))
local(
"PYJ=`find /opt/{0}/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat {1} | sed 's|PY4J|'$PYJ'|g' | sed \'/PYTHONPATH\"\:/s|\(.*\)\"|\\1/home/{2}/c
|
affe/python:/home/{2}/pytorch/build:\"|\' > /tmp/{0}/kernel_var.json".
format(args.cluster_name, kernel_path, args.os_user))
local('sudo mv /tmp/{}/kernel_var.json '.format(args.cluster_name) + kernel_path)
    local('mkdir -p ' + kernels_dir + 'py3spark_' + args.cluster_name + '/')
kernel_path = kernels_dir + "py3spark_" + args.cluster_name + "/kernel.json"
template_file = "/tmp/{}/pyspark_dataengine_template.json".format(args.cluster_name)
with open(template_file, 'r') as f:
text = f.read()
text = text.replace('CLUSTER_NAME', args.cluster_name)
text = text.replace('SPARK_VERSION', 'Spark-' + spark_version)
text = text.replace('SPARK_PATH', spark_path)
text = text.replace('MASTER', args.spark_master)
text = text.replace('PYTHON_SHORT_VERSION', '3.5')
text = text.replace('PYTHON_FULL_VERSION', '3.5')
text = text.replace('PYTHON_PATH', '/usr/bin/python3.5')
with open(kernel_path, 'w') as f:
f.write(text)
local('touch /tmp/{}/kernel_var.json'.format(args.cluster_name))
local(
"PYJ=`find /opt/{0}/spark/ -name '*py4j*.zip' | tr '\\n' ':' | sed 's|:$||g'`; cat {1} | sed 's|PY4J|'$PYJ'|g' | sed \'/PYTHONPATH\"\:/s|\(.*\)\"|\\1/home/{2}/caffe/python:/home/{2}/pytorch/build:\"|\' > /tmp/{0}/kernel_var.json".
format(args.cluster_name, kernel_path, args.os_user))
local('sudo mv /tmp/{}/kernel_var.json '.format(args.cluster_name) + kernel_path)
if __name__ == "__main__":
if args.dry_run == 'true':
parser.print_help()
else:
dataengine_dir_prepare('/opt/{}/'.format(args.cluster_name))
install_dataengine_spark(args.cluster_name, spark_link, spark_version, hadoop_version, cluster_dir, args.os_user,
args.datalake_enabled)
configure_dataengine_spark(args.cluster_name, local_jars_dir, cluster_dir, args.region, args.datalake_enabled)
pyspark_kernel(args)
toree_kernel(args)
if args.r_enabled == 'true':
r_kernel(args)
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py
|
Python
|
bsd-2-clause
| 2,410
| 0
|
#!/usr/bin/env python
from nose.tools import ok_
from nose.tools import eq_
import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set
from networkx.algorithms.approximation import min_edge_dominating_set
class TestMinWeightDominatingSet:
def test_min_weighted_dominating_set(self):
graph = nx.Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 5)
graph.add_edge(2, 3)
graph.add_edge(2, 5)
graph.add_edge(3, 4)
graph.add_edge(3, 6)
graph.add_edge(5, 6)
vertices = set([1, 2, 3, 4, 5, 6])
# due to ties, this might be hard to test tight bounds
dom_set = min_weighted_dominating_set(graph)
for vertex in vertices - dom_set:
neighbors = set(graph.neighbors(vertex))
ok_(len(neighbors & dom_set) > 0, "Non dominating set found!")
def test_star_graph(self):
"""Tests that an approximate dominating set for the star graph,
even when the center node does not have the smallest integer
label, gives just the center node.
For more information, see #1527.
"""
# Create a star graph in which the center node has the highest
# label instead of the lowest.
G = nx.star_graph(10)
        G = nx.relabel_nodes(G, {0: 9, 9: 0})
eq_(min_weighted_dominating_set(G), {9})
def test_min_edge_dominating_set(self):
graph = nx.path_graph(5)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges_iter():
if edge in dom_set:
continue
else:
u, v = edge
found = False
                for dom_edge in dom_set:
found |= u == dom_edge[0] or u == dom_edge[1]
ok_(found, "Non adjacent edge found!")
graph = nx.complete_graph(10)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges_iter():
if edge in dom_set:
continue
else:
u, v = edge
found = False
for dom_edge in dom_set:
found |= u == dom_edge[0] or u == dom_edge[1]
ok_(found, "Non adjacent edge found!")
|
kevcooper/bitcoin
|
test/functional/blockchain.py
|
Python
|
mit
| 5,880
| 0.00068
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import (BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT)
from test_framework.util import (
assert_equal,
assert_raises,
assert_raises_jsonrpc,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [['-stopatheight=207']]
def run_test(self):
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
assert self.nodes[0].verifychain(4, 0)
def _test_getchaintxstats(self):
chaintxstats = self.nodes[0].getchaintxstats(1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
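        # e.g. with one tx per 600-second block, txrate is approximately 1/600
        # as a float, and round(txrate * 600, 10) recovers exactly Decimal(1)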
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
        assert_equal(res['bogosize'], 17000)
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
        assert_equal(res2['bogosize'], 0)
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
assert_equal(res['total_amount'], res3['total_amount'])
assert_equal(res['transactions'], res3['transactions'])
assert_equal(res['height'], res3['height'])
assert_equal(res['txouts'], res3['txouts'])
assert_equal(res['bogosize'], res3['bogosize'])
assert_equal(res['bestblock'], res3['bestblock'])
assert_equal(res['hash_serialized_2'], res3['hash_serialized_2'])
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_jsonrpc(-5, "Block not found",
node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
        secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'],
secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generate(6)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generate(1)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].process.wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
self.nodes[0] = self.start_node(0, self.options.tmpdir)
assert_equal(self.nodes[0].getblockcount(), 207)
if __name__ == '__main__':
BlockchainTest().main()
|
NewpTone/stacklab-cinder
|
cinder/tests/fake_flags.py
|
Python
|
apache-2.0
| 1,721
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import flags
FLAGS = flags.FLAGS
flags.DECLARE('iscsi_num_targets', 'cinder.volume.driver')
flags.DECLARE('policy_file', 'cinder.policy')
flags.DECLARE('volume_driver', 'cinder.volume.manager')
flags.DECLARE('xiv_proxy', 'cinder.volume.xiv')
def_vol_type = 'fake_vol_type'
def set_defaults(conf):
conf.set_default('default_volume_type', def_vol_type)
    conf.set_default('volume_driver', 'cinder.volume.driver.FakeISCSIDriver')
conf.set_default('connection_type', 'fake')
conf.set_default('fake_rabbit', True)
conf.set_default('rpc_backend', 'cinder.openstack.common.rpc.impl_fake')
conf.set_default('iscsi_num_targets', 8)
    conf.set_default('verbose', True)
conf.set_default('sql_connection', "sqlite://")
conf.set_default('sqlite_synchronous', False)
conf.set_default('policy_file', 'cinder/tests/policy.json')
conf.set_default('xiv_proxy', 'cinder.tests.test_xiv.XIVFakeProxyDriver')
|
BenHewins/influxdb-python
|
influxdb/tests/server_tests/base.py
|
Python
|
mit
| 2,621
| 0
|
# -*- coding: utf-8 -*-
"""Define the base module for server test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from influxdb.tests import using_pypy
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
from influxdb.client import InfluxDBClient
if not using_pypy:
    from influxdb.dataframe_client import DataFrameClient
def _setup_influxdb_server(inst):
inst.influxd_inst = InfluxDbInstance(
inst.influxdb_template_conf,
udp_enabled=getattr(inst, 'influxdb_udp_enabled', False),
)
inst.cli = InfluxDBClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
if not using_pypy:
inst.cliDF = DataFrameClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
def _teardown_influxdb_server(inst):
remove_tree = sys.exc_info() == (None, None, None)
inst.influxd_inst.close(remove_tree=remove_tree)
class SingleTestCaseWithServerMixin(object):
"""Define the single testcase with server mixin.
A mixin for unittest.TestCase to start an influxdb server instance
in a temporary directory **for each test function/case**
"""
# 'influxdb_template_conf' attribute must be set
# on the TestCase class or instance.
setUp = _setup_influxdb_server
tearDown = _teardown_influxdb_server
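# Hypothetical usage sketch (the class name and conf path below are
# illustrative, not from this module): a concrete test case supplies the
# template conf before setUp runs.
#   class MyServerTest(SingleTestCaseWithServerMixin, unittest.TestCase):
#       influxdb_template_conf = 'path/to/influxdb.conf.template'
#       def test_ping(self):
#           self.assertIsNotNone(self.cli.ping())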
class ManyTestCasesWithServerMixin(object):
"""Define the many testcase with server mixin.
Same as the SingleTestCaseWithServerMixin but this module creates
a single instance for the whole class. Also pre-creates a fresh
database: 'db'.
"""
# 'influxdb_template_conf' attribute must be set on the class itself !
@classmethod
def setUpClass(cls):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
_setup_influxdb_server(cls)
def setUp(self):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
self.cli.create_database('db')
@classmethod
def tearDownClass(cls):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
_teardown_influxdb_server(cls)
def tearDown(self):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
self.cli.drop_database('db')
|
facetothefate/contrail-controller
|
src/analytics/test/utils/mockredis/mockredis/mockredis.py
|
Python
|
apache-2.0
| 5,634
| 0.002662
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# mockredis
#
# This module helps start and stop redis instances for unit-testing
# redis must be pre-installed for this to work
#
import os
import signal
import subprocess
import logging
import socket
import time
import redis
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
redis_ver = '2.6.13'
redis_bdir = '/tmp/cache/' + os.environ['USER'] + '/systemless_test'
redis_url = redis_bdir + '/redis-'+redis_ver+'.tar.gz'
redis_exe = redis_bdir + '/bin/redis-server'
def install_redis():
if not os.path.exists(redis_url):
process = subprocess.Popen(['wget', '-P', redis_bdir,
'https://redis.googlecode.com/files/redis-'\
+ redis_ver + '.tar.gz'],
cwd=redis_bdir)
process.wait()
        if process.returncode != 0:
raise SystemError('wget '+redis_url)
if not os.path.exists(redis_bdir + '/redis-'+redis_ver):
process = subprocess.Popen(['tar', 'xzvf', redis_url],
cwd=redis_bdir)
process.wait()
        if process.returncode != 0:
raise SystemError('untar '+redis_url)
if not os.path.exists(redis_exe):
process = subprocess.Popen(['make', 'PREFIX=' + redis_bdir, 'install'],
cwd=redis_bdir + '/redis-'+redis_ver)
process.wait()
        if process.returncode != 0:
raise SystemError('install '+redis_url)
def get_redis_path():
if not os.path.exists(redis_exe):
install_redis()
return redis_exe
def redis_version():
'''
Determine redis-server version
'''
return 2.6
'''
command = "redis-server --version"
logging.info('redis_version call 1')
process = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
logging.info('redis_version call 2')
output, _ = process.communicate()
if "v=2.6" in output[0]:
return 2.6
else:
return 2.4
'''
def start_redis(port, password=None):
'''
Client uses this function to start an instance of redis
Arguments:
cport : An unused TCP port for redis to use as the client port
'''
exe = get_redis_path()
version = redis_version()
if version == 2.6:
redis_conf = "redis.26.conf"
else:
redis_conf = "redis.24.conf"
conftemplate = os.path.dirname(os.path.abspath(__file__)) + "/" +\
redis_conf
redisbase = "/tmp/redis.%s.%d/" % (os.getenv('USER', 'None'), port)
output, _ = call_command_("rm -rf " + redisbase)
output, _ = call_command_("mkdir " + redisbase)
output, _ = call_command_("mkdir " + redisbase + "cache")
logging.info('Redis Port %d' % port)
output, _ = call_command_("cp " + conftemplate + " " + redisbase +
redis_conf)
replace_string_(redisbase + redis_conf,
[("/var/run/redis_6379.pid", redisbase + "pid"),
("port 6379", "port " + str(port)),
("/var/log/redis_6379.log", redisbase + "log"),
("/var/lib/redis/6379", redisbase + "cache")])
if password:
replace_string_(redisbase + redis_conf,[("# requirepass foobared","requirepass " + password)])
command = exe + " " + redisbase + redis_conf
subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
r = redis.StrictRedis(host='localhost', port=port, db=0, password=password)
done = False
start_wait = os.getenv('CONTRIAL_ANALYTICS_TEST_MAX_START_WAIT_TIME', 15)
cnt = 0
while not done:
try:
r.ping()
except:
cnt += 1
if cnt > start_wait:
logging.info('Redis Failed. Logs below: ')
with open(redisbase + "log", 'r') as fin:
logging.info(fin.read())
return False
logging.info('Redis not ready')
                time.sleep(1)
else:
done = True
logging.info('Redis ready')
return True
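# Usage sketch (mirrors the __main__ block below): bind a throwaway socket to
# find a free port, call start_redis(port), run tests against
# redis.StrictRedis(port=port), and finally stop_redis(port) to clean up.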
def stop_redis(port, password=None):
'''
Client uses this function to stop an instance of redis
This will only work for redis instances that were started by this module
Arguments:
cport : The Client Port for the instance of redis to be stopped
'''
    r = redis.StrictRedis(host='localhost', port=port, db=0, password=password)
r.shutdown()
del r
redisbase = "/tmp/redis.%s.%d/" % (os.getenv('USER', 'None'), port)
output, _ = call_command_("rm -rf " + redisbase)
def replace_string_(filePath, findreplace):
"replaces all findStr by repStr in file filePath"
print filePath
tempName = filePath + '~~~'
input = open(filePath)
output = open(tempName, 'w')
s = input.read()
for couple in findreplace:
outtext = s.replace(couple[0], couple[1])
s = outtext
output.write(outtext)
output.close()
input.close()
os.rename(tempName, filePath)
def call_command_(command):
process = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process.communicate()
if __name__ == "__main__":
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
start_redis(cport)
|
githubfun/lphw
|
gothonweb/bin/gothon_app.py
|
Python
|
mit
| 1,131
| 0.005305
|
import web
from gothonweb import map
urls = (
'/game', 'GameEngine',
'/', 'Index',
)
app = web.application(urls, globals())
#little hack so that debug mode works with sessions
if web.config.get('_session') is None:
store = web.session.DiskStore('sessions')
session = web.session.Session(app, store,
initializer={'room':None})
web.config._session = session
else:
session = web.config._session
render = web.template.render('templates/', base="layout")
class Index(object):
def GET(self):
# this is used to "setup" the session with starting values
session.room = map.START
web.seeother("/game")
class GameEngine(object):
def GET(self):
if session.room:
return render.show_room(room=session.room)
# else:
# # why is there here? do you need it?
# return render.you_died()
def POST(self):
form = web.input(action=None)
if session.room:
session.room = session.room.go(form.action)
web.seeother("/game")
if __name__ == "__main__":
app.run()
|
benschmaus/catapult
|
dashboard/dashboard/dump_graph_json.py
|
Python
|
bsd-3-clause
| 6,621
| 0.005437
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a web interface for dumping graph data as JSON.
This is meant to be used with /load_from_prod in order to easily grab
data for a graph to a local server for testing.
"""
import base64
import json
from google.appengine.ext import ndb
from google.appengine.ext.ndb import model
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
_DEFAULT_MAX_POINTS = 500
# This is about the limit we want to return since we fetch many associated
# entities for each anomaly.
_DEFAULT_MAX_ANOMALIES = 30
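# Illustrative requests (the parameter values here are hypothetical):
#   /dump_graph_json?test_path=ChromiumPerf/linux/sunspider/Total&num_points=100
#   /dump_graph_json?sheriff=Chromium+Perf+Sheriff&num_alerts=10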
class DumpGraphJsonHandler(request_handler.RequestHandler):
"""Handler for extracting entities from datastore."""
def get(self):
"""Handles dumping dashboard data."""
if self.request.get('sheriff'):
self._DumpAnomalyDataForSheriff()
elif self.request.get('test_path'):
self._DumpTestData()
else:
self.ReportError('No parameters specified.')
def _DumpTestData(self):
"""Dumps data for the requested test.
Request parameters:
test_path: A single full test path, including master/bot.
num_points: Max number of Row entities (optional).
end_rev: Ending revision number, inclusive (optional).
Outputs:
JSON array of encoded protobuf messages, which encode all of
the datastore entities relating to one test (including Master, Bot,
TestMetadata, Row, Anomaly and Sheriff entities).
"""
test_path = self.request.get('test_path')
num_points = int(self.request.get('num_points', _DEFAULT_MAX_POINTS))
end_rev = self.request.get('end_rev')
test_key = utils.TestKey(test_path)
if not test_key or test_key.kind() != 'TestMetadata':
# Bad test_path passed in.
self.response.out.write(json.dumps([]))
return
# List of datastore entities that will be dumped.
entities = []
entities.extend(self._GetTestAncestors([test_key]))
# Get the Row entities.
q = graph_data.Row.query()
q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
if end_rev:
q = q.filter(graph_data.Row.revision <= int(end_rev))
q = q.order(-graph_data.Row.revision)
entities += q.fetch(limit=num_points)
# Get the Anomaly and Sheriff entities.
alerts = anomaly.Anomaly.GetAlertsForTest(test_key)
sheriff_keys = {alert.sheriff for alert in alerts}
sheriffs = [sheriff.get() for sheriff in sheriff_keys]
entities += alerts
entities += sheriffs
# Convert the entities to protobuf message strings and output as JSON.
protobuf_strings = map(EntityToBinaryProtobuf, entities)
self.response.out.write(json.dumps(protobuf_strings))
  def _DumpAnomalyDataForSheriff(self):
"""Dumps Anomaly data for all sheriffs.
Request parameters:
sheriff: Sheriff name.
num_points: Max number of Row entities (optional).
      num_alerts: Max number of Anomaly entities (optional).
Outputs:
JSON array of encoded protobuf messages, which encode all of
the datastore entities relating to one test (including Master, Bot,
TestMetadata, Row, Anomaly and Sheriff entities).
"""
sheriff_name = self.request.get('sheriff')
num_points = int(self.request.get('num_points', _DEFAULT_MAX_POINTS))
num_anomalies = int(self.request.get('num_alerts', _DEFAULT_MAX_ANOMALIES))
sheriff = ndb.Key('Sheriff', sheriff_name).get()
if not sheriff:
self.ReportError('Unknown sheriff specified.')
return
anomalies = self._FetchAnomalies(sheriff, num_anomalies)
test_keys = [a.GetTestMetadataKey() for a in anomalies]
# List of datastore entities that will be dumped.
entities = []
entities.extend(self._GetTestAncestors(test_keys))
# Get the Row entities.
entities.extend(self._FetchRowsAsync(test_keys, num_points))
# Add the Anomaly and Sheriff entities.
entities += anomalies
entities.append(sheriff)
# Convert the entities to protobuf message strings and output as JSON.
protobuf_strings = map(EntityToBinaryProtobuf, entities)
self.response.out.write(json.dumps(protobuf_strings))
def _GetTestAncestors(self, test_keys):
"""Gets the TestMetadata, Bot, and Master entities preceding in path."""
entities = []
added_parents = set()
for test_key in test_keys:
if test_key.kind() != 'TestMetadata':
continue
parts = utils.TestPath(test_key).split('/')
      for index, _ in enumerate(parts):
test_path = '/'.join(parts[:index + 1])
if test_path in added_parents:
continue
added_parents.add(test_path)
if index == 0:
entities.append(ndb.Key('Master', parts[0]).get())
elif index == 1:
entities.append(ndb.Key('Master', parts[0], 'Bot', parts[1]).get())
else:
entities.append(ndb.Key('TestMetadata', test_path).get())
return [e for e in entities if e is not None]
def _FetchRowsAsync(self, test_keys, num_points):
"""Fetches recent Row asynchronously across all 'test_keys'."""
rows = []
futures = []
for test_key in test_keys:
q = graph_data.Row.query()
q = q.filter(
graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
q = q.order(-graph_data.Row.revision)
futures.append(q.fetch_async(limit=num_points))
ndb.Future.wait_all(futures)
for future in futures:
rows.extend(future.get_result())
return rows
def _FetchAnomalies(self, sheriff, num_anomalies):
"""Fetches recent anomalies for 'sheriff'."""
q = anomaly.Anomaly.query(
anomaly.Anomaly.sheriff == sheriff.key)
q = q.order(-anomaly.Anomaly.timestamp)
return q.fetch(limit=num_anomalies)
def EntityToBinaryProtobuf(entity):
"""Converts an ndb entity to a protobuf message in binary format."""
# Encode in binary representation of the protocol buffer.
message = ndb.ModelAdapter().entity_to_pb(entity).Encode()
# Base64 encode the data to text format for json.dumps.
return base64.b64encode(message)
def BinaryProtobufToEntity(pb_str):
"""Converts a protobuf message in binary format to an ndb entity.
Args:
pb_str: Binary encoded protocol buffer which is encoded as text.
Returns:
A ndb Entity.
"""
message = model.entity_pb.EntityProto(base64.b64decode(pb_str))
return ndb.ModelAdapter().pb_to_entity(message)
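# Round-trip note: BinaryProtobufToEntity is the inverse of
# EntityToBinaryProtobuf, so BinaryProtobufToEntity(EntityToBinaryProtobuf(e))
# reconstructs an equivalent entity for any ndb entity e.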
|
cjaymes/pyscap
|
src/scap/model/oval_5/sc/unix/EntityItemEncryptMethodType.py
|
Python
|
gpl-3.0
| 1,167
| 0.001714
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.sc.EntityItemType import EntityItemType
logger = logging.getLogger(__name__)
class EntityItemEncryptMethodType(EntityItemType):
MODEL_MAP = {
'elements': [
],
'attributes': {
},
}
def get_value_enum(self):
return [
'DES',
'BSDi',
'MD5',
'Blowfish',
'Sun MD5',
'SHA-256',
'SHA-512',
'',
]
|
ZeitOnline/zeit.content.gallery
|
setup.py
|
Python
|
bsd-3-clause
| 1,113
| 0
|
from setuptools import setup, find_packages
setup(
name='zeit.content.gallery',
version='2.9.2.dev0',
author='gocept, Zeit Online',
author_email='zon-backend@zeit.de',
url='http://www.zeit.de/',
description="vivi Content-Type Portraitbox",
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
license='BSD',
namespace_packages=['zeit', 'zeit.content'],
install_requires=[
'cssselect',
'Pillow',
'gocept.form',
'setuptools',
'zeit.cms >= 3.0.dev0',
'zeit.connector>=2.4.0.dev0',
'zeit.imp>=0.15.0.dev0',
'zeit.content.image',
'zeit.push>=1.21.0.dev0',
'zeit.wysiwyg',
'zope.app.appsetup',
'zope.app.testing',
'zope.component',
'zope.formlib',
        'zope.interface',
'zope.publisher',
'zope.security',
'zope.testing',
],
entry_points={
'fanstatic.libraries': [
            'zeit_content_gallery=zeit.content.gallery.browser.resources:lib',
],
},
)
|
ioanpocol/superdesk-ntb
|
server/ntb/scanpix/scanpix_datalayer.py
|
Python
|
agpl-3.0
| 13,055
| 0.001762
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015, 2016 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import arrow
import logging
import re
import requests
import json
from os.path import splitext
from io import BytesIO
from eve.io.base import DataLayer
from eve_elastic.elastic import ElasticCursor
from superdesk.upload import url_for_media
from superdesk.errors import SuperdeskApiError, ProviderError
from superdesk.media.media_operations import process_file_from_stream, decode_metadata
from superdesk.media.renditions import generate_renditions, delete_file_on_error, get_renditions_spec
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE
from superdesk.utc import utcnow, get_date, local_to_utc
import mimetypes
# scanpix preview size to use (if available) for superdesk rendition
# preview sizes are in order of preference, first found is used
REND2PREV = {
'thumbnail': ('generated_jpg', 'thumbnail', 'thumbnail_big'),
'viewImage': ('preview', 'thumbnail_big', 'thumbnail', 'preview_big'),
'baseImage': ('mp4_preview', 'mp4_thumbnail', 'preview_big', 'preview', 'thumbnail_big', 'thumbnail')}
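# e.g. for the 'thumbnail' rendition the previews 'generated_jpg', 'thumbnail'
# and 'thumbnail_big' are tried in that order, and the first one present in
# the scanpix result backs the superdesk rendition.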
logger = logging.getLogger('ntb:scanpix')
# Default timezone used to convert datetimes from scanpix api results to utc
SCANPIX_TZ = 'Europe/Oslo'
def extract_params(query, names):
if isinstance(names, str):
names = [names]
findall = re.findall(r'([\w]+):\(([-\w\s*]+)\)', query)
params = {name: value for (name, value) in findall if name in names}
for name, value in findall:
query = query.replace('%s:(%s)' % (name, value), '')
query = query.strip()
# escape dashes
for name, value in params.items():
params[name] = value.replace('-', r'\-')
if query:
params['q'] = query
return params
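# Illustrative example (not from the original source):
#   extract_params('keywords:(fire) bw:(1)', 'keywords')
# returns {'keywords': 'fire'}; every name:(value) group is stripped from the
# query, and any remaining free text is returned under the 'q' key.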
class ScanpixDatalayer(DataLayer):
def set_credentials(self, user, password):
self._user = user
self._password = password
def init_app(self, app):
app.config.setdefault('SCANPIX_SEARCH_URL', 'http://api.scanpix.no/v2')
self._app = app
self._user = None
self._password = None
self._headers = {
'Content-Type': 'application/json',
}
def fetch_file(self, url):
"""Get file stream for given image url.
It will fetch the file using predefined auth token.
        :param url: image api url
"""
raise NotImplementedError
def find(self, resource, req, lookup):
"""
Called to execute a search against the Scanpix API. It attempts to translate the search request
passed in req to a suitable form for a search request against the API. It parses the response into a
suitable ElasticCursor.
:param resource:
:param req:
:param lookup:
:return:
"""
url = self._app.config['SCANPIX_SEARCH_URL'] + '/search'
data = {
'mainGroup': 'any'
}
if 'query' in req['query']['filtered']:
query = req['query']['filtered']['query']['query_string']['query'] \
.replace('slugline:', 'keywords:') \
.replace('description:', 'caption:')
# Black & White
try:
bw = bool(int(extract_params(query, 'bw')['bw']))
except KeyError:
pass
else:
if bw:
data['saturation'] = {'max': 1}
# Clear Edge
try:
clear_edge = bool(int(extract_params(query, 'clear_edge')['clear_edge']))
except KeyError:
pass
else:
if clear_edge:
data['clearEdge'] = True
text_params = extract_params(query, ('headline', 'keywords', 'caption', 'text'))
# combine all possible text params to use the q field.
data['searchString'] = ' '.join(text_params.values())
try:
ids = extract_params(query, 'id')['id'].split()
except KeyError:
pass
else:
data['refPtrs'] = ids
# subscription
data['subscription'] = 'subscription' # this is requested as a default value
# data['subscription'] is always equal to 'subscription', but we keep the test in case
# of the behaviour is changed again in the future.
        if 'ntbtema' in resource and data['subscription'] == 'subscription':
# small hack for SDNTB-250
data['subscription'] = 'punchcard'
for criterion in req.get('post_filter', {}).get('and', {}):
            if 'range' in criterion:
start = None
end = None
filter_data = criterion.get('range', {})
if 'firstcreated' in filter_data:
created = criterion['range']['firstcreated']
if 'gte' in created:
start = created['gte'][0:10]
if 'lte' in created:
end = created['lte'][0:10]
# if there is a special start and no end it's one of the date buttons
if start and not end:
if start == 'now-24H':
data['timeLimit'] = 'last24'
if start == 'now-1w':
data['timeLimit'] = 'lastweek'
if start == 'now-1M':
data['timeLimit'] = 'lastmonth'
elif start or end:
data['archived'] = {
'min': '',
'max': ''
}
if start:
data['archived']['min'] = start
if end:
data['archived']['max'] = end
if 'terms' in criterion:
if 'type' in criterion.get('terms', {}):
type_ = criterion['terms']['type']
if type_ == CONTENT_TYPE.VIDEO:
data['mainGroup'] = 'video'
offset, limit = int(req.get('from', '0')), max(10, int(req.get('size', '25')))
data['offset'] = offset
data['showNumResults'] = limit
r = self._request(url, data, resource)
hits = self._parse_hits(r.json())
return ElasticCursor(docs=hits['docs'], hits={'hits': hits})
def _request(self, url, data, resource):
"""Perform GET request to given url.
It adds predefined headers and auth token if available.
:param url
:param data
"""
r = requests.post(url, data=json.dumps(data), headers=self._headers, auth=(self._user, self._password))
if r.status_code < 200 or r.status_code >= 300:
logger.error('error fetching url=%s status=%s content=%s' % (url, r.status_code, r.content or ''))
raise ProviderError.externalProviderError("Scanpix request can't be performed", provider={'name': resource})
return r
def _parse_doc(self, doc):
new_doc = {}
new_doc['_id'] = doc['refPtr']
new_doc['guid'] = doc['refPtr']
try:
new_doc['description_text'] = doc['caption']
except KeyError:
pass
try:
new_doc['headline'] = doc['headline']
except KeyError:
pass
try:
new_doc['original_source'] = new_doc['source'] = doc['credit']
except KeyError:
pass
new_doc['versioncreated'] = new_doc['firstcreated'] = self._datetime(
local_to_utc(SCANPIX_TZ, get_date(doc['archivedTime']))
)
new_doc['pubstatus'] = 'usable'
# This must match the action
new_doc['_type'] = 'externalsource'
# entry that the client can use to identify the fetch endpoint
new_doc['fetch_endpoint'] = 'scanpix'
# mimetype is not directly found in Scanpix API
|
Clinical-Genomics/scout
|
scout/commands/base.py
|
Python
|
bsd-3-clause
| 4,404
| 0.001589
|
"""Code for CLI base"""
import logging
import pathlib
import click
import coloredlogs
import yaml
from flask.cli import FlaskGroup, with_appcontext
# General, logging
from scout import __version__
from scout.commands.convert import convert
from scout.commands.delete import delete
from scout.commands.download import download as download_command
from scout.commands.export import export
from scout.commands.index_command import index as index_command
# Commands
from scout.commands.load import load as load_command
from scout.commands.serve import serve
from scout.commands.setup import setup as setup_command
from scout.commands.update import update as update_command
from scout.commands.view import view as view_command
from scout.commands.wipe_database import wipe
from scout.server.app import create_app
LOG_LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
LOG = logging.getLogger(__name__)
@click.pass_context
def loglevel(ctx):
"""Set app cli log level"""
    log_level = ctx.find_root().params.get("loglevel")
log_format = None
coloredlogs.install(level=log_level, fmt=log_format)
LOG.info("Running scout version %s
|
", __version__)
LOG.debug("Debug logging enabled.")
@click.pass_context
def get_app(ctx=None):
"""Create an app with the correct config or with default app params"""
loglevel() # Set up log level even before creating the app object
# store provided params into a options variable
options = ctx.find_root()
cli_config = {}
    # if a .yaml config file was provided, use its params to initiate the app
if options.params.get("config"):
with open(options.params["config"], "r") as in_handle:
cli_config = yaml.load(in_handle, Loader=yaml.SafeLoader)
flask_conf = None
if options.params.get("flask_config"):
flask_conf = pathlib.Path(options.params["flask_config"]).absolute()
if options.params.get("demo"):
cli_config["demo"] = "scout-demo"
try:
app = create_app(
config=dict(
MONGO_DBNAME=options.params.get("mongodb")
or cli_config.get("demo")
or cli_config.get("mongodb")
or "scout",
MONGO_HOST=options.params.get("host") or cli_config.get("host"),
MONGO_PORT=options.params.get("port") or cli_config.get("port"),
MONGO_USERNAME=options.params.get("username") or cli_config.get("username"),
MONGO_PASSWORD=options.params.get("password") or cli_config.get("password"),
MONGO_URI=options.params.get("mongo_uri") or cli_config.get("mongo_uri"),
OMIM_API_KEY=cli_config.get("omim_api_key"),
),
config_file=flask_conf,
)
except SyntaxError as err:
LOG.error(err)
raise click.Abort
return app
@click.version_option(__version__)
@click.group(
cls=FlaskGroup,
create_app=get_app,
invoke_without_command=True,
add_default_commands=False,
add_version_option=False,
)
@click.option(
"-c",
"--config",
type=click.Path(exists=True),
help="Path to a YAML config file with database info.",
)
@click.option(
"--loglevel",
default="DEBUG",
type=click.Choice(LOG_LEVELS),
help="Set the level of log output.",
show_default=True,
)
@click.option("--demo", is_flag=True, help="If the demo database should be used")
@click.option("-db", "--mongodb", help="Name of mongo database [scout]")
@click.option("-uri", "--mongo-uri", help="MongoDB connection string")
@click.option("-u", "--username")
@click.option("-p", "--password")
@click.option("-a", "--authdb", help="database to use for authentication")
@click.option("-port", "--port", help="Specify on what port to listen for the mongod")
@click.option("-h", "--host", help="Specify the host for the mongo database.")
@click.option(
"-f",
"--flask-config",
type=click.Path(exists=True),
help="Path to a PYTHON config file",
)
@with_appcontext
def cli(**_):
"""scout: manage interactions with a scout instance."""
cli.add_command(load_command)
cli.add_command(wipe)
cli.add_command(setup_command)
cli.add_command(delete)
cli.add_command(export)
cli.add_command(convert)
cli.add_command(index_command)
cli.add_command(view_command)
cli.add_command(update_command)
cli.add_command(download_command)
cli.add_command(serve)
|
enoex/kafka-python
|
setup.py
|
Python
|
apache-2.0
| 404
| 0
|
from distutils.core import setup
setup(
name="kafka-python",
    version="0.1-alpha",
author="David Arthur",
    author_email="mumrah@gmail.com",
url="https://github.com/mumrah/kafka-python",
packages=["kafka"],
license="Copyright 2012, David Arthur under Apache License, v2.0",
description="Pure Python client for Apache Kafka",
long_description=open("README.md").read(),
)
|
InTraffic/TSTK
|
TSTK/dispatcher.py
|
Python
|
gpl-3.0
| 20,467
| 0.004886
|
import logging
import signal
import socket
import configparser
import importlib.machinery
import serial
import copy
import zmq
class Dispatcher(object):
""" Superclass for all Dispatchers.
This is the part of the simulator that handles the connections.
"""
def __init__(self, dispatcher_type, dispatcher_id):
        self.name = dispatcher_type
self.dispatcher_id = dispatcher_id
self.call_backs = {}
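        # Maps a poll key (a zmq socket object, or fileno() for plain
        # sockets) to a (socket, handler) pair; run() dispatches on these.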
self.go_on = True
logger = logging.getLogger('{0}_simulator'
.format(dispatcher_type))
logger.setLevel(logging.INFO)
logfile = '/tmp/test.log'
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
self.logger = logger
self.context = zmq.Context(1)
        def control_c_handler(signum, frame):
            """Controlled shutdown so we can cleanup."""
            self.go_on = False
        self.control_c_handler = control_c_handler
def create_sockets(self, accept_socket):
# Open a socket to listen for commands from the scenario player
address = "tcp://*:{0}".format(self.command_listen_port)
self.logger.info("Command subscription at {0}".format(address))
command_socket = self.context.socket(zmq.SUB)
command_socket.bind(address)
command_socket.setsockopt(zmq.SUBSCRIBE, "")
# Add the sockets to the zmq poller.
self.poller = zmq.Poller()
self.poller.register(accept_socket, zmq.POLLIN)
self.poller.register(command_socket, zmq.POLLIN)
# Register the call backs.
self.call_backs[accept_socket.fileno()] = (accept_socket, self.accept)
self.call_backs[command_socket] = (command_socket,
self.process_player_command)
# Not part of the poller
# Message forwarding link to player
address = "tcp://*:{0}".format(self.message_forward_port)
self.logger.info("Publishing on " + address)
self.repeater_socket = self.context.socket(zmq.PUB)
self.repeater_socket.bind(address)
def accept(self, a_socket):
"""Accept a connection from the system.
"""
system_socket, address = a_socket.accept()
self.logger.info('Connection from ' + str(address))
# Register this socket too so we look for incoming data
self.poller.register(system_socket, zmq.POLLIN)
self.call_backs[system_socket.fileno()] = (
system_socket, self.process_message)
self.system_socket = system_socket
def process_player_command(self, a_socket):
""" Process a command from the scenario player.
"""
# receive the command
command = a_socket.recv_pyobj()
self.logger.info('received command from scenario player: {0}'
.format(type(command)))
self.system_socket.send(self.message.to_message(command))
def process_message(self, a_socket):
""" Receive and forward a message from the system """
        self.logger.info('Data from the system')
        # We do not know beforehand how big the blob is.
        data = a_socket.recv(2048)
        if not data:
            # Connection was closed, so unregister and close the socket.
            self.poller.unregister(a_socket)
            del self.call_backs[a_socket.fileno()]
            a_socket.close()
            self.system_socket = None
        else:
            a_message = self.message.from_message(data)
self.logger.info('Copying data to player')
self.repeater_socket.send_pyobj(a_message)
def run(self):
# Catch any Control-C
signal.signal(signal.SIGINT, self.control_c_handler)
self.create_sockets()
while self.go_on :
# Note that poller uses fileno() as the key for non-zmq sockets.
socks = dict(self.poller.poll(60000)) # Timeout in ms, 1 minute
for socket_key in self.call_backs.copy() :
# Need copy here cause we might modify the call_backs
# while in the call back functions.
if socket_key in socks and socks[socket_key] == zmq.POLLIN:
if socket_key in self.call_backs:
cbp = self.call_backs[socket_key]
function = cbp[1]
function(cbp[0])
self.logger.info("Still alive")
            self.run_extra(socks)  # per-iteration hook; subclasses override run_extra
self.logger.info("Stopping")
self.context.term()
#------------------------------------------------------------------------------
class TCPDispatcher(Dispatcher):
""" Dispatcher subclass for TCP connections"""
def __init__(self, dispatcher_type, dispatcher_id):
        Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader(
                    'message', self.message_path)
                message_module = loader.load_module()
                self.message = message_module.Message()
# address and port to listen on for messages from the system
self.accept_address = entries['AcceptAddress']
self.listen_port = entries['ListenPort']
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid tcp section found in config file')
def create_sockets(self):
""" Create the TCP sockets between the system and the
Scenario player
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
# Open a tcp socket to listen for new connections
# from the system.
self.logger.info("Listening on address {0}"
.format(str(self.accept_address)))
self.logger.info("Listening on port {0}".format(str(self.listen_port)))
accept_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        accept_socket.bind((self.accept_address, int(self.listen_port)))
# Only handle one connection at a time.
accept_socket.listen(1)
# Let the superclass finish the creation of the rest of the
# sockets, because it is the same.
Dispatcher.create_sockets(self, accept_socket)
    def run_extra(self, socks):
        # TCP dispatcher has no extra steps to add to the default loop.
        # We will just exit this method.
        pass
#------------------------------------------------------------------------------
class SerialDispatcher(Dispatcher):
""" Dispatcher subcla
|
ss for Serial connections"""
SERIAL_PARITY = {'none':serial.PARITY_NONE , 'even':serial.PARITY_EVEN ,
'odd':serial.PARITY_ODD , 'mark':serial.PARITY_MARK ,
'space':serial.PARITY_SPACE}
    SERIAL_STOPBITS = {'one':serial.STOPBITS_ONE ,
                       'onePointFive': serial.STOPBITS_ONE_POINT_FIVE,
'two':serial.STOPBITS_TWO }
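    # These tables map configuration-file strings to the corresponding
    # pyserial constants, e.g. a parity entry of 'even' resolves to
    # serial.PARITY_EVEN.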
default_timeout = 60000
def __init__(self, dispatcher_type, dispatcher_id):
Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
self.repeater_socket = None
self.poller = None
|
biomodels/BIOMD0000000045
|
BIOMD0000000045/model.py
|
Python
|
cc0-1.0
| 427
| 0.009368
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000045.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
    except ImportError:
return False
else:
return True
if module_exists('libsbml'):
    import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
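# Hedged alternative: importlib.util.find_spec can test for a module without
# importing it, avoiding module_exists()'s side effect of executing the import.
def module_exists_no_import(module_name):
    import importlib.util
    return importlib.util.find_spec(module_name) is not None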
|
luo2chun1lei2/AgileEditor
|
vc/src/ViewLog.py
|
Python
|
gpl-2.0
| 6,715
| 0.010425
|
#-*- coding:utf-8 -*-
'''
Display the output of a command.
'''
import threading
from gi.repository import Gtk, Gdk, GObject, GLib, GtkSource, Pango
from VcEventPipe import *
class ViewLog:
    '''
    Display the log.
    1. When a new command arrives, decide whether to replace the current log.
    2. After a command's new log text is displayed, decide whether to scroll.
    '''
    # Enum constants for the tag-list columns.
    (
        COLUMN_TAG_LINE_NO, # line number
        COLUMN_TAG_NAME,    # tag name
        NUM_COLUMNS) = range(3)
def __init__(self, vc_cmd_grp):
        self.vc_cmd_grp = vc_cmd_grp # the command group currently being executed
        self.vc_cmd = None           # the command currently being executed
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
editor = GtkSource.View()
editor.set_cursor_visible(True)
        editor.set_show_line_numbers(True) # show line numbers
        editor.set_auto_indent(True) # auto indent
        #editor.set_insert_spaces_instead_of_tabs(True) # use spaces instead of tabs
        editor.set_tab_width(4) # tab width of 4
        editor.set_highlight_current_line(True) # highlight the current line
        editor.set_editable(False) # read-only
        self._ide_set_font(editor, "Ubuntu mono 12") # set the font
src_buffer = self.create_buffer()
editor.set_buffer(src_buffer)
sw.add(editor)
self.view = sw
self.taglistview = editor
VcEventPipe.register_event(VcEventPipe.EVENT_LOG_COMMAND_START, self.sm_start_new_cmd)
VcEventPipe.register_event(VcEventPipe.EVENT_LOG_APPEND_TEXT, self.sm_append_log)
self.set_scrollable(True)
self.set_show_new_cmd_log(True)
def layout(self):
self.taglistview.show()
self.view.show()
def unregister(self):
VcEventPipe.unregister_event(VcEventPipe.EVENT_LOG_COMMAND_START, self.sm_start_new_cmd)
VcEventPipe.unregister_event(VcEventPipe.EVENT_LOG_APPEND_TEXT, self.sm_append_log)
def _ide_set_font(self, widget, str_font_desc):
        ''' Set the widget's font.
            widget        Gtk.Widget  the widget
            str_font_desc String      description of the font ("name size")
        '''
font_desc = Pango.FontDescription.from_string(str_font_desc)
widget.modify_font(font_desc)
def create_buffer(self):
        # TODO: find a syntax definition better suited to log output.
        # Supported languages:
# ada awk boo c c-sharp changelog chdr cpp css d def desktop diff
# docbook dot dpatch dtd eiffel erlang forth fortran gap gettext-translation
# gtk-doc gtkrc haddock haskell haskell-literate html idl ini java js latex
# libtool lua m4 makefile msil nemerle objc objective-caml ocl octave pascal
# perl php pkgconfig python r rpmspec ruby scheme sh sql tcl texinfo vala vbnet
# verilog vhdl xml xslt yacc
src_buffer = GtkSource.Buffer()
manager = GtkSource.LanguageManager()
        language = manager.get_language("sh") # set the syntax type
        src_buffer.set_language(language)
        src_buffer.set_highlight_syntax(True) # syntax highlighting
return src_buffer
def set_scrollable(self, is_scrollable):
        # Whether to keep scrolling after the log is updated.
        self.is_scrollable = is_scrollable
        if is_scrollable:         # scrolling requested
            self._scroll_to_end() # jump to the end right away
        else:                     # scrolling not requested
            pass                  # nothing to do
    def get_scrollable(self):
        # Query whether the view currently scrolls to show the newest log text.
        return self.is_scrollable
def set_show_new_cmd_log(self, show):
self.is_show_new_cmd_log = show
if show:
            # If the log of the most recently executed command should be
            # shown, refresh the view to match the current state.
            latest_cmd = None
            for cmd in self.vc_cmd_grp.commands:
                if cmd.is_selected and cmd.process > 0:
                    latest_cmd = cmd
            if latest_cmd is not None:
                self.vc_cmd = latest_cmd
                self.set_log(latest_cmd)
        else:
            # If the latest command log no longer needs to be shown,
            # there is nothing to do.
            pass
def get_show_new_cmd_log(self):
return self.is_show_new_cmd_log
def sm_start_new_cmd(self, vc_cmd):
        # If the command does not belong to this command group, return.
if vc_cmd not in self.vc_cmd_grp.commands:
return
        # If it is not the current command and new commands should not be
        # shown, stop accepting new command output.
if not self.is_show_new_cmd_log and self.vc_cmd != vc_cmd:
return
self.vc_cmd = vc_cmd
Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self.clean_log)
def clean_log(self):
        ''' Clear the current text. '''
print "clean text"
editor = self.taglistview
src_buf = editor.get_buffer()
src_buf.delete(src_buf.get_start_iter(), src_buf.get_end_iter())
def sm_append_log(self, vc_cmd, text):
        # If the command does not belong to this command group, return.
if vc_cmd not in self.vc_cmd_grp.commands:
return
        # If it is not the current command and new commands should not be
        # shown, stop accepting new command output.
if not self.is_show_new_cmd_log and self.vc_cmd != vc_cmd:
return
Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE, self.append_log, text)
def append_log(self, text):
# thrd = threading.currentThread()
# print "append text : %s" % ( thrd.getName() )
#print "append " + text,
        ''' Append one message. '''
editor = self.taglistview
src_buf = editor.get_buffer()
iter_ = src_buf.get_end_iter()
src_buf.insert(iter_, text)
if self.is_scrollable:
self._scroll_to_end()
def set_log(self, vc_cmd):
self.vc_cmd = vc_cmd
self.set_show_new_cmd_log(False)
self.clean_log()
self.append_log(vc_cmd.get_log())
def _scroll_to_end(self):
editor = self.taglistview
src_buf = editor.get_buffer()
iter_ = src_buf.get_end_iter()
        # Scroll to the end. (TODO: it does not actually reach the very end.)
        editor.scroll_to_iter(iter_, 0.25, False, 0.0, 0.5)
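# Hedged sketch: how a worker thread would feed this view, mirroring
# sm_append_log's use of Gdk.threads_add_idle to stay off the GTK main loop.
def demo_feed_log(view_log, lines):
    def worker():
        for line in lines:
            # Never touch GTK widgets from a worker thread; schedule instead.
            Gdk.threads_add_idle(GLib.PRIORITY_DEFAULT_IDLE,
                                 view_log.append_log, line)
    threading.Thread(target=worker).start()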
|
ParuninPavel/lenta4_hack
|
vkapp/bot/dao/newsDAO.py
|
Python
|
mit
| 1,222
| 0.005728
|
from vkapp.bot.models import Blogger, News, AdminReview, Publication
from .usersDAO import get_or_create_blogger
from datetime import datetime, timedelta, time
def new_news(link, media, uid, pic):
blogger = get_or_create_blogger(uid)
news = News(link=link, blogger=blogger, media=media, pic=pic)
news.save()
return news
def get_news_proposed_today(uid):
today = datetime.now().date()
tomorrow = today + timedelta(1)
    today_start = datetime.combine(today, time())
today_end = datetime.combine(tomorrow, time())
    news = News.objects.filter(blogger__vk_user__vk_id=uid).filter(
        date_time__lte=today_end, date_time__gte=today_start)
return news
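# Hedged aside: filtering with date_time__lte set to midnight tomorrow also
# matches rows stamped exactly at that midnight; the conventional half-open
# interval uses __lt instead.
def get_news_proposed_today_half_open(uid):
    today = datetime.now().date()
    today_start = datetime.combine(today, time())
    today_end = datetime.combine(today + timedelta(1), time())
    return News.objects.filter(blogger__vk_user__vk_id=uid,
                               date_time__gte=today_start,
                               date_time__lt=today_end)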
def news_by_blogger(uid):
blogger = get_or_create_blogger(uid)
news = News.objects.filter(blogger=blogger)
return news
def get_news_review_rating(news):
review = AdminReview.objects.filter(news=news)
if len(review)==0:
return 0
else:
return review[0].rating
def is_news_published(news):
published_info = Publication.objects.filter(news=news)
if len(published_info) == 0:
return False
else:
return True
|
chteuchteu/Freebox-OS-munin
|
fields.py
|
Python
|
gpl-2.0
| 4,648
| 0.000215
|
from modes import *
# mode_traffic
field_rate_down = 'rate_down'
field_bw_down = 'bw_down'
field_rate_up = 'rate_up'
field_bw_up = 'bw_up'
# mode_temp
field_cpum = 'cpum'
field_cpub = 'cpub'
field_sw = 'sw'
field_hdd = 'hdd'
# mode_fan_speed
field_fan_speed = 'fan_speed'
# mode_xdsl
field_snr_down = 'snr_down'
field_snr_up = 'snr_up'
# mode_xdsl_errors
field_fec = 'fec'
field_crc = 'crc'
field_hec = 'hec'
field_es = 'es'
field_ses = 'ses'
# mode_switch1
field_rx1 = 'rx_1'
field_tx1 = 'tx_1'
# mode_switch2
field_rx2 = 'rx_2'
field_tx2 = 'tx_2'
# mode_switch3
field_rx3 = 'rx_3'
field_tx3 = 'tx_3'
# mode_switch4
field_rx4 = 'rx_4'
field_tx4 = 'tx_4'
# mode_transmission_tasks
field_nb_tasks_stopped = 'nb_tasks_stopped'
field_nb_tasks_checking = 'nb_tasks_checking'
field_nb_tasks_queued = 'nb_tasks_queued'
field_nb_tasks_extracting = 'nb_tasks_extracting'
field_nb_tasks_done = 'nb_tasks_done'
field_nb_tasks_repairing = 'nb_tasks_repairing'
field_nb_tasks_downloading = 'nb_tasks_downloading'
field_nb_tasks_error = 'nb_tasks_error'
field_nb_tasks_stopping = 'nb_tasks_stopping'
field_nb_tasks_seeding = 'nb_tasks_seeding'
# field_nb_tasks_active = 'nb_tasks_active' # Total active
# nb_tasks = 'nb_tasks' # Total
# mode_transmission_rate
field_rx_throttling = 'throttling_rate.rx_rate'
field_tx_throttling = 'throttling_rate.tx_rate'
field_rx_rate = 'rx_rate'
field_tx_rate = 'tx_rate'
# mode connection
field_bytes_up = 'bytes_up'
field_bytes_down = 'bytes_down'
# mode ftth
field_has_sfp = 'has_sfp'
field_link = 'link'
field_sfp_alim_ok = 'sfp_alim_ok'
field_sfp_has_signal = 'sfp_has_signal'
field_sfp_present = 'sfp_present'
# mode switch-bytes
field_rx_bytes = 'rx_good_bytes'
field_tx_bytes = 'tx_bytes'
# mode switch-packets
field_rx_packets = 'rx_good_packets'
field_tx_packets = 'tx_packets'
field_rx_unicast_packets = 'rx_unicast_packets'
field_tx_unicast_packets = 'tx_unicast_packets'
field_rx_broadcast_packets = 'rx_broadcast_packets'
field_tx_broadcast_packets = 'tx_broadcast_packets'
# mode wifi-stations
field_stations = 'stations'
# mode wifi-bytes
field_wifi_rx_bytes = 'rx_bytes'
field_wifi_tx_bytes = 'tx_bytes'
fields = {
mode_traffic: [
field_rate_down,
field_bw_down,
field_rate_up,
field_bw_up
],
mode_temp: [
field_cpum,
field_cpub,
field_sw,
field_hdd
],
mode_fan_speed: [
field_fan_speed
],
mode_xdsl: [
field_snr_down,
field_snr_up
],
mode_xdsl_errors: [
field_fec,
field_crc,
field_hec,
field_es,
field_ses
],
mode_switch1: [
field_rx1,
field_tx1
],
mode_switch2: [
field_rx2,
field_tx2
],
mode_switch3: [
field_rx3,
field_tx3
],
mode_switch4: [
field_rx4,
field_tx4
],
mode_switch_bytes: [
field_rx_bytes,
field_tx_bytes,
],
mode_switch_packets: [
field_rx_packets,
field_tx_packets,
field_rx_unicast_packets,
field_tx_unicast_packets,
field_rx_broadcast_packets,
field_tx_broadcast_packets,
],
mode_transmission_tasks: [
field_nb_tasks_stopped,
field_nb_tasks_checking,
field_nb_tasks_queued,
field_nb_tasks_extracting,
field_nb_tasks_done,
field_nb_tasks_repairing,
field_nb_tasks_downloading,
field_nb_tasks_error,
field_nb_tasks_stopping,
field_nb_tasks_seeding
],
mode_transmission_traffic: [
field_rx_throttling,
field_tx_throttling,
field_rx_rate,
field_tx_rate,
],
mode_connection: [
field_bytes_up,
field_bytes_down,
],
mode_connection_log: [
field_bytes_up,
field_bytes_down,
],
mode_ftth: [
field_has_sfp,
field_link,
field_sfp_alim_ok,
field_sfp_has_signal,
field_sfp_present,
],
mode_wifi_stations: [
field_stations,
],
mode_wifi_bytes: [
field_wifi_rx_bytes,
field_wifi_tx_bytes,
],
mode_wifi_bytes_log: [
field_wifi_rx_bytes,
field_wifi_tx_bytes,
],
}
xdsl_errors_fields_descriptions = {
    field_fec: 'FEC (Forward Error Correction)',
field_crc: 'CRC (Cyclic Redundancy Check)',
field_hec: 'HEC (Header Error Control)',
field_es: 'ES (Errored Seconds)',
field_ses: 'SES (Severely Errored Seconds)'
}
def get_fields(mode):
    if mode not in fields:
        print('Unknown mode {}'.format(mode))
        return []  # avoid the KeyError the lookup below would raise
    return fields[mode]
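# Minimal usage sketch (hedged; assumes mode_traffic comes from modes,
# imported above via 'from modes import *'):
def print_mode_fields(mode):
    for field in get_fields(mode):
        print('{}: {}'.format(mode, field))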
|
Rinoahu/debias
|
lib/debias.py
|
Python
|
gpl-3.0
| 49,496
| 0.022204
|
#!/usr/bin/
"""
"""
from __future__ import print_function
from __future__ import division
import networkx as nx
import sys
from networkx.algorithms import bipartite
from operator import itemgetter
import matplotlib.pyplot as plt
import argparse
import pickle as cp
import math
import numpy as np
from numpy import percentile
import collections
import Bio
from Bio.UniProt import GOA
from Bio.Seq import Seq
from dateutil import parser
import os
import xlsxwriter
DATADIR = "data/"
# Some filenames
FILE_ALTERNATE_ID_TO_ID_MAPPING = DATADIR+"alt_to_id.graph"
FILE_CAFA_ID_TO_UNIPROT_ID_MAP = DATADIR+"CAFAIDTOUniprotIDMap.txt"
FILE_MFO_ONTOLOGY_GRAPH = DATADIR+"mf.graph"
FILE_BPO_ONTOLOGY_GRAPH = DATADIR+ "bp.graph"
FILE_CCO_ONTOLOGY_GRAPH = DATADIR+"cc.graph"
FILE_MFO_ONTOLOGY_ANCESTORS_GRAPH = DATADIR+"mf_ancestors.map"
FILE_BPO_ONTOLOGY_ANCESTORS_GRAPH = DATADIR+"bp_ancestors.map"
FILE_CCO_ONTOLOGY_ANCESTORS_GRAPH = DATADIR+ "cc_ancestors.map"
verbose=0
options=""
report=0
GAF21FIELDS = [
'DB',
'DB_Object_ID',
'DB_Object_Symbol',
'Qualifier',
'GO_ID',
'DB:Reference',
'Evidence',
'With',
'Aspect',
'DB_Object_Name',
'Synonym',
'DB_Object_Type',
'Taxon_ID',
'Date',
'Assigned_By',
'Annotation_Extension',
'Gene_Product_Form_ID'
]
EXPEC = [
"EXP",
"IDA",
"IPI",
"IMP",
"IGI",
"IEP"
]
COMPEC = [
"ISS",
"ISO",
"ISA",
"ISM",
"IGC",
"IBA",
"IBD",
"IKR",
"IRD",
"RCA"
]
AUTHEC = [
"TAS",
"NAS"
]
CUREC = [
"IC",
"ND"
]
IEA = ["IEA"]
def column( matrix, i ):
f = itemgetter( i )
return map( f, matrix )
def chooseProteinsBasedOnPublications( data, cutoff_prot, cutoff_attn ):
"""
    This function reads the set of proteins and chooses only those proteins
    which have been probed in publications that deal with fewer than
    -d <number> proteins.
    It builds a bipartite graph with proteins as one node set and the
    references as the other, cross-linking them with GO terms as edge
    weights.
    This function can be used to select a cutoff based on the number of
    annotations made by a particular reference, or on the number of proteins
    annotated by a reference. It is recommended that the protein cutoff,
    i.e. -cprot, be used instead of the annotations cutoff, since a reference
    that provides many annotations to few proteins is more relevant than one
    that touches a lot of proteins.
"""
mapping = []
for attnid in data:
per_annotation = data[attnid]
go = per_annotation['GO_ID'] # Extracting the Gene Ontology
protein = per_annotation['DB'] + "_" + per_annotation['DB_Object_ID']
ref = per_annotation['DB:Reference'] # The reference
mapping.append( [protein, ref, go, attnid] ) # Appending the annotation id for later identification
g = nx.MultiGraph()
g.add_nodes_from( column( mapping, 0 ), bipartite = 0 )
g.add_nodes_from( column( mapping, 1 ), bipartite = 1 )
for triplet in mapping:
g.add_edge( triplet[0], triplet[1], weight = triplet[2] + "_" + triplet[3] )
simple_g = nx.Graph( g ) # Converting the multi graph to a simple graph without parallel edges
no_of_prot_annotations_by_each_ref=[]
for ref in list( set( column( mapping, 1 ) ) ):
no_of_prot_annotations_by_each_ref.append(simple_g.degree(ref))
    if cutoff_attn is None:
        graph = simple_g
        threshold = int(cutoff_prot)
    elif cutoff_prot is None:
        graph = g
        threshold = int(cutoff_attn)
list_of_chosen_attn = []
# Looping through each GO term and selecting those for which there is at least one reference which probes fewer proteins than threshold
for protein in list( set( column( mapping, 0 ) ) ):
references = g.neighbors( protein )
for ref in references:
# Condition for inclusion
if graph.degree( ref ) <= threshold:
for key in g.get_edge_data( protein, ref ):
weight = g.get_edge_data( protein, ref )[key]
list_of_chosen_attn.append( weight['weight'].split( "_" )[1] )
new_data = dict()
for attid in list_of_chosen_attn:
new_data[attid] = data[attid]
return new_data
#
def convertToDictionary( filename ):
"""
This function reads from the input gaf file and converts it to a dictionary. This function is deprecated and will be removed in further releases.
Instead of using this function the program now makes use of the gaf iterator function from biopython.
"""
alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
fhr = open( filename, "r" )
data = dict()
counter = 1
for line in fhr:
if "!" not in line:
line = line.split( "\t" )
id = "anntn" + str( counter )
per_annotation = dict()
for f_no, field in enumerate( GAF21FIELDS ):
if field=="GO_ID":
if line[f_no].strip() in alt_id_to_id_map:
#print(line[f_no].strip())
line[f_no]=alt_id_to_id_map[line[f_no].strip()]
per_annotation[field] = line[f_no]
data[id] = per_annotation
counter += 1
"""if(len(data)==10):
break"""
fhr.close()
return data
def convertFromGAFToRequiredFormat(gaf):
"""
This function takes the data input which is created by gaf iterator and then makes few changes
in the annotations which is relevant to this program.
"""
alt_id_to_id_map = cp.load( open( FILE_ALTERNATE_ID_TO_ID_MAPPING, "rb" ) )
counter=1
data=dict()
for annotation in gaf:
id="anntn" + str( counter )
if annotation['GO_ID'] in alt_id_to_id_map:
annotation['GO_ID']=alt_id_to_id_map[annotation['GO_ID']]
annotation['DB:Reference']=annotation['DB:Reference'][0]
annotation['Date']=parser.parse(annotation['Date']).date()
#annotation['Qualifier']='|'.join(annotation['Qualifier'])
#print(annotation['Evidence'])
data[id]=annotation
counter += 1
return data
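# Usage sketch (hedged; 'annotations.gaf' would be a caller-supplied path):
# feeding Biopython's GAF iterator into convertFromGAFToRequiredFormat.
def load_gaf_as_required_format(gaf_path):
    with open(gaf_path) as handle:
        return convertFromGAFToRequiredFormat(GOA.gafiterator(handle))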
def writeToFile( data, filename ,input_filename):
"""
This function will write the content of the data structure 'data' to the output file.
It requires the input file to read the header. Inclusion of the header is mandatory.
"""
vprint("Writing to file ",filename)
#print(filename)
filepath="/".join(filename.split("/")[:-1] )
try:
if os.path.isdir(filepath)==False:
os.makedirs(filepath)
except OSError:
print("You do not have sufficient Permissions to create the folder. Please alter the permissions or provide a different path.")
sys.exit()
fhr = open(input_filename,"r")
header=""
for line in fhr:
if line[0]=='!':
header+=line
fhr.close()
fhw = open( filename+".gaf", "w" )
fhw.write(header)
for key in data:
per_annotation = data[key]
per_annotation['Qualifier']='|'.join(per_annotation['Qualifier'])
per_annotation['With']='|'.join(per_annotation['With'])
per_annotation['Synonym']='|'.join(per_annotation['Synonym'])
per_annotation['Taxon_ID']='|'.join(per_annotation['Taxon_ID'])
per_annotation['Date']=''.join(str(per_annotation['Date']).split("-"))
# vprint(per_annotation)
string = ""
for field in GAF21FIELDS:
try:
string += per_annotation[field] + "\t"
except TypeError:
print("Exception has occurred in function writeToFile")
print(per_annotation)
print(field)
print(per_annotation[field])
exit()
|
Yatekii/backdoor
|
register_service.py
|
Python
|
agpl-3.0
| 4,477
| 0.00134
|
import argparse
from models import Service
from models import Base
import helpers
import traceback
import sys
import os
import importlib
import shutil
@helpers.handle_dbsession()
def prepare_service_db(sqlsession, name, desc, models, uses_blueprint):
s = sqlsession.query(Service).filter_by(name=name).first()
if s:
        print('Service %s already exists. Aborting.' % name)
return False
    if models:
        Base.metadata.create_all(helpers.engine, [m.__table__ for m in models])
s = Service(name=name, uses_blueprint=uses_blueprint)
sqlsession.add(s)
sqlsession.commit()
print('Successfully prepared DB new service %s: %s' % (name, desc))
if models:
print('%s contains the following fields:' % name)
for model in models:
print(str(model.__name__))
else:
print('%s contains no fields.' % name)
return True
def validate_service(path):
if os.path.isdir(path):
# servicename = os.path.basename(path)
if not os.path.isfile(os.path.join(path, '__init__.py')):
print('Service contains no __init__.py.')
return False
# m = importlib.import_module('%s' % servicename, '')
# if m.__uses_blueprint__:
# blueprint = os.path.join(path, 'blueprint')
# if not os.path.isdir(blueprint):
# print('Service contains no blueprint. Please place it in the blueprint dir.')
# return False
# if not os.path.isfile(os.path.join(blueprint, '__init__.py')):
# print('Service blueprint contains no __init__.py.')
# return False
# templates = os.path.join(blueprint, 'templates')
# if not os.path.isdir(templates):
# print('Warning: Service blueprint contains no template dir.')
# elif not os.listdir(templates):
# print('Warning: Service blueprint template dir is empty.')
return True
else:
print('%s is not a directory. Please check your input' % path)
return False
def register_service(path):
print('Importing service from %s.' % path)
if validate_service(path):
servicename = os.path.basename(path)
if os.path.isdir(os.path.join('services/', servicename)):
            print('Service could not be imported because a service with the same name already exists.')
return False
else:
destination = os.path.join('services/', servicename)
try:
shutil.copytree(path, destination)
except Exception as e:
print(e)
traceback.print_tb(sys.exc_info()[2])
shutil.rmtree(destination)
return False
else:
print('Service is faulty, please consult the errors.')
return False
print('Preparing the DB for service %s' % servicename)
try:
m = importlib.import_module('.%s' % servicename, 'services')
if prepare_service_db(m.__service_name__, m.__description__, m.__models__, m.__uses_blueprint__):
print('Successfully prepared DB for service %s' % servicename)
else:
            print('Failed to prepare the DB for service %s' % servicename)
return False
except Exception as e:
print(e)
traceback.print_tb(sys.exc_info()[2])
print('Failed to load service %s due to a faulty module' % servicename)
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Service importer')
parser.add_argument('--path',
metavar='url',
type=str,
nargs='+',
help='Path to the service to import')
args = parser.parse_args()
    if not args.path or len(args.path) < 1:
print('Please specify at least one service to import')
else:
for p in args.path:
if register_service(p):
print('Successfully registered new service %s' % p)
else:
print('Failed to register service %s' % p)
# prepare_service_db('basics', 'Basic services and commands', (
# ('text', 'txt', Type.text, '.', (('2345', 'adsd'), ('2345', 'adsd'), ('2345', 'adsd'))),
# ('int', 'd', Type.int, '', ()),
# ('bool', 'truefalse', Type.bool, '', ())
# ))
|
Hugoo/Prologin
|
2008 - Machine/suite.py
|
Python
|
mit
| 678
| 0.00885
|
import sys
def suite(n,s):
p = -1
fin = ''
c = 0
for i in range(0,n+1):
        if i == n:
            # flush the final run (s[i] itself would be out of range here)
            fin = fin+str(c)+str(p)
            break
        if p == -1:
            p = s[i]
            c = 1
else:
if s[i]==p:
c = c+1
else:
fin = fin+str(c)+str(p)
p = s[i]
c = 1
print fin
return
if __name__ == '__main__':
n = int(raw_input())
s = raw_input()
suite(n,s)
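# Worked example (hedged): suite() run-length encodes the digit string,
# printing count-then-digit pairs.
def demo_suite():
    suite(5, '11222')  # runs are '1'*2 and '2'*3, so this prints '2132'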
|
andreisavu/django-jack
|
jack/beanstalk/forms.py
|
Python
|
apache-2.0
| 291
| 0.006873
|
from django import forms
class PutForm(forms.Form):
body = forms.CharField(widget=forms.Textarea())
    tube = forms.CharField(initial='default')
priority = forms.IntegerField(initial=2147483648)
delay = forms.IntegerField(initial=0)
ttr = forms.IntegerField(initial=120)
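# Usage sketch (hedged; assumes a configured Django project): binding and
# validating a PutForm with values matching the defaults above.
def example_put_form():
    form = PutForm({'body': 'hello', 'tube': 'default',
                    'priority': 2147483648, 'delay': 0, 'ttr': 120})
    return form.is_valid()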
|
knoguchi/druid
|
docs/_bin/get-milestone-prs.py
|
Python
|
apache-2.0
| 3,551
| 0.005069
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import requests
import subprocess
import sys
import time
if len(sys.argv) != 5:
sys.stderr.write('usage: program <github-username> <upstream-remote> <previous-release-branch> <current-release-branch>\n')
sys.stderr.write(" e.g., program myusername upstream 0.13.0-incubating 0.14.0-incubating")
sys.stderr.write(" It is also necessary to set a GIT_TOKEN environment variable containing a personal access token.")
sys.exit(1)
github_username = sys.argv[1]
upstream_remote = sys.argv[2]
previous_branch = sys.argv[3]
release_branch = sys.argv[4]
master_branch = "master"
upstream_master = "{}/{}".format(upstream_remote, master_branch)
upstream_previous = "{}/{}".format(upstream_remote, previous_branch)
upstream_release = "{}/{}".format(upstream_remote, release_branch)
command = "git log {}..{} --oneline | tail -1".format(upstream_master, upstream_previous)
# Find the commit where the previous release branch was cut from master
previous_branch_first_commit = subprocess.check_output(command, shell=True).decode('UTF-8')
match_result = re.match(r"(\w+) .*", previous_branch_first_commit)
previous_branch_first_commit = match_result.group(1)
print("Previous branch: {}, first commit: {}".format(upstream_previous, previous_branch_first_commit))
# Find all commits between that commit and the current release branch
command = "git rev-list {}..{}".format(previous_branch_first_commit, upstream_release)
all_release_commits = subprocess.check_output(command, shell=True).decode('UTF-8')
for commit_id in all_release_commits.splitlines():
try:
# wait 3 seconds between calls to avoid hitting the rate limit
time.sleep(3)
search_url = "https://api.github.com/search/issues?q=type:pr+is:merged+is:closed+repo:apache/incubator-druid+SHA:{}"
resp = requests.get(search_url.format(commit_id), auth=(github_username, os.environ["GIT_TOKEN"]))
resp_json = resp.json()
milestone_found = False
closed_pr_nums = []
if (resp_json.get("items") is No
|
ne):
print("Could not get PRs for commit ID {}, resp: {}".format(commit_id, resp_json))
continue
for pr in resp_json["items"]:
closed_pr_nums.append(pr["number"])
milestone = pr["milestone"]
if milestone is not None:
milestone_found = True
print("COMMIT: {}, PR#: {}, MILESTONE: {}".format(commit_id, pr["number"], pr["milestone"]["url"]))
if not milestone_found:
print("NO MILESTONE FOUND FOR COMMIT: {}, CLOSED PRs: {}".format(commit_id, closed_pr_nums))
except Exception as e:
print("Got exception for commitID: {} ex: {}".format(commit_id, e))
continue
|
synthicity/activitysim
|
activitysim/examples/example_estimation/scripts/infer.py
|
Python
|
agpl-3.0
| 28,331
| 0.003565
|
# ActivitySim
# See full license in LICENSE.txt.
import sys
import os
import logging
import yaml
import numpy as np
import pandas as pd
from activitysim.abm.models.util import tour_frequency as tf
from activitysim.core.util import reindex
from activitysim.abm.models.util import canonical_ids as cid
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
logger.addHandler(ch)
CONSTANTS = {}
SURVEY_TOUR_ID = 'survey_tour_id'
SURVEY_PARENT_TOUR_ID = 'survey_parent_tour_id'
SURVEY_PARTICIPANT_ID = 'survey_participant_id'
SURVEY_TRIP_ID = 'survey_trip_id'
ASIM_TOUR_ID = 'tour_id'
ASIM_PARENT_TOUR_ID = 'parent_tour_id'
ASIM_TRIP_ID = 'trip_id'
ASIM_PARTICIPANT_ID = 'participant_id'
survey_tables = {
'households': {
'file_name': 'survey_households.csv',
'index': 'household_id'
},
'persons': {
'file_name': 'survey_persons.csv',
'index': 'person_id'
},
'tours': {
'file_name': 'survey_tours.csv'
},
'joint_tour_participants': {
'file_name': 'survey_joint_tour_participants.csv'
},
'trips': {
'file_name': 'survey_trips.csv'
},
}
outputs = {
'households': 'override_households.csv',
'persons': 'override_persons.csv',
'tours': 'override_tours.csv',
'joint_tour_participants': 'override_joint_tour_participants.csv',
'trips': 'override_trips.csv',
}
control_tables = {
'households': {
'file_name': 'final_households.csv',
'index': 'household_id'
},
'persons': {
'file_name': 'final_persons.csv',
'index': 'person_id'
},
'tours': {
'file_name': 'final_tours.csv'
},
'joint_tour_participants': {
'file_name': 'final_joint_tour_participants.csv'
},
'trips': {
'file_name': 'final_trips.csv'
},
}
apply_controls = True
skip_controls = not apply_controls
def mangle_ids(ids):
return ids * 10
def unmangle_ids(ids):
return ids // 10
def infer_cdap_activity(persons, tours, joint_tour_participants):
mandatory_tour_types = ['work', 'school']
non_mandatory_tour_types = ['escort', 'shopping', 'othmaint', 'othdiscr', 'eatout', 'social']
num_mandatory_tours = \
tours[tours.tour_type.isin(mandatory_tour_types)].\
groupby('person_id').size().\
reindex(persons.index).fillna(0).astype(np.int8)
num_non_mandatory_tours = \
tours[tours.tour_type.isin(non_mandatory_tour_types)].\
groupby('person_id').size().\
reindex(persons.index).fillna(0).astype(np.int8)
num_joint_tours = \
joint_tour_participants.\
groupby('person_id').size().\
reindex(persons.index).fillna(0).astype(np.int8)
num_non_mandatory_tours += num_joint_tours
cdap_activity = pd.Series('H', index=persons.index)
cdap_activity = cdap_activity.where(num_mandatory_tours == 0, 'M')
cdap_activity = cdap_activity.where((cdap_activity == 'M') | (num_non_mandatory_tours == 0), 'N')
return cdap_activity
def infer_mandatory_tour_frequency(persons, tours):
num_work_tours = \
tours[tours.tour_type == 'work'].\
groupby('person_id').size().reindex(persons.index).fillna(0).astype(np.int8)
num_school_tours = \
tours[tours.tour_type == 'school'].\
groupby('person_id').size().reindex(persons.index).fillna(0).astype(np.int8)
mtf = {
0: '',
1: 'work1',
2: 'work2',
10: 'school1',
20: 'school2',
11: 'work_and_school'
}
mandatory_tour_frequency = (num_work_tours + num_school_tours*10).map(mtf)
return mandatory_tour_frequency
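# Standalone check (hedged) of the encoding above: counts combine as
# num_work_tours + 10 * num_school_tours before mapping through mtf.
def demo_mtf_encoding():
    mtf = {0: '', 1: 'work1', 2: 'work2', 10: 'school1', 20: 'school2',
           11: 'work_and_school'}
    codes = pd.Series([1, 2, 0]) + pd.Series([1, 0, 2]) * 10
    return codes.map(mtf).tolist()  # ['work_and_school', 'work2', 'school2']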
def infer_non_mandatory_tour_frequency(configs_dir, persons, tours):
def read_alts():
# escort,shopping,othmaint,othdiscr,eatout,social
# 0,0,0,0,0,0
# 0,0,0,1,0,0, ...
alts = \
pd.read_csv(os.path.join(configs_dir, 'non_mandatory_tour_frequency_alternatives.csv'),
comment='#')
        alts = alts.astype(np.int8)  # - NARROW
        return alts
    tours = tours[tours.tour_category == 'non_mandatory']
alts = read_alts()
tour_types = list(alts.columns.values)
# tour_frequency is index in alts table
alts['alt_id'] = alts.index
# actual tour counts (may exceed counts envisioned by alts)
unconstrained_tour_counts = pd.DataFrame(index=persons.index)
for tour_type in tour_types:
unconstrained_tour_counts[tour_type] = \
tours[tours.tour_type == tour_type].\
groupby('person_id').size().reindex(persons.index).fillna(0).astype(np.int8)
# unextend tour counts
# activitysim extend tours counts based on a probability table
# counts can only be extended if original count is between 1 and 4
# and tours can only be extended if their count is at the max possible
max_tour_counts = alts[tour_types].max(axis=0)
constrained_tour_counts = pd.DataFrame(index=persons.index)
for tour_type in tour_types:
constrained_tour_counts[tour_type] = unconstrained_tour_counts[tour_type].clip(upper=max_tour_counts[tour_type])
    # persons whose tours were constrained and who aren't eligible for extension because they have > 4 constrained tours
has_constrained_tours = (unconstrained_tour_counts != constrained_tour_counts).any(axis=1)
print("%s persons with constrained tours" % (has_constrained_tours.sum()))
    too_many_tours = has_constrained_tours & (constrained_tour_counts.sum(axis=1) > 4)
if too_many_tours.any():
print("%s persons with too many tours" % (too_many_tours.sum()))
print(constrained_tour_counts[too_many_tours])
# not sure what to do about this. Throw out some tours? let them through?
print("not sure what to do about this. Throw out some tours? let them through?")
assert False
# determine alt id corresponding to constrained_tour_counts
# need to do index waltz because pd.merge doesn't preserve index in this case
alt_id = \
pd.merge(constrained_tour_counts.reset_index(), alts,
left_on=tour_types, right_on=tour_types, how='left').set_index(persons.index.name).alt_id
# did we end up with any tour frequencies not in alts?
if alt_id.isna().any():
bad_tour_frequencies = alt_id.isna()
logger.warning("WARNING Bad joint tour frequencies\n\n")
logger.warning("\nWARNING Bad non_mandatory tour frequencies: num_tours\n%s" %
constrained_tour_counts[bad_tour_frequencies])
logger.warning("\nWARNING Bad non_mandatory tour frequencies: num_tours\n%s" %
tours[tours.person_id.isin(persons.index[bad_tour_frequencies])].sort_values('person_id'))
bug
tf = unconstrained_tour_counts.rename(columns={tour_type: '_%s' % tour_type for tour_type in tour_types})
tf['non_mandatory_tour_frequency'] = alt_id
return tf
def infer_joint_tour_frequency(configs_dir, households, tours):
def read_alts():
# right now this file just contains the start and end hour
alts = \
pd.read_csv(os.path.join(configs_dir, 'joint_tour_frequency_alternatives.csv'),
comment='#', index_col='alt')
alts = alts.astype(np.int8) # - NARROW
return alts
alts = read_alts()
tour_types = list(alts.columns.values)
assert(len(alts.index[(alts == 0).all(axis=1)]) == 1) # should be one zero_tours alt
zero_tours_alt = alts.index[(alts == 0).all(axis=1)].values[0]
alts['joint_tour_frequency'] = alts.index
joint_tours = tours[tours.tour_category == 'joint']
num_tours = pd.DataFrame(index=households.index)
for tour_type in tour_types:
joint_tour_is_tour_type = (joint_tours.tour_type == tour_type)
if joint_tour_is_tour_type.any():
num_tours[tour_type] = \
joint_tours[joint_tour_is_tour_type].\
groupby('household_id').size().\
reindex(households.index).fi
|
samuell/luigi
|
examples/foo.py
|
Python
|
apache-2.0
| 1,501
| 0.000666
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
You can run this example like this:
.. code:: console
$ rm -rf '/tmp/bar'
$ luigi --module examples.foo examples.Foo --workers 2 --local-scheduler
"""
from __future__ import print_function
import time
import luigi
class Foo(luigi.WrapperTask):
task_namespace = 'examples'
def run(self):
print("Running Foo")
def requires(self):
        for i in range(10):
            yield Bar(i)
class Bar(luigi.Task):
task_namespace = 'examples'
num = luigi.IntParameter()
def run(self):
time.sleep(1)
self.output().open('w').close()
def output(self):
"""
Returns the target output for this task.
:return: the target output for this task.
        :rtype: object (:py:class:`~luigi.target.Target`)
"""
time.sleep(1)
return luigi.LocalTarget('/tmp/bar/%d' % self.num)
|
dwt2c/Schoogle
|
Schoogle/Schoogle/spiders/O_Spider.py
|
Python
|
gpl-2.0
| 3,037
| 0.029964
|
from __future__ import absolute_import
from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy import Request
import sys
from Schoogle.items import O_Item
from sys import getsizeof
from datetime import datetime
import time
import re
import string
def reduce(text):
return "".join([c for c in text if c in string.letters or c in (" ",)])
#return re.sub('\s+',' ', re.sub(r'([^\s\w]|_)+', '', text))
#@params:
#@html_list: a list of html strings; we strip out the clutter of \t and \n
#            characters in the raw html and return a single string with the
#            page's entire text content. This string is later used by
#            postgresql for a full text search.
def prune(html_list):
toreturn = []
for i in html_list:
t = i.encode('ascii','ignore')
t = reduce(t)
        if t != '' and t != ' ':
toreturn.append(t)
return " ".join(toreturn)
class O_Spider(Spider):
name = 'O_Spider'
allowed_domains = ['owu.edu']
start_urls = ['http://www.owu.edu']
# @params
# @response: this is a Scrapy.Response object containing much of the website information
    # attributes of this object will be used to flesh out our O_Item object
# @yield(1): this returns a single object each time next( this object ) is called
# first parse yields all items
# @yield(2): this is completed only after we have yielded an object from this webpage, it will
    # recursively call parse on all links in a web page
def parse(self,response):
        # here we use scrapy's Request object to catch all invalid links when parsing our document
try:
links = response.xpath('//@href').extract()
for link in links:
try:
req = Request(link,callback = self.parse)
except ValueError:
pass # might want to log these eventually
except AttributeError:
pass # log these eventually
# fill up item with statistics
current_item = O_Item()
current_item['url'] = response.url
try:
current_item['title'] = response.xpath('//title/text()').extract()
current_item['timestamp'] = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
current_item['page_size'] = getsizeof(response.body)
current_item['full_html'] = response.body_as_unicode() # not sure if we really want this..
            current_item['full_text'] = prune(response.xpath('//text()').extract())
current_item['secure'] = 'https' in str(response.request)
current_item['links'] = links
yield current_item
except Exception as e:
print "______________________________________________________________"
print " ERROR THROW ON ITEM YIELD"
print e
pass
# recursive page search is below, this must happen after the item is pipelined to postgresql
        # this is where we yield a Request object with parse as the callback and the real recursion kicks in
try:
for link in response.xpath('//@href').extract():
try:
req = Request(link,callback = self.parse)
yield req
except ValueError:
pass # might want to log these eventually
except AttributeError:
pass # log these eventually
|
kylebegovich/ProjectEuler
|
Python/Solved/Page1/Problem24.py
|
Python
|
gpl-3.0
| 663
| 0.006033
|
import math
curr = 0
goal = 1000000
potential_nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
output_num = []
if __name__ == '__main__':
for i in xrange(10, 0, -1):
print (curr, i, "outer loop")
        for j in xrange(i + 1):
print (curr, j, "inner loop")
temp = math.factorial(i - 1) * j + curr
if temp >= goal:
print (temp)
curr += (math.factorial(i - 1) * (j-1))
print (curr, goal, i, j)
output_num.append(potential_nums[j-1])
potential_nums.remove(potential_nums[j-1])
                break
print output_num
# SOLVED : 2783915460
|
open-craft-guild/blueberrypy
|
src/blueberrypy/config.py
|
Python
|
bsd-3-clause
| 18,173
| 0.002091
|
import collections
import difflib
import inspect
import logging
import os.path
import warnings
import os
import importlib
import cherrypy
import yaml
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
json = None
for pkg in ['ujson', 'yajl', 'simplejson', 'cjson', 'json']:
try:
json = importlib.import_module(pkg)
    except ImportError:
pass
else:
break
from blueberrypy.email import Mailer
from blueberrypy.exc import (BlueberryPyNotConfiguredError,
BlueberryPyConfigurationError)
logger = logging.getLogger(__name__)
class BlueberryPyConfiguration(object):
class _YAMLLoader(Loader):
"""YAML loader supporting additional tags."""
def __init__(self, *args, **kwargs):
super(BlueberryPyConfiguration._YAMLLoader, self).__init__(*args, **kwargs)
self._setup_loader()
def register_tag(self, tag, callback):
yaml.add_constructor(tag, callback, Loader=self.__class__)
def _tag_env_var(self, loader, node):
env_var_name = loader.construct_scalar(node)
return os.getenv(env_var_name)
def _tag_first_of(self, loader, node):
seq = loader.construct_sequence(node)
for v in seq:
if v is not None:
return v
            raise yaml.YAMLError('At least one of the values passed to !FirstOf tag must be not None')
def _setup_loader(self):
self.register_tag('!EnvVar', self._tag_env_var)
self.register_tag('!FirstOf', self._tag_first_of)
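    @staticmethod
    def _demo_yaml_tags():
        """Hedged sketch (key names are illustrative only): resolving values
        through the custom !EnvVar and !FirstOf tags registered above."""
        doc = ("secret_key: !EnvVar APP_SECRET\n"
               "db_url: !FirstOf [!EnvVar DATABASE_URL, 'sqlite:///dev.db']\n")
        return load(doc, BlueberryPyConfiguration._YAMLLoader)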
def __init__(self, config_dir=None, app_config=None, logging_config=None,
webassets_env=None, environment=None,
env_var_name='BLUEBERRYPY_CONFIG'):
"""Loads BlueberryPy configuration from `config_dir` if supplied.
If `app_config` or `logging_config` or `webassets_env` are given, they
will be used instead of the configuration files found from `config_dir`.
If `environment` is given, it must be an existing CherryPy environment.
If `environment` is `production`, and `config_dir` is given, the `prod`
subdirectory will be searched for configuration files, otherwise the
`dev` subdirectory` will be searched.
If `env_var_name` is given, it must be an existing environment
variable, it will override values from YAML config.
Upon initialization of this configuration object, all the configuration
will be validated for sanity and either BlueberryPyConfigurationError or
BlueberryPyNotConfiguredError will be thrown if insane. For less severe
configuration insanity cases, a warning will be emitted instead.
:arg config_dir: a path, str
:arg app_config: a CherryPy config, dict
:arg logging_config: a logging config, dict
:arg webassets_env: a webassets environment, webassets.Environment
:arg environment: a CherryPy configuration environment, str
:arg env_var_name: an environment variable name for configuration, str
"""
ENV_CONFIG = self.__class__._load_env_var(env_var_name)
CWD = os.getcwdu() if getattr(os, "getcwdu", None) else os.getcwd()
if ENV_CONFIG.get('global', {}).get('CWD') and \
os.path.isdir(
os.path.join(ENV_CONFIG['global']['CWD'], 'src')):
CWD = ENV_CONFIG['global']['CWD']
if config_dir is None:
self.config_dir = config_dir = os.path.join(CWD, "config")
else:
self.config_dir = config_dir = os.path.abspath(config_dir)
if environment == "production":
self.config_dir = config_dir = os.path.join(config_dir, "prod")
elif environment == "test_suite" and os.path.exists(os.path.join(config_dir, "test")):
self.config_dir = config_dir = os.path.join(config_dir, "test")
else:
self.config_dir = config_dir = os.path.join(config_dir, "dev")
config_file_paths = {}
app_yml_path = os.path.join(config_dir, "app.yml")
logging_yml_path = os.path.join(config_dir, "logging.yml")
bundles_yml_path = os.path.join(config_dir, "bundles.yml")
# A local-only config, which overrides the app.yml values
app_override_yml_path = os.path.join(config_dir, "app.override.yml")
if os.path.exists(app_yml_path):
config_file_paths["app_yml"] = app_yml_path
if os.path.exists(logging_yml_path):
config_file_paths["logging_yml"] = logging_yml_path
if os.path.exists(bundles_yml_path):
config_file_paths["bundles_yml"] = bundles_yml_path
if os.path.exists(app_override_yml_path):
config_file_paths["app_override_yml"] = app_override_yml_path
self._config_file_paths = config_file_paths
if "app_yml" in config_file_paths and not app_config:
with open(config_file_paths["app_yml"]) as app_yml:
self._app_config = load(app_yml, self._YAMLLoader)
# If the overrides file exists, override the app config values
# with ones from app.override.yml
if "app_override_yml" in config_file_paths:
app_override_config = {}
with open(config_file_paths["app_override_yml"]) as app_override_yml:
app_override_config = load(app_override_yml, self._YAMLLoader)
self._app_config = self.__class__.merge_dicts(
self._app_config,
app_override_config
)
if "logging_yml" in config_file_paths and not logging_config:
with open(config_file_paths["logging_yml"]) as logging_yml:
self._logging_config = load(logging_yml, self._YAMLLoader)
if "bundles_yml" in config_file_paths and not webassets_env:
from webassets.loaders import YAMLLoader
self._webassets_env = YAMLLoader(config_file_paths["bundles_yml"]).load_environment()
if app_config:
self._app_config = dict(app_config)
try:
# Merge JSON from environment variable
self._app_config = self.__class__.merge_dicts(self._app_config, ENV_CONFIG)
except AttributeError:
if ENV_CONFIG: # not an empty dict
self._app_config = ENV_CONFIG
# Don't re-raise exception, self.validate() will do this later
if logging_config:
self._logging_config = dict(logging_config)
if webassets_env is not None:
self._webassets_env = webassets_env
self.validate() # Checks that all attributes are pre-populated
# Convert relative paths to absolute where needed
# self.validate() will fail if there's no app_config['controllers']
for _ in self._app_config['controllers']:
section = self._app_config['controllers'][_]
for r in section:
if isinstance(section[r], dict):
for __ in ['tools.staticdir.root',
'tools.staticfile.root']:
pth = section[r].get(__)
if pth is not None and not pth.startswith('/'):
self._app_config['controllers'][_][r][__] = \
os.path.join(CWD, pth)
# Convert relative paths of logs in handlers
# self.validate() will fail if there's no self._logging_config
        for handler_name, handler_config in (getattr(self, '_logging_config', {}) or {}).get('handlers', {}).items():
pth = handler_config.get('filename')
if pth is not None and not pth.startswith('/'):
self._logging_config['handlers'][handler_name]['filename'] = \
os.path.join(CWD, pth)
if environment == "backlash":
self.setup_backlash_environment()
@property
def config_file_paths(self):
if self._config_file_paths:
sorted_kv_pairs = tuple(((k, self._confi
|
GullyAPCBurns/bolinas
|
extractor_cansem/extractor_cansem.py
|
Python
|
mit
| 3,342
| 0.012567
|
import argparse
import sys
import os
from annotated_set import loadData
from data_structures import CanonicalDerivation
from canonical_parser import CanonicalParser
from derivation_tree import DerivationTree
from conversion.ghkm2tib import ghkm2tib
#from lib.amr.dag import Dag
class ExtractorCanSem:
def __init__(self):
pass
@classmethod
def help(self):
"""
Returns CanSem help message.
"""
return ExtractorCanSem.main(ExtractorCanSem(),"--help")
def main(self, *args):
parser = argparse.ArgumentParser(description='CanSem Extraction Algorithm for SHRG',
fromfile_prefix_chars='@',
prog='%s extract-cansem'%sys.argv[0])
parser.add_argument('nl_file', type=str, help="Natural Language File")
parser.add_argument('mr_file', type=str, help="Meaning Representation File")
parser.add_argument('alignment_file', type=str, help="Alignment File")
parser.add_argument('--ghkmDir', nargs='?', default='/home/kmh/Files/Tools/stanford-ghkm-2010-03-08', help="GHKM directory")
parser.add_argument('--tiburonLoc', nargs='?', default='/home/kmh/Files/Tools/newtib/tiburon', help="Tiburon executable file")
parser.add_argument('--prefix', nargs='?', default=False, help="Suffix for temporary and output files")
args = parser.parse_args(args=args)
if args.prefix == False:
args.prefix = "test"
args.parse_path = "%s.ptb"%args.prefix
args.align_path = "%s.a"%args.prefix
args.text_path = "%s.f"%args.prefix
args.ghkm_path = "%s.ghkm"%args.prefix
args.tib_path = "%s.tib"%args.prefix
# load input data into AnnotatedSet
data = loadData(args.nl_file,args.mr_file,args.alignment_file)
derivations = []
for sentence in data:
# Extraction
parser = CanonicalParser(sentence)
if len(parser.derivs_done) > 0:
derivations.append((sentence,parser.derivs_done[0]))
print len(derivations)
self.genGHKMfiles(args,derivations)
def genGHKMfiles(self,args,derivations):
parse_file = open(args.parse_path,'w')
align_file = open(args.align_path,'w')
text_file = open(args.text_path,'w')
for s,d in derivations:
x = DerivationTree.fromDerivation(d)
parse,align = x.getGHKMtriple_Java()
text = s["nl"].strip(' \t\n\r')
parse_file.write("%s\n"%parse)
align_file.write("%s\n"%align)
text_file.write("%s\n"%text)
parse_file.close()
align_file.close()
text_file.close()
print "Running GHKM Java rule extraction"
mem = "2g"
ghkm_opts = "-fCorpus %s -eParsedCorpus %s -align %s -joshuaFormat false -maxLHS 200 -maxRHS
|
15 -MaxUnalignedRHS 15" % (args.text_path,args.parse_path,
|
args.align_path)
java_opts="-Xmx%s -Xms%s -cp %s/ghkm.jar:%s/lib/fastutil.jar -XX:+UseCompressedOops"%(mem,mem,args.ghkmDir,args.ghkmDir)
os.system("java %s edu.stanford.nlp.mt.syntax.ghkm.RuleExtractor %s > %s" % (java_opts,ghkm_opts,args.ghkm_path))
print "Converting GHKM rules to Tiburon format"
ghkm2tib(args.ghkm_path,args.tib_path)
|
insomnia-lab/calibre
|
src/calibre/gui2/widgets2.py
|
Python
|
gpl-3.0
| 1,547
| 0.003232
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from calibre.gui2.complete2 import LineEdit
from calibre.gui2.widgets import history
class HistoryLineEdit2(LineEdit):
max_history_items = None
def __init__(self, parent=None, completer_widget=None, sort_func=lambda x:None):
LineEdit.__init__(self, parent=parent, completer_widget=completer_widget, sort_func=sort_func)
@property
def store_name(self):
return 'lineedit_history_'+self._name
def initialize(self, name):
self._name = name
self.history = history.get(self.store_name, [])
self.set_separator(None)
self.update_items_cache(self.history)
self.setText('')
self.editingFinished.connect(self.save_history)
def save_history(self):
ct = unicode(self.text())
if len(ct) > 2:
try:
self.history.remove(ct)
except ValueError:
pass
self.history.insert(0, ct)
if self.max_history_items is not None:
del self.history[self.max_history_items:]
history.set(self.store_name, self.history)
self.update_items_cache(self.history)
def clear_history(self):
self.history = []
history.set(self.store_name, self.history)
self.update_items_cache(self.history)
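# Usage sketch (hedged; assumes a running calibre Qt application and a
# caller-supplied parent widget):
def make_search_box(parent):
    le = HistoryLineEdit2(parent)
    le.initialize('search_box')  # history persists as 'lineedit_history_search_box'
    return le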
|
h3llrais3r/SickRage
|
sickchill/oldbeard/providers/pretome.py
|
Python
|
gpl-3.0
| 5,958
| 0.003021
|
import re
import traceback
from urllib.parse import quote
from requests.utils import dict_from_cookiejar
from sickchill import logger
from sickchill.helper.common import convert_size, try_int
from sickchill.oldbeard import tvcache
from sickchill.oldbeard.bs4_parser import BS4Parser
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class Provider(TorrentProvider):
def __init__(self):
super().__init__("Pretome")
self.username = None
self.password = None
self.pin = None
self.minseed = 0
self.minleech = 0
self.urls = {
"base_url": "https://pretome.info",
"login": "https://pretome.info/takelogin.php",
"detail": "https://pretome.info/details.php?id=%s",
"search": "https://pretome.info/browse.php?search=%s%s",
"download": "https://pretome.info/download.php/%s/%s.torrent",
}
self.url = self.urls["base_url"]
self.categories = "&st=1&cat%5B%5D=7"
self.proper_strings = ["PROPER", "REPACK"]
self.cache = tvcache.TVCache(self)
def _check_auth(self):
        if not self.username or not self.password or not self.pin:
            logger.warning("Invalid username or password or pin. Check your settings")
            return False
        return True
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
        login_params = {"username": self.username, "password": self.password, "login_pin": self.pin}
response = self.get_url(self.urls["login"], post_data=login_params, returns="text")
if not response:
logger.warning("Unable to connect to provider")
return False
if re.search("Username or password incorrect", response):
logger.warning("Invalid username or password. Check your settings")
return False
return True
def search(self, search_params, age=0, ep_obj=None):
results = []
if not self.login():
return results
for mode in search_params:
items = []
logger.debug(_("Search Mode: {mode}".format(mode=mode)))
for search_string in search_params[mode]:
if mode != "RSS":
logger.debug(_("Search String: {search_string}".format(search_string=search_string)))
search_url = self.urls["search"] % (quote(search_string), self.categories)
data = self.get_url(search_url, returns="text")
if not data:
continue
try:
with BS4Parser(data, "html5lib") as html:
# Continue only if one Release is found
empty = html.find("h2", text="No .torrents fit this filter criteria")
if empty:
logger.debug("Data returned from provider does not contain any torrents")
continue
torrent_table = html.find("table", style="border: none; width: 100%;")
if not torrent_table:
logger.exception("Could not find table of torrents")
continue
torrent_rows = torrent_table("tr", class_="browse")
for result in torrent_rows:
cells = result("td")
size = None
link = cells[1].find("a", style="font-size: 1.25em; font-weight: bold;")
torrent_id = link["href"].replace("details.php?id=", "")
try:
if link.get("title", ""):
title = link["title"]
else:
title = link.contents[0]
download_url = self.urls["download"] % (torrent_id, link.contents[0])
seeders = int(cells[9].contents[0])
leechers = int(cells[10].contents[0])
# Need size for failed downloads handling
if size is None:
torrent_size = cells[7].text
size = convert_size(torrent_size) or -1
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.debug(
"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
title, seeders, leechers
)
)
continue
item = {"title": title, "link": download_url, "size": size, "seeders": seeders, "leechers": leechers, "hash": ""}
if mode != "RSS":
logger.debug("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers))
items.append(item)
except Exception:
logger.exception("Failed parsing provider. Traceback: {0}".format(traceback.format_exc()))
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
results += items
return results
|
hj3938/panda3d
|
direct/src/showbase/Transitions.py
|
Python
|
bsd-3-clause
| 17,107
| 0.009002
|
"""Undocumented Module"""
__all__ = ['Transitions']
from panda3d.core import *
from direct.gui.DirectGui import *
from direct.interval.LerpInterval import LerpColorScaleInterval, LerpColorInterval, LerpScaleInterval, LerpPosInterval
from direct.interval.MetaInterval import Sequence, Parallel
from direct.interval.FunctionInterval import Func
class Transitions:
# These may be reassigned before the fade or iris transitions are
# actually invoked to change the models that will be used.
IrisModelName = "models/misc/iris"
FadeModelName = "models/misc/fade"
def __init__(self, loader,
model=None,
scale=3.0,
pos=Vec3(0, 0, 0)):
self.transitionIval = None
self.letterboxIval = None
self.iris = None
self.fade = None
self.letterbox = None
        self.fadeModel = model
self.imagePos = pos
if model:
self.alphaOff = Vec4(1, 1, 1, 0)
            self.alphaOn = Vec4(1, 1, 1, 1)
model.setTransparency(1)
self.lerpFunc = LerpColorScaleInterval
else:
self.alphaOff = Vec4(0, 0, 0, 0)
self.alphaOn = Vec4(0, 0, 0, 1)
self.lerpFunc = LerpColorInterval
self.irisTaskName = "irisTask"
self.fadeTaskName = "fadeTask"
self.letterboxTaskName = "letterboxTask"
def __del__(self):
if self.fadeModel:
self.fadeModel.removeNode()
self.fadeModel = None
##################################################
# Fade
##################################################
# We can set a custom model for the fade before using it for the first time
def setFadeModel(self, model, scale=1.0):
self.fadeModel = model
# We have to change some default parameters for a custom fadeModel
self.alphaOn = Vec4(1, 1, 1, 1)
# Reload fade if its already been created
if self.fade:
self.fade.destroy()
self.fade = None
self.loadFade()
def loadFade(self):
if self.fade is None:
# We create a DirectFrame for the fade polygon, instead of
# simply loading the polygon model and using it directly,
# so that it will also obscure mouse events for objects
# positioned behind it.
self.fade = DirectFrame(
parent = hidden,
guiId = 'fade',
relief = None,
image = self.fadeModel,
image_scale = (4, 2, 2),
state = DGG.NORMAL,
)
if not self.fadeModel:
# No fade model was given, so we make this the fade model.
self.fade["relief"] = DGG.FLAT
self.fade["frameSize"] = (-2, 2, -1, 1)
self.fade["frameColor"] = (0, 0, 0, 1)
self.fade.setTransparency(TransparencyAttrib.MAlpha)
self.fade.setBin('unsorted', 0)
self.fade.setColor(0,0,0,0)
def getFadeInIval(self, t=0.5, finishIval=None):
"""
Returns an interval without starting it. This is particularly useful in
cutscenes, so when the cutsceneIval is escaped out of we can finish the fade immediately
"""
#self.noTransitions() masad: this creates a one frame pop, is it necessary?
self.loadFade()
transitionIval = Sequence(Func(self.fade.reparentTo, aspect2d, FADE_SORT_INDEX),
Func(self.fade.showThrough), # in case aspect2d is hidden for some reason
self.lerpFunc(self.fade, t,
self.alphaOff,
# self.alphaOn,
),
Func(self.fade.detachNode),
name = self.fadeTaskName,
)
if finishIval:
transitionIval.append(finishIval)
return transitionIval
def getFadeOutIval(self, t=0.5, finishIval=None):
"""
Create a sequence that lerps the color out, then
parents the fade to hidden
"""
self.noTransitions()
self.loadFade()
transitionIval = Sequence(Func(self.fade.reparentTo,aspect2d,FADE_SORT_INDEX),
Func(self.fade.showThrough), # in case aspect2d is hidden for some reason
self.lerpFunc(self.fade, t,
self.alphaOn,
# self.alphaOff,
),
name = self.fadeTaskName,
)
if finishIval:
transitionIval.append(finishIval)
return transitionIval
def fadeIn(self, t=0.5, finishIval=None):
"""
Play a fade in transition over t seconds.
Places a polygon on the aspect2d plane then lerps the color
from black to transparent. When the color lerp is finished, it
parents the fade polygon to hidden.
"""
gsg = base.win.getGsg()
if gsg:
# If we're about to fade in from black, go ahead and
# preload all the textures etc.
base.graphicsEngine.renderFrame()
render.prepareScene(gsg)
render2d.prepareScene(gsg)
if (t == 0):
# Fade in immediately with no lerp
#print "transitiosn: fadeIn 0.0"
self.noTransitions()
self.loadFade()
self.fade.detachNode()
else:
# Create a sequence that lerps the color out, then
# parents the fade to hidden
self.transitionIval = self.getFadeInIval(t, finishIval)
self.transitionIval.start()
def fadeOut(self, t=0.5, finishIval=None):
"""
Play a fade out transition over t seconds.
Places a polygon on the aspect2d plane then lerps the color
from transparent to full black. When the color lerp is finished,
it leaves the fade polygon covering the aspect2d plane until you
fadeIn or call noFade.
"""
if (t == 0):
# Fade out immediately with no lerp
self.noTransitions()
self.loadFade()
self.fade.reparentTo(aspect2d, FADE_SORT_INDEX)
self.fade.setColor(self.alphaOn)
elif base.config.GetBool('no-loading-screen',0):
if finishIval:
self.transitionIval = finishIval
self.transitionIval.start()
else:
# Create a sequence that lerps the color out, then
# parents the fade to hidden
self.transitionIval = self.getFadeOutIval(t,finishIval)
self.transitionIval.start()
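    # Usage sketch (hedged; inside a running ShowBase application):
    #   transitions = Transitions(loader)
    #   transitions.fadeOut(t=1.0)   # lerp to black over one second
    #   transitions.fadeIn(t=1.0)    # and back to the scene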
def fadeOutActive(self):
return self.fade and self.fade.getColor()[3] > 0
def fadeScreen(self, alpha=0.5):
"""
Put a semitransparent screen over the camera plane
to darken out the world. Useful for drawing attention to
a dialog box for instance
"""
#print "transitiosn: fadeScreen"
self.noTransitions()
self.loadFade()
self.fade.reparentTo(aspect2d, FADE_SORT_INDEX)
self.fade.setColor(self.alphaOn[0],
self.alphaOn[1],
self.alphaOn[2],
alpha)
def fadeScreenColor(self, color):
"""
Put a semitransparent screen over the camera plane
to darken out the world. Useful for drawing attention to
a dialog box for instance
"""
#print "transitiosn: fadeScreenColor"
self.noTransitions()
self.loadFade()
self.fade.reparentTo(aspect2d, FADE_SORT_INDEX)
self.fade.setColor(color)
def noFade(self):
"""
Removes any current fade tasks and parents the fade polygon away
|
sekikn/ambari
|
ambari-common/src/main/python/ambari_commons/repo_manager/generic_manager.py
|
Python
|
apache-2.0
| 7,285
| 0.009746
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from resource_management.core.logger import Logger
class GenericManagerProperties(object):
"""
    Class to keep all package-manager dependent properties. Each non-generic implementation should override the properties
declared here
"""
empty_file = "/dev/null"
locked_output = None
repo_error = None
repo_manager_bin = None
pkg_manager_bin = None
repo_update_cmd = None
available_packages_cmd = None
installed_packages_cmd = None
all_packages_cmd = None
repo_definition_location = None
install_cmd = {
True: None,
False: None
}
remove_cmd = {
True: None,
False: None
}
verify_dependency_cmd = None
class GenericManager(object):
"""
    Interface for all custom implementations. Provides the required base for any custom manager to integrate smoothly
"""
@property
def properties(self):
return GenericManagerProperties
def install_package(self, name, context):
"""
Install package
:type name str
:type context ambari_commons.shell.RepoCallContext
:raise ValueError if name is empty
"""
raise NotImplementedError()
def remove_package(self, name, context, ignore_dependencies=False):
"""
Remove package
:type name str
:type context ambari_commons.shell.RepoCallContext
:type ignore_dependencies bool
:raise ValueError if name is empty
"""
raise NotImplementedError()
def upgrade_package(self, name, context):
"""
        Upgrade package
:type name str
:type context ambari_commons.shell.RepoCallContext
:raise ValueError if name is empty
"""
raise NotImplementedError()
def check_uncompleted_transactions(self):
"""
        Check the package manager for uncompleted transactions.
:rtype bool
"""
return False
def print_uncompleted_transaction_hint(self):
"""
        Print a friendly message about the way to fix the issue
"""
pass
def get_available_packages_in_repos(self, repositories):
"""
Gets all (both installed and available) packages that are available at given repositories.
:type repositories resource_management.libraries.functions.repository_util.CommandRepository
:return: installed and available packages from these repositories
"""
raise NotImplementedError()
def installed_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def available_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def all_packages(self, pkg_names=None, repo_filter=None):
raise NotImplementedError()
def get_installed_repos(self, hint_packages, all_packages, ignore_repos):
"""
        Gets all installed repos by name, based on repos that provide any package
        contained in hint_packages.
        Repos matching a value in ignore_repos will not be returned.
        hint_packages entries must be regexps.
"""
all_repos = []
repo_list = []
for hintPackage in hint_packages:
for item in all_packages:
if re.match(hintPackage, item[0]) and not item[2] in all_repos:
all_repos.append(item[2])
for repo in all_repos:
ignore = False
fo
|
r ignoredRepo in ignore_repos:
if self.name_match(ignoredRepo, repo):
ignore = True
if not ignore:
repo_list.append(repo)
return repo_list
def get_installed_pkgs_by_repo(self, repos, ignore_packages, installed_packages):
"""
Get all the installed packages from the repos listed in repos
"""
packages_from_repo = []
packages_to_
|
remove = []
for repo in repos:
sub_result = []
for item in installed_packages:
if repo == item[2]:
sub_result.append(item[0])
packages_from_repo = list(set(packages_from_repo + sub_result))
for package in packages_from_repo:
keep_package = True
for ignorePackage in ignore_packages:
if self.name_match(ignorePackage, package):
keep_package = False
break
if keep_package:
packages_to_remove.append(package)
return packages_to_remove
def get_installed_pkgs_by_names(self, pkg_names, all_packages_list=None):
"""
        Gets all installed packages whose names start with entries in pkg_names
:type pkg_names list[str]
:type all_packages_list list[str]
"""
return self.installed_packages(pkg_names)
def get_package_details(self, installed_packages, found_packages):
"""
Gets the name, version, and repoName for the packages
:type installed_packages list[tuple[str,str,str]]
:type found_packages list[str]
"""
package_details = []
for package in found_packages:
pkg_detail = {}
for installed_package in installed_packages:
if package == installed_package[0]:
pkg_detail['name'] = installed_package[0]
pkg_detail['version'] = installed_package[1]
pkg_detail['repoName'] = installed_package[2]
package_details.append(pkg_detail)
return package_details
def get_repos_to_remove(self, repos, ignore_list):
repos_to_remove = []
for repo in repos:
add_to_remove_list = True
for ignore_repo in ignore_list:
if self.name_match(ignore_repo, repo):
add_to_remove_list = False
                    break
if add_to_remove_list:
repos_to_remove.append(repo)
return repos_to_remove
def get_installed_package_version(self, package_name):
raise NotImplementedError()
def verify_dependencies(self):
"""
Verify that we have no dependency issues in package manager. Dependency issues could appear because of aborted or terminated
package installation process or invalid packages state after manual modification of packages list on the host
:return True if no dependency issues found, False if dependency issue present
:rtype bool
"""
raise NotImplementedError()
def name_match(self, lookup_name, actual_name):
tokens = actual_name.strip().lower()
lookup_name = lookup_name.lower()
return " " not in lookup_name and lookup_name in tokens
def _executor_error_handler(self, command, error_log, exit_code):
"""
Error handler for ac_shell.process_executor
:type command list|str
:type error_log list
:type exit_code int
"""
if isinstance(command, (list, tuple)):
command = " ".join(command)
Logger.error("Command execution error: command = \"{0}\", exit code = {1}, stderr = {2}".format(
command, exit_code, "\n".join(error_log)))
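# Illustrative sketch (not part of the original module): how the name_match
# helper behaves when filtering repositories; the repo ids are made up.
if __name__ == '__main__':
    mgr = GenericManager()
    assert mgr.name_match('hdp', 'HDP-2.6-repo-1')       # case-insensitive substring match
    assert not mgr.name_match('hdp utils', 'HDP-UTILS')  # lookup names with spaces never match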
|
shinznatkid/rupture
|
rupture/rupture.py
|
Python
|
mit
| 6,405
| 0.002186
|
# -*- coding: utf-8 -*-
'''
Rupture
version 1.4.0
build 5
'''
from bs4 import BeautifulSoup
import datetime
import requests
import socket
import pickle
import time
import ssl
from .utils import six
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
class Rupture(object):
parser = None # None or html.parser or lxml
encoding = None
def __init__(self, proxies=None, parser='html.parser', timeout=None, headers=None):
self.session = requests.Session()
if headers:
self.session.headers.update(headers)
self.proxies = proxies
self.parser = parser
self.timeout = timeout
def _wrap_response(self, obj, parser):
def get_soup(self):
if not hasattr(self, '_soup'):
start_time = datetime.datetime.now()
from_encoding = None if self.encoding == 'utf-8' else self.encoding
if isinstance(self.text, six.text_type):
from_encoding = None # Prevent UserWarning
self._soup = BeautifulSoup(self.text, self.parser, from_encoding=from_encoding)
self._soup.elapsed = datetime.datetime.now() - start_time
if self.parser == 'lxml':
import lxml
lxml.etree.clear_error_log()
return self._soup
def get__repr__(self):
if hasattr(self, 'text'):
return '<Response [%s]: %s>' % (self.status_code, self.text)
return '<Response [%s]>' % (self.status_code)
obj.__class__.parser = parser
obj.__class__.soup = property(get_soup)
obj.__class__.__repr__ = get__repr__
return obj
def http_request(self, method, url, params=None, data=None, timeout=None, proxies=None, encoding=None, parser=None, retries=None, retries_interval=None, **kwargs):
timeout = self.timeout if timeout is None else timeout
proxies = self.proxies if proxies is None else proxies
encoding = self.encoding if encoding is None else encoding
parser = self.parser if parser is None else parser
if not retries:
retries = 0
while True:
try:
                # Build the per-request mapping in a separate variable so that
                # retries do not re-wrap an already-built proxy dict.
                proxy_map = {'http': proxies, 'https': proxies} if proxies else None
                start_time = datetime.datetime.now()
                r = self.session.request(method, url, params=params, data=data, timeout=timeout, proxies=proxy_map, **kwargs)
r.elapsed_all = datetime.datetime.now() - start_time
if encoding:
r.encoding = encoding
return self._wrap_response(r, parser)
except (ssl.SSLError) as e:
if retries > 0:
retries = retries - 1
if retries_interval:
time.sleep(retries_interval)
continue
raise requests.exceptions.RequestException('SSLError %s' % e)
except (socket.error) as e:
if retries > 0:
retries = retries - 1
if retries_interval:
time.sleep(retries_interval)
continue
raise requests.exceptions.RequestException('Socket Error %s' % e)
def http_get(self, url, params=None, **kwargs):
return self.http_request('GET', url, params=params, **kwargs)
def xml_get(self, url, params=None, headers=None, **kwargs):
xml_headers = {
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/json; charset=utf-8'
}
        if headers:
            # dict(a.items() + b.items()) only works on Python 2; merge in a
            # way that is valid on both versions, caller-supplied headers win.
            merged = dict(xml_headers)
            merged.update(headers)
            headers = merged
        else:
            headers = xml_headers
return self.http_get(url, params=params, headers=headers, **kwargs)
def http_post(self, url, data=None, **kwargs):
return self.http_request('POST', url, data=data, **kwargs)
def xml_post(self, url, data=None, headers=None, **kwargs):
xml_headers = {
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/json; charset=utf-8'
}
        if headers:
            # Same cross-version merge as in xml_get above.
            merged = dict(xml_headers)
            merged.update(headers)
            headers = merged
        else:
            headers = xml_headers
return self.http_post(url, data=data, headers=headers, **kwargs)
def http_download(self, url, filepath, method='get', **kwargs):
if method.lower() == 'get':
response = self.http_get(url, stream=True, **kwargs)
elif method.lower() == 'post':
response = self.http_post(url, stream=True, **kwargs)
else:
raise NotImplementedError()
if not response.ok:
raise requests.exceptions.RequestException('Response not okay')
with open(filepath, 'wb') as handle:
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
return filepath
def http_get_image(self, url, filepath, **kwargs):
return self.http_download(url, filepath, **kwargs)
def parse_float_or_none(self, s):
if s:
return float(str(s).strip().replace(',', '').replace('+', ''))
return s
def new_session(self):
self.session = requests.Session()
def serialize(self):
return pickle.dumps([self.session])
@classmethod
def _deserialize_key(cls, data, keys):
raw_results = pickle.loads(data)
entity = cls()
for i in range(len(keys)):
setattr(entity, keys[i], raw_results[i])
return entity
@classmethod
def deserialize(cls, data):
return cls._deserialize_key(data, ['session'])
def patch_ssl(self):
class SSLAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(num_
|
pools=connections,
maxsize=maxsize,
block=block,
|
ssl_version=ssl.PROTOCOL_TLSv1)
if not getattr(self.session, 'is_patch', False):
self.session.is_patch = True
self.session.mount('https://', SSLAdapter())
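# Illustrative sketch (not part of the original module): a typical session.
# The URL is a placeholder and the retry settings are arbitrary.
if __name__ == '__main__':
    client = Rupture(timeout=10)
    response = client.http_get('https://example.com/', retries=2, retries_interval=1)
    print(response.soup.title)  # the BeautifulSoup tree is built lazily and cached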
|
django-de/django-de-v2
|
django_de/apps/authors/urls.py
|
Python
|
bsd-3-clause
| 348
| 0.017241
|
from django.conf.urls.defaults import *
from django_de.apps.authors.models import Author
urlpatterns = patterns('django.views.generic.list_detail',
(r'
|
^$', 'object_list',
dict(
queryset = Author.objects.order_by('name', 'slug'),
template_object_name = 'author',
allow_empty=Tr
|
ue,
),
)
)
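# Illustrative sketch (not part of the original file): the same route in
# modern Django, where patterns() and the string-named generic views were
# removed in favour of class-based views. Note that the old
# template_object_name='author' exposed the context variable 'author_list',
# which ListView spells out as context_object_name.
#
#     from django.urls import path
#     from django.views.generic import ListView
#
#     urlpatterns = [
#         path('', ListView.as_view(
#             queryset=Author.objects.order_by('name', 'slug'),
#             context_object_name='author_list',
#             allow_empty=True,
#         )),
#     ]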
|
seleniumbase/SeleniumBase
|
examples/github_test.py
|
Python
|
mit
| 1,290
| 0
|
from seleniumbase import BaseCase
class GitHubTests(BaseCase):
def test_github(self):
# Selenium can trigger GitHub's anti-automation system:
# "You have triggered an abuse detection mechanism."
# "Please wait a few minutes before you try again."
# To avoid this automation blocker, two steps are being taken:
# 1. self.slow_click() is being used to slow down Selenium actions.
# 2. The browser's User Agent is modified to avoid Selenium-detection
# when running in headless mode.
if self.headless:
self.
|
get_new_driver(
agent="""Mozilla/5.0 """
"""AppleWeb
|
Kit/537.36 (KHTML, like Gecko) """
"""Chrome/Version 96.0.4664.55 Safari/537.36"""
)
self.open("https://github.com/search?q=SeleniumBase")
self.slow_click('a[href="/seleniumbase/SeleniumBase"]')
self.click_if_visible('[data-action="click:signup-prompt#dismiss"]')
self.assert_element("div.repository-content")
self.assert_text("SeleniumBase", "h1")
self.slow_click('a[title="seleniumbase"]')
self.slow_click('a[title="fixtures"]')
self.slow_click('a[title="base_case.py"]')
self.assert_text("Code", "nav a.selected")
|
spacewiki/spacewiki
|
spacewiki/test/ui_test.py
|
Python
|
agpl-3.0
| 718
| 0.001393
|
from spacewiki.app import create_app
from spacewiki import model
from spacewiki.test import create_test_app
import unittest
class UiTestCase(unittest.TestCase):
def setUp(self):
self._app = create_test_app()
with self._ap
|
p.app_context():
model.syncdb()
self.app = self._app.test_client()
def test_index(self):
self.assertEqual(self.app.
|
get('/').status_code, 200)
def test_no_page(self):
self.assertEqual(self.app.get('/missing-page').status_code, 200)
def test_all_pages(self):
self.assertEqual(self.app.get('/.all-pages').status_code, 200)
def test_edit(self):
self.assertEqual(self.app.get('/index/edit').status_code, 200)
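    # Illustrative sketch (not part of the original tests): the same client
    # can exercise write endpoints too. The form field below is hypothetical
    # and only shows the shape of such a test.
    #
    #     def test_edit_post(self):
    #         rsp = self.app.post('/index/edit', data={'body': 'hello'})
    #         self.assertIn(rsp.status_code, (200, 302))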
|
maru-sama/pyblosxom
|
Pyblosxom/tests/test_tags.py
|
Python
|
mit
| 2,822
| 0.003898
|
#######################################################################
#
|
This file
|
is part of Pyblosxom.
#
# Copyright (C) 2010-2011 by the Pyblosxom team. See AUTHORS.
#
# Pyblosxom is distributed under the MIT license. See the file
# LICENSE for distribution details.
#######################################################################
import tempfile
import shutil
import os
from Pyblosxom.tests import PluginTest
from Pyblosxom.plugins import tags
from Pyblosxom.pyblosxom import Request
class TagsTest(PluginTest):
def setUp(self):
PluginTest.setUp(self, tags)
self.tmpdir = tempfile.mkdtemp()
def get_datadir(self):
return os.path.join(self.tmpdir, "datadir")
def tearDown(self):
PluginTest.tearDown(self)
try:
shutil.rmtree(self.tmpdir)
except OSError:
pass
def test_get_tagsfile(self):
req = Request({"datadir": self.get_datadir()}, {}, {})
cfg = {"datadir": self.get_datadir()}
self.assertEquals(tags.get_tagsfile(cfg),
os.path.join(self.get_datadir(), os.pardir,
"tags.index"))
tags_filename = os.path.join(self.get_datadir(), "tags.db")
cfg = {"datadir": self.get_datadir(), "tags_filename": tags_filename}
self.assertEquals(tags.get_tagsfile(cfg), tags_filename)
def test_tag_cloud_no_tags(self):
# test no tags
self.request.get_data()["tagsdata"] = {}
tags.cb_head(self.args)
self.assertEquals(
str(self.args["entry"]["tagcloud"]),
"\n".join(
["<p>",
"</p>"]))
def test_tag_cloud_one_tag(self):
        # test one tag
self.request.get_data()["tagsdata"] = {
"tag2": ["a"],
}
tags.cb_head(self.args)
self.assertEquals(
str(self.args["entry"]["tagcloud"]),
"\n".join(
["<p>",
'<a class="biggestTag" href="http://bl.og//tag/tag2">tag2</a>',
"</p>"]))
def test_tag_cloud_many_tags(self):
        # test many tags
self.request.get_data()["tagsdata"] = {
"tag1": ["a", "b", "c", "d", "e", "f"],
"tag2": ["a", "b", "c", "d"],
"tag3": ["a"]
}
tags.cb_head(self.args)
self.assertEquals(
str(self.args["entry"]["tagcloud"]),
"\n".join(
["<p>",
'<a class="biggestTag" href="http://bl.og//tag/tag1">tag1</a>',
'<a class="biggestTag" href="http://bl.og//tag/tag2">tag2</a>',
'<a class="smallestTag" href="http://bl.og//tag/tag3">tag3</a>',
"</p>"]))
|
AlexeyKruglov/Skeinforge-fabmetheus
|
skeinforge_application/skeinforge_plugins/profile_plugins/winding.py
|
Python
|
agpl-3.0
| 2,079
| 0.011544
|
"""
This page is in the table of contents.
Winding is a script to set the winding profile for the skeinforge chain.
The displayed craft sequence is the sequence in which the tools craft the model and export the output.
On the winding dialog, clicking the 'Add Profile' button will duplicate the selected profile and give it the name in the input field. For example, if laser is selected and the name laser_10mm is in the input field, clicking the 'Add Profile' button will duplicate laser and save it as laser_10mm. The 'Delete Profile' button deletes the selected profile.
The profile selection is the setting. If you hit 'Save and Close' the selection will be saved; if you hit 'Cancel' the selection will not be saved. However, adding and deleting a profile is a permanent action; for example, 'Cancel' will not bring back any deleted profiles.
To change the winding profile, in a shell in the profile_plugins folder type:
> python winding.py
"""
from __future__ import absolute_import
import __init__
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/lice
|
nses/agpl.html'
def getCraftSequence():
"Get the winding craft sequence."
return 'cleave preface coil flow feed home lash fillet limit unpause alteration export'.split()
def getNewRepository():
'Get new repository.'
return WindingRepository()
class WindingRepository:
"A c
|
lass to handle the winding settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsSetCraftProfile( getCraftSequence(), 'free_wire', self, 'skeinforge_application.skeinforge_plugins.profile_plugins.winding.html')
def main():
"Display the export dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
|
Telestream/telestream-cloud-python-sdk
|
telestream_cloud_qc_sdk/telestream_cloud_qc/models/header_byte_count_test.py
|
Python
|
mit
| 4,912
| 0
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class HeaderByteCountTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'header_bytes': 'int',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'header_bytes': 'header_bytes',
'reject_on
|
_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, header_bytes=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
|
"""HeaderByteCountTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._header_bytes = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if header_bytes is not None:
self.header_bytes = header_bytes
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def header_bytes(self):
"""Gets the header_bytes of this HeaderByteCountTest. # noqa: E501
:return: The header_bytes of this HeaderByteCountTest. # noqa: E501
:rtype: int
"""
return self._header_bytes
@header_bytes.setter
def header_bytes(self, header_bytes):
"""Sets the header_bytes of this HeaderByteCountTest.
:param header_bytes: The header_bytes of this HeaderByteCountTest. # noqa: E501
:type: int
"""
self._header_bytes = header_bytes
@property
def reject_on_error(self):
"""Gets the reject_on_error of this HeaderByteCountTest. # noqa: E501
:return: The reject_on_error of this HeaderByteCountTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this HeaderByteCountTest.
:param reject_on_error: The reject_on_error of this HeaderByteCountTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this HeaderByteCountTest. # noqa: E501
:return: The checked of this HeaderByteCountTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this HeaderByteCountTest.
:param checked: The checked of this HeaderByteCountTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HeaderByteCountTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, HeaderByteCountTest):
return True
return self.to_dict() != other.to_dict()
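# Illustrative sketch (not part of the generated module): constructing the
# model and round-tripping it through to_dict() and __eq__; the field values
# are made up.
if __name__ == '__main__':
    test = HeaderByteCountTest(header_bytes=1024, reject_on_error=True, checked=True)
    assert test.to_dict() == {'header_bytes': 1024,
                              'reject_on_error': True,
                              'checked': True}
    assert test == HeaderByteCountTest(header_bytes=1024, reject_on_error=True, checked=True)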
|
blaze33/django
|
tests/regressiontests/views/tests/debug.py
|
Python
|
bsd-3-clause
| 22,564
| 0.001197
|
# -*- coding: utf-8 -*-
# This coding header is significant for tests, as the debug view is parsing
# files to search for such a header to decode the source file content
from __future__ import absolute_import, unicode_l
|
iterals
import inspect
import os
import sys
from django.conf import settings
from django.core import mail
from django.core.files.uploadedfile
|
import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from django.test.utils import (override_settings, setup_test_template_loader,
restore_template_loaders)
from django.utils.encoding import force_text
from django.views.debug import ExceptionReporter
from .. import BrokenException, except_args
from ..views import (sensitive_view, non_sensitive_view, paranoid_view,
custom_exception_reporter_filter_view, sensitive_method_view)
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
class DebugViewTests(TestCase):
urls = "regressiontests.views.urls"
def test_files(self):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_403(self):
# Ensure no 403.html template exists to test the default case.
setup_test_template_loader({})
try:
response = self.client.get('/views/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
finally:
restore_template_loaders()
def test_403_template(self):
# Set up a test 403.html template.
setup_test_template_loader(
{'403.html': 'This is a test template for a 403 Forbidden error.'}
)
try:
response = self.client.get('/views/raises403/')
self.assertContains(response, 'test template', status_code=403)
finally:
restore_template_loaders()
def test_404(self):
response = self.client.get('/views/raises404/')
self.assertEqual(response.status_code, 404)
def test_view_exceptions(self):
for n in range(len(except_args)):
self.assertRaises(BrokenException, self.client.get,
reverse('view_exception', args=(n,)))
def test_template_exceptions(self):
for n in range(len(except_args)):
try:
self.client.get(reverse('template_exception', args=(n,)))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertFalse(raising_loc.find('raise BrokenException') == -1,
"Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
raising_loc)
def test_template_loader_postmortem(self):
response = self.client.get(reverse('raises_template_does_not_exist'))
template_path = os.path.join('templates', 'i_dont_exist.html')
self.assertContains(response, template_path, status_code=500)
class ExceptionReporterTests(TestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
class PlainTextReportTests(TestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', t
|
Eksmo/calibre
|
src/calibre/gui2/dialogs/quickview_ui.py
|
Python
|
gpl-3.0
| 3,673
| 0.003539
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/dialogs/quickview.ui'
#
# Created: Thu Jul 19 23:32:31 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Quickview(object):
def setupUi(self, Quickview):
Quickview.setObjectName(_fromUtf8("Quickview"))
Quickview.resize(768, 342)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Quickview.sizePolicy().hasHeightForWidth())
Quickview.setSizePolicy(sizePolicy)
self.gridlayout = QtGui.QGridLayout(Quickview)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.items_label = QtGui.QLabel(Quickview)
self.items_label.setObjectName(_fromUtf8("items_label"))
self.gridlayout.addWidget(self.items_label, 0, 0, 1, 1)
self.items = QtGui.QListWidget(Quickview)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.items.sizePolicy().hasHeightForWidth())
self.items.setSizePolicy(sizePolicy)
self.items.setObjectName(_fromUtf8("items"))
self.gridlayout.addWidget(self.items, 1, 0, 1, 1)
self.books_label = QtGui.QLabel(Quickview)
self.books_label.setObjectName(_fromUtf8("books_label"))
self.gridlayout.addWidget(self.books_label, 0, 1, 1, 1)
self.books_table = QtGui.QTableWidget(Quickview)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.books_table.sizePolicy().hasHeightForWidth())
self.books_table.setSizePolicy(sizePolicy)
self.books_table.setColumnCount(0)
self.books_table.setRowCount(0)
self.books_table.setObjectName(_fromUtf8("books_table"))
self.gridlayout.addWidget(self.books_table, 1, 1, 1, 1)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName(_fromUtf8("hboxlayout"))
self.search_button = QtGui.QPushButton(Quickview)
self.search_button.setObjectName(_fromUtf8("search_button"))
self.hboxlayout.addWidget(self.search_button)
spacerItem = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.hboxlayout.addItem(spacerItem)
self.buttonBox = QtGui.QDialogButtonBox(Quickview)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setCenterButtons(False)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.hboxlayout.addWidget(self.buttonBox)
self.gridlayout.addLayout(self.hboxla
|
yout, 3, 0, 1, 2)
self.retranslateUi(Quickview)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Quickview.reject)
QtCore.QMetaObject.connectSlotsByName(Quickview)
def retranslateUi
|
(self, Quickview):
Quickview.setWindowTitle(_("Quickview"))
self.items_label.setText(_("Items"))
self.search_button.setText(_("Search"))
self.search_button.setToolTip(_("Search in the library view for the selected item"))
|
gdgellatly/OCB1
|
addons/web/controllers/main.py
|
Python
|
agpl-3.0
| 69,619
| 0.003433
|
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import glob
import itertools
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import time
import urllib
import urllib2
import urlparse
import xmlrpclib
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.tools.translate import _
from openerp.tools import config
from .. import http
openerpweb = http
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
def rjsmin(script):
""" Minify js with a clever regex.
Taken from http://opensource.perlig.de/rjsmin
|
Apache License, Version 2.0 """
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
(groups[6] and ' ') or
(group
|
s[7] and ' ') or
''
)
result = re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#'
r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-'
r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^'
r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|'
r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0'
r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0'
r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:'
r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
).strip()
return result
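# Illustrative sketch (not part of the original module): what rjsmin does to a
# small input; comments are dropped and insignificant whitespace collapsed
# (output shown approximately).
#
#     rjsmin("var a = 1;  // trailing comment\nvar b = 2;")
#     # -> roughly 'var a=1;var b=2;'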
def db_list(req, force=False):
proxy = req.session.proxy("db")
dbs = proxy.list(force)
h = req.httprequest.environ['HTTP_HOST'].split(':')[0]
d = h.split('.')[0]
r = openerp.tools.config['dbfilter'].replace('%h', h).replace('%d', d)
dbs = [i for i in dbs if re.match(r, i)]
return dbs
def db_monodb_redirect(req):
return db_redirect(req, not config['list_db'])
def db_redirect(req, match_first_only_if_unique):
db = False
redirect = False
dbs = db_list(req, True)
# 1 try the db in the url
db_url = req.params.get('db')
if db_url and db_url in dbs:
return (db_url, False)
# 2 use the database from the cookie if it's listable and still listed
cookie_db = req.httprequest.cookies.get('last_used_database')
if cookie_db in dbs:
db = cookie_db
# 3 use the first db if user can list databases
if dbs and not db and (not match_first_only_if_unique or len(dbs) == 1):
db = dbs[0]
# redirect to the chosen db if multiple are available
if db and len(dbs) > 1:
query = dict(urlparse.parse_qsl(req.httprequest.query_string, keep_blank_values=True))
query.update({'db': db})
redirect = req.httprequest.path + '?' + urllib.urlencode(query)
return (db, redirect)
def db_monodb(req):
    # if only one db exists, return it, else return False
return db_redirect(req, True)[0]
def redirect_with_hash(req, url, code=303):
# Most IE and Safari versions decided not to preserve location.hash upon
# redirect. And even if IE10 pretends to support it, it still fails
# inexplicably in case of multiple redirects (and we do have some).
# See extensive test page at http://greenbytes.de/tech/tc/httpredirects/
return "<html><head><script>window.location = '%s' + location.hash;</script></head></html>" % url
def module_topological_sort(modules):
""" Return a list of module names sorted so that their dependencies of the
modules are listed before the module itself
modules is a dict of {module_name: dependencies}
:param modules: modules to sort
:type modules: dict
:returns: list(str)
"""
dependencies = set(itertools.chain.from_iterable(modules.itervalues()))
# incoming edge: dependency on other module (if a depends on b, a has an
# incoming edge from b, aka there's an edge from b to a)
# outgoing edge: other module depending on this one
# [Tarjan 1976], http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
#L ← Empty list that will contain the sorted nodes
L = []
#S ← Set of all nodes with no outgoing edges (modules on which no other
# module depends)
S = set(module for module in modules if module not in dependencies)
visited = set()
#function visit(node n)
def visit(n):
#if n has not been visited yet then
if n not in visited:
#mark n as visited
visited.add(n)
            #change: n is not a web module, cannot be resolved, ignore it
if n not in modules: return
#for each node m with an edge from m to n do (dependencies of n)
for m in modules[n]:
#visit(m)
visit(m)
#add n to L
L.append(n)
#for each node n in S do
for n in S:
#visit(n)
visit(n)
return L
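# Illustrative sketch (not part of the original module): a tiny, made-up
# dependency graph run through module_topological_sort.
def _demo_module_topological_sort():
    graph = {'base': [], 'web': ['base'], 'sale': ['web']}
    # 'base' depends on nothing, so it is emitted first, then 'web',
    # then 'sale', which depends (transitively) on both.
    assert module_topological_sort(graph) == ['base', 'web', 'sale']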
def module_installed(req):
    # Candidate modules: the current heuristic is the presence of a /static dir
loadable = openerpweb.addons_manifest.keys()
modules = {}
# Retrieve database installed modules
# TODO The following code should move to ir.module.module.list_installed_modules()
Modules = req.session.model('ir.module.module')
domain = [('state','=','installed'), ('name','in', loadable)]
for module in Modules.search_read(domain, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = req.session.model('ir.module.module.dependency').read(deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_installed_bypass_session(dbname):
loadable = openerpweb.addons_manifest.keys()
modules = {}
try:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
m = registry.get('ir.module.module')
# TODO The following code should move to ir.module.module.list_installed_modules()
domain = [('state','=','installed'), ('name','in', loadable)]
ids = m.search(cr, 1, [('state','=','installed'), ('name','in', loadable)])
for module in m.read(cr, 1, ids, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = registry.get('ir.module.module.dependency').read(cr, 1, deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
exce
|
jeffrimko/Qprompt
|
tests/script_test_1.py
|
Python
|
mit
| 3,045
| 0.004598
|
"""Test that arguments passed to a script Menu.main(loop=True) execute
properly."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
SCRIPT = "script_1.py"
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
def _cleanup(self):
rmfile("foo")
rmfile("bar")
rmfile("caz")
def setUp(self):
self._cleanup()
|
self.assertFalse(op.exists("foo"))
self.a
|
ssertFalse(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def tearDown(self):
self._cleanup()
def test_script_1(self):
result = os.system("python %s x" % SCRIPT)
self.assertEqual(0, result)
self.assertFalse(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def test_script_2(self):
result = os.system("python %s f" % SCRIPT)
self.assertEqual(0, result)
self.assertTrue(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def test_script_3(self):
result = os.system("python %s b" % SCRIPT)
self.assertEqual(0, result)
self.assertFalse(op.exists("foo"))
self.assertTrue(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def test_script_4(self):
result = os.system("python %s f b" % SCRIPT)
self.assertEqual(0, result)
self.assertTrue(op.exists("foo"))
self.assertTrue(op.exists("bar"))
self.assertFalse(op.exists("caz"))
def test_script_5(self):
result = os.system("python %s c" % SCRIPT)
self.assertEqual(0, result)
self.assertFalse(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertTrue(op.exists("caz"))
def test_script_6(self):
result = os.system("python %s c f" % SCRIPT)
self.assertEqual(0, result)
self.assertTrue(op.exists("foo"))
self.assertFalse(op.exists("bar"))
self.assertTrue(op.exists("caz"))
def test_script_7(self):
result = os.system("python %s -d" % SCRIPT)
self.assertEqual(0, result)
self.assertFalse(op.exists("foo"))
self.assertTrue(op.exists("bar"))
self.assertFalse(op.exists("caz"))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
|
SmartElect/SmartElect
|
register/tests/test_views.py
|
Python
|
apache-2.0
| 21,018
| 0.001523
|
from io import StringIO
import csv
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import now
from register.forms import CSV_FIELDS, BlacklistedNumberEditForm, WhitelistedNumberEditForm
from register.models import Blacklist, Whitelist, RegistrationCenter, Registration
from register.tests.base import LibyaTest
from register.tests.factories import WhitelistFactory, BlacklistFactory, \
RegistrationCenterFactory, RegistrationFactory
from register.tests.test_center_csv import CenterFileTestMixin
from libya_elections.phone_numbers import get_random_phone_number, format_phone_number
from libya_elections.tests.utils import ResponseCheckerMixin
from libya_site.tests.factories import UserFactory, DEFAULT_USER_PASSWORD
from polling_reports.models import StaffPhone
from polling_reports.tests.factories import StaffPhoneFactory
from staff.tests.base import StaffUserMixin
class ImportBlackWhitelistViewMixin(StaffUserMixin, ResponseCheckerMixin):
"""Base class for TestImportBlacklistView and TestImportWhitelistView.
This doesn't inherit from TestCase, so it isn't executed by itself.
"""
def setUp(self):
super(ImportBlackWhitelistViewMixin, self).setUp()
# self.url = None
# self.model = None
# self.factory = None
def test_staff_can_see_form(self):
rsp = self.client.get(self.url, follow=False)
form = rsp.context['form']
self.assertNotIn('password', form.fields)
self.assertIn('import_file', form.fields)
def test_nonstaff_cant_see_form(self):
self.client.logout()
self.nonstaff_user = UserFactory(username='joe', password='puppy')
self.client.login(username='joe', password='puppy')
self.assertForbidden(self.client.get(self.url))
def test_valid_form(self):
# with all combinations of line endings (\r\n, \n, \r)
numbers = [get_random_phone_number() for i in range(4)]
punctuated_numbers = [format_phone_number(number)
for number in numbers]
file_content = ("""%s\r\n%s\n \n%s\r%s""" % (
punctuated_numbers[0],
punctuated_numbers[1],
punctuated_numbers[2],
punctuated_numbers[3],
)).encode()
blackwhitelist_file = ContentFile(file_content, name='bw.txt')
data = {'import_file': blackwhitelist_file}
rsp = self.client.post(self.url, data=data)
# Assert that we redirect
self.assertEqual(302, rsp.status_code)
bwlist = self.model.objects.values_list('phone_number', flat=True)
for number in numbers:
self.assertIn(number, bwlist)
self.assertEqual(len(bwlist), 4)
def test_import_number_twice_works(self):
"Importing a number that is already in list shouldn't cause an error"
number = get_random_phone_number()
self.factory(phone_number=number)
file_content = number.encode()
blackwhitelist_file = ContentFile(file_content, name='bw.txt')
data = {'import_file': blackwhitelist_file}
rsp = self.client.post(self.url, data=data)
# Assert that we redirect
self.assertEqual(302, rsp.status_code)
bwlist = self.model.objects.values_list('phone_number', flat=True)
self.assertEqual(len(bwlist), 1)
self.assertIn(number, bwlist)
def test_import_number_cant_start_with_2180(self):
"Ensures that the number doesn't start with 2180"
number = '218091234123'
file_content = number.encode()
blackwhitelist_file = ContentFile(file_content, name='bw.txt')
data = {'import_file': blackwhitelist_file}
rsp = self.client.post(self.url, data=data, follow=True)
self.assertEqual(200, rsp.status_code)
bwlist = self.model.objects.values_list('phone_number', flat=True)
self.assertEqual(len(bwlist), 0)
self.assertContains(rsp, 'Numbers on these lines not imported because '
'they are not valid phone numbers: 1.')
class TestImportBlacklistView(ImportBlackWhitelistViewMixin, LibyaTest):
"""Exercise uploading a list of blacklisted numbers"""
def setUp(self):
self.model = Blacklist
self.permissions = ('add_blacklist', 'browse_blacklist')
self.url = reverse('blacklisted-numbers-upload')
self.factory = BlacklistFactory
super(TestImportBlacklistView, self).setUp()
class TestImportWhitelistView(ImportBlackWhitelistViewMixin, LibyaTest):
"""Exercise uploading a list of whitelisted numbers"""
def setUp(self):
self.permissions = ('add_whitelist', 'browse_whitelist')
self.model = Whitelist
self.url = reverse('whitelisted-numbers-upload')
self.factory = WhitelistFactory
super(TestImportWhitelistView, self).setUp()
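# Illustrative sketch (not part of the original tests): the mixin makes adding
# another list type a matter of declaring model/permissions/url/factory. The
# "Greylist" model below is hypothetical.
#
#     class TestImportGreylistView(ImportBlackWhitelistViewMixin, LibyaTest):
#         def setUp(self):
#             self.model = Greylist
#             self.permissions = ('add_greylist', 'browse_greylist')
#             self.url = reverse('greylisted-numbers-upload')
#             self.factory = GreylistFactory
#             super(TestImportGreylistView, self).setUp()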
class BlackWhitelistEditFormMixin(StaffUserMixin, ResponseCheckerMixin):
"""Base class for TestBlacklistChangeForm and TestWhitelistChangeForm.
This doesn't inherit from TestCase, so it isn't executed by itself.
"""
def setUp(self):
super(BlackWhitelistEditFormMixin, self).setUp()
# self.factory = None
# self.form = None
def test_cleans_phone_number(self):
number = get_random_phone_number()
punctuated_number = format_phone_number(number)
form = self.form(data={'phone_number': punctuated_number})
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data['phone_number'], number)
def test
|
_add_dupe_shows_form_error(self):
number = get_random_phone_number()
self.factory(phone_number=number)
form = self.form(data={'phone_number': number})
self.assertFalse(form.is_valid())
|
self.assertIn('Duplicate value for phone number', list(form.errors.values())[0])
def test_phone_number_cant_start_with_2180(self):
"Ensures the local prefix '0' isn't accidentally included in the phone number"
number = '218091234124'
form = self.form(data={'phone_number': number})
self.assertFalse(form.is_valid())
self.assertIn('Please enter a valid phone number', list(form.errors.values())[0][0])
class TestBlacklistChangeForm(BlackWhitelistEditFormMixin, TestCase):
"""Exercise Blacklist number editing"""
def setUp(self):
super(TestBlacklistChangeForm, self).setUp()
self.factory = BlacklistFactory
self.form = BlacklistedNumberEditForm
class TestWhitelistChangeForm(BlackWhitelistEditFormMixin, TestCase):
"""Exercise Whitelist number editing"""
def setUp(self):
super(TestWhitelistChangeForm, self).setUp()
self.factory = WhitelistFactory
self.form = WhitelistedNumberEditForm
class BlacklistDownload(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['read_blacklist']
model = Blacklist
def test_download_blacklist_file(self):
bl = BlacklistFactory()
rsp = self.client.get(reverse('blacklisted-numbers-download'))
self.assertOK(rsp)
self.assertIn(bl.phone_number, rsp.content.decode())
class WhitelistDownload(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['read_whitelist']
model = Whitelist
def test_download_whitelist_file(self):
wl = WhitelistFactory()
rsp = self.client.get(reverse('whitelisted-numbers-download'))
self.assertOK(rsp)
self.assertIn(wl.phone_number, rsp.content.decode())
class DeleteBlacklist(StaffUserMixin, ResponseCheckerMixin, TestCase):
permissions = ['delete_blacklist', 'browse_blacklist']
model = Blacklist
def setUp(self):
super(DeleteBlacklist, self).setUp()
self.url = reverse('blacklisted-numbers-delete')
BlacklistFactory.create_batch(size=3)
def test_get_deleted_page(self):
rsp = self.client.get(self.url)
self.assertOK(rsp)
self.assertIn('Are you sure yo
|
fake-name/ReadableWebProxy
|
WebMirror/processor/fonts/FontTables.py
|
Python
|
bsd-3-clause
| 18,234
| 0.033074
|
PREDEFINED_FONT_MAPS = {
(
(193, 0), (193, 1462), (297, 104), (297, 1358), (930, 104),
(930, 1358), (1034, 0), (1034, 1462),
)
: {'char' : '�', 'name' : 'glyph00000'},
(
(115, 276), (115, 804), (287, 518), (291, 653), (292, 325),
(305, 805), (376, 1116), (396, -20), (462, 977), (477, 125),
(594, 977), (596, 1116), (639, -20), (645, 125), (724, 977),
(750, -20), (802, 1116), (822, 125), (864, 653), (864, 810),
(907, 13), (995, 51), (995, 199), (1042, 518), (1042, 623),
(1042, 845),
)
: {'char' : 'e', 'name' : 'glyph00001'},
(
(31, 967), (31, 1047), (188, -20), (188, 315), (188, 967),
(188, 1116), (258, 1350), (354, 223), (354, 322), (354, 967),
(354, 1096), (354, 1350), (448, 117), (506, -20), (530, 117),
(548, -20), (574, 117), (653, -3), (656, 130), (672, 967),
(672, 1096), (680, 10), (680, 137),
)
: {'char' : 't', 'name' : 'glyph00002'},
(
(94, 148), (94, 303), (94, 635), (217, 1020), (268, 211),
(268, 301), (268, 416), (268, 893), (279, -20), (298, 1064),
(377, 117), (441, 977), (442, -20), (443, 525), (475, 117),
(491, 1114), (564, -20), (578, 977), (588, 1114), (625, 651),
(630, 117), (641, 532), (700, 977), (727, 53), (784, 1114),
(807, 287), (807, 440), (807, 539), (809, 156), (811, 657),
(811, 725), (811, 854), (817, 156), (850, 0), (973, 0),
(973, 748), (973, 940),
)
: {'char' : 'a', 'name' : 'glyph00003'},
(
(115, 376), (115, 549), (115, 817), (239, 118), (287, 339),
(287, 549), (287, 761), (383, 1116), (453, 975), (455, 119),
(467, -20), (614, -20), (621, 1116), (781, 119), (781, 975),
(851, 1116), (852, -20), (950, 338), (950, 758), (1122, 281),
(1122, 549), (1122, 810),
)
: {'char' : 'o', 'name' : 'glyph00004'},
(
(176, 0), (176, 1096), (311, 1096), (338, 946), (342, 0),
(342, 575), (342, 789), (346, 946), (397, 1027), (502, 975),
(581, 1116), (674, 975), (694, 1116), (804, 975), (892, 1116),
(926, 0), (926, 709), (926, 843), (1092, 0), (1092, 715),
(1092, 925),
)
: {'char' : 'n', 'name' : 'glyph00005'},
(
(162, 1337), (162, 1393), (162, 1450), (176, 0), (176, 1096),
(218, 1282), (218, 1503), (300, 1282), (300, 1503), (342, 0),
(342, 1096), (358, 1337), (358, 1449),
)
: {'char' : 'i', 'name' : 'glyph00006'},
(
(106, 752), (106, 827), (106, 961), (109, 49), (109, 203),
(169, 644), (188, 163), (231, -20), (264, 794), (264, 838),
(264, 903), (298, 559), (309, 732), (324, 1116), (369, 117),
(384, 977), (409, 676), (449, -20), (451, 502), (453, 117),
(502, 977), (514, 1116), (551, 623), (583, 117), (612, 440),
(636, 977), (655, -20), (691, 1116), (723, 200), (723, 285),
(723, 349), (746, 552), (801, 909), (860, 1044), (883, 146),
(883, 299), (883, 408),
)
: {'char' : 's', 'name' : 'glyph00007'},
(
(176, 0), (176, 1556), (334, 944), (342, 0), (342, 573),
(342, 787), (342, 1000), (342, 1085), (342, 1556), (344, 944),
(393, 1023), (501, 975), (574, 1114), (674, 975), (690, 1114),
(804, 975), (891, 1114), (926, 0), (926, 709), (926, 843),
(1092, 0), (1092, 715), (1092, 923),
)
: {'char' : 'h', 'name' : 'glyph00008'},
(
(176, 0), (176, 1096), (313, 1096), (332, 893), (340, 893),
(342, 0), (342, 588), (342, 749), (401, 1000), (531, 965),
(573, 1116), (664, 965), (676, 1116), (716, 965), (749, 1116),
(784, 950), (807, 1104),
)
: {'char' : 'r', 'name' : 'glyph00009'},
(
(176, 0), (176, 1556), (342, 0), (342, 1556),
)
: {'char' : 'I', 'name' : 'glyph00010'},
(
(115, 274), (115, 816), (287, 333), (287, 543), (287, 750),
(354, -20), (355, 1116), (441, 119), (442, 977), (569, -20),
(569, 1116), (588, 977), (590, 119), (758, 977), (760, 119),
(792, 1116), (798, -20), (911, 954), (913, 147), (913, 304),
(913, 510), (913, 545), (913, 778), (913, 1110), (913, 1556),
(917, 1033), (922, 147), (924, 954), (944, 0), (1079, 0),
(1079, 1556),
)
: {'char' : 'd', 'name' : 'glyph00011'},
(
(164, 170), (164, 379), (164, 1096), (332, 251), (332, 385),
(332, 1096), (363, -20), (454, 119), (563, -20), (584, 119),
(679, -20), (756, 119), (860, 66), (911, 147), (915, 307),
(915, 520), (915, 1096), (920, 147), (944, 0), (1081, 0),
(1081, 1096),
)
: {'char' : 'u', 'name' : 'glyph00012'},
(
(176, 0), (176, 1096), (311, 1096), (338, 946), (342, 0),
(342, 575), (342, 788), (346, 946), (393, 1026), (487, 975),
(564, 1116), (643, 975), (670, 1116), (762, 975), (874, 0),
(874, 713), (874, 844), (927, 1116), (1006, 930), (1014, 930),
(1040, 0), (1040, 612), (1040, 797), (1063, 1016), (1188, 975),
(1249, 1116), (1343, 975), (1368, 1116), (1461, 975), (1554, 1116),
(1573, 0), (1573, 713), (1573, 844), (1739, 0), (1739, 715),
(1739, 925),
)
: {'char' : 'm', 'name' : 'glyph00013'},
(
(39, -332), (39, -186), (39, -86), (125, 633), (125, 741),
(125, 921), (167, 60), (184, 185), (184, 238), (184, 298),
(199, -273), (199, -184), (199, -83), (232, 465), (241, 105),
(248, 388), (272, -492), (283, 86), (289, 630), (289, 745),
(289, 869), (317, 430), (325, 25), (332, 217), (332, 262),
(332, 347), (341, 1116), (349, -365), (406, 174), (415, 995),
(419, 512), (438, 25), (438, 403), (481, 395), (487, -492),
(489, -365), (496, 174), (530, 395), (532, 995), (535, 512),
(539, 1116), (625, 1116), (637, 25), (690, 174), (694, 1096),
(698, -365), (722, 395), (767, -492), (778, 512), (778, 748),
(778, 995), (789, 25), (868, 174), (870, 967), (898, 932),
(899, -240), (899, -133), (899, -44), (942, 587), (942, 748),
(942, 819), (1059, -301), (1059, -119), (1059, 24), (1073, 991),
(1073, 1096),
)
: {'char' : 'g', 'name' : 'glyph00014'},
(
(2, 1096), (16, -475), (16, -342), (71, -354), (90, -492),
(139, -354), (166, -492), (180, 1096), (302, -492), (310, -354),
(383, -162), (420, 471), (444, -6), (489, -337), (499, 257),
(518, 162), (526, 162), (539, 213), (559, -152), (622, 460),
(852, 1096), (1030, 1096),
)
: {'char' : 'y', 'name' : 'glyph00015'},
(
(115, 273), (115, 541), (115, 816), (287, 341), (287, 543),
(287, 969), (376, -20), (380, 1116), (450, 125), (610, 125),
(614, -20), (621, 969), (625, 1116), (671, 969), (704, 1116),
(747, 125), (781, -20), (801, 940), (856, 918), (862, 1082),
(891, 37), (891, 184), (907, 1059),
)
: {'char' : 'c', 'name' : 'glyph00016'},
(
(29, 967), (29, 1042), (225, 0), (225, 967), (225, 1102),
(225, 1163), (225, 1567), (391, 0), (391, 967), (391, 1096),
(391, 1167), (391, 1305), (481, 1430), (575, 1430), (578, 1567),
(643, 1430), (665, 1567), (670, 967), (670, 1096), (
|
739, 1399),
(782, 1532),
)
: {'char' : 'f', 'name' : 'glyph00017'},
(
(23, 1096), (197, 1096), (322, 0), (359, 467), (414, 251),
(422, 176), (430, 176), (441, 233), (490, 414), (508, 467),
(514, 0), (709, 1096), (721, 641), (751, 736), (791, 911),
(799, 911), (851, 702), (870, 643), (889, 1096), (1071, 0),
(1085, 467), (1141, 295), (1161, 178), (1169, 178), (1173, 214),
(1208, 364), (1268, 0), (1399, 1096), (1571, 1096),
)
: {'char' : 'w', 'name' : 'glyph00018'},
(
(176, -492), (176, 1096), (311, 1096), (330, 141), (334, 946),
(342, -492), (342, -41), (342, 45), (342, 141), (342, 318),
(342, 549), (342, 586), (342, 946), (344, 789), (402, 59),
(406, 1036), (494, 975), (496, 119), (576, 1116), (579, -20),
(662, 975), (666, 119), (686, -20), (686, 1116), (808, 119),
(808, 975), (900, -20), (904, 1116), (969, 349), (969, 551),
(969, 756), (1141, 279), (1141, 818),
)
: {'char' : 'p', 'name' : 'glyph00019'},
(
(152, 34), (152, 106), (152, 173), (213, 242), (219, -29),
(270, -29), (327, -29), (328, 242), (393, 41), (393, 173),
)
: {'char' : '.', 'name' : 'glyph00020'},
(
(176, 0), (176, 1556), (295, 0), (330, 141), (334, 950),
(342, 141), (342, 318), (342, 780), (342, 950), (342, 1051),
(342, 1178), (342, 1556), (402, 59), (458, 111
|
niemmi/algolib
|
algolib/sort/merge_sort.py
|
Python
|
bsd-3-clause
| 2,137
| 0
|
"""Two different implementations of merge sort. First one is the standard sort
that creates the result to new list on each level. Second one is an in-place
sort that uses two alternating buffers and offsets to limit memory usage
to O(2n).
"""
def sort(lst):
"""Standard merge sort.
Args:
lst: List to sort
Returns:
Sorted copy of the list
"""
if len(lst) <= 1:
return lst
mid = len(lst) // 2
low = sort(lst[:mid])
high = sort(lst[mid:])
res = []
i = j = 0
while i < len(low) and j < len(high):
if low[i] < high[j]:
res.append(low[i])
i += 1
else:
res.append(high[j])
j += 1
res.extend(low[i:])
    res.extend(high[j:])
return res
def helper(lst, buf, start, stop, to_buf):
"""Helper function for in-place sort with alternating buffers.
Args:
lst: List to sort
buf: Buffer to store the results
start: Start index
stop: Stop index
        to_buf: Boolean flag telling where the result should be written.
            If True the result is written to buf, if False it is written
            to lst.
"""
length = stop - start
if length <= 1:
if to_buf and length == 1:
buf[start] = lst[start]
return
mid = start + length // 2
helper(lst, buf, start, mid, not to_buf)
helper(lst, buf, mid, stop, not to_buf)
    # If the result goes to buf, swap lst & buf, since the following code
    # writes the merged result from buf into lst
if to_buf:
lst, buf = buf, lst
i = start
j = mid
to = start
while i < mid and j < stop:
if buf[i] < buf[j]:
lst[to] = buf[i]
i += 1
else:
lst[to] = buf[j]
j += 1
to += 1
for i in range(i, mid):
lst[to] = buf[i]
to += 1
for j in range(j, stop):
lst[to] = buf[j]
to += 1
def sort_in_place(lst):
"""In-place merge sort.
Args:
lst: List to sort
"""
helper(lst, [None] * len(lst), 0, len(lst), False)
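# A minimal usage sketch of both entry points, assuming only the functions
# above: `sort` returns a sorted copy, while `sort_in_place` mutates its
# argument.
if __name__ == '__main__':
    data = [5, 2, 9, 1, 5, 6]
    assert sort(data) == sorted(data)   # copy-based sort leaves `data` untouched
    sort_in_place(data)
    assert data == sorted([5, 2, 9, 1, 5, 6])  # buffer-based sort mutates in place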
|
Ksynko/django-crm
|
sample_project/external_apps/ajax_select/setup.py
|
Python
|
bsd-3-clause
| 387
| 0.005168
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='Ajax Select',
version='1.0',
      description='jQuery-powered auto-complete fields for ForeignKey and ManyToMany fields',
author='Crucial Felix',
author_email='crucialfelix@gmail.com',
url='http://code.google.com/p/django-ajax-selects/',
      packages=['ajax_select'],
)
|
RichDijk/eXe
|
exe/engine/package.py
|
Python
|
gpl-2.0
| 77,805
| 0.004679
|
# -*- coding: utf-8 -*-
# ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
# Copyright 2006-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
Package represents the collection of resources the user is editing
i.e. the "package".
"""
import datetime
import shutil
import logging
import time
import zipfile
import uuid
import re
from xml.dom import minidom
from exe.engine.path import Path, TempDirPath, toUnicode
from exe.engine.node import Node
from exe.engine.genericidevice import GenericIdevice
from exe.engine.multichoiceidevice import MultichoiceIdevice
from exe.engine.quiztestidevice import QuizTestIdevice
from exe.engine.truefalseidevice import TrueFalseIdevice
from exe.engine.wikipediaidevice import WikipediaIdevice
from exe.engine.casestudyidevice import CasestudyIdevice
from exe.engine.casopracticofpdidevice import CasopracticofpdIdevice
from exe.engine.citasparapensarfpdidevice import CitasparapensarfpdIdevice
from exe.engine.clozefpdidevice import ClozefpdIdevice
from exe.engine.clozeidevice import ClozeIdevice
from exe.engine.clozelangfpdidevice import ClozelangfpdIdevice
from exe.engine.debesconocerfpdidevice import DebesconocerfpdIdevice
from exe.engine.destacadofpdidevice import DestacadofpdIdevice
from exe.engine.ejercicioresueltofpdidevice import EjercicioresueltofpdIdevice
from exe.engine.eleccionmultiplefpdidevice import EleccionmultiplefpdIdevice
from exe.engine.freetextfpdidevice import FreeTextfpdIdevice
from exe.engine.galleryidevice import GalleryIdevice
from exe.engine.imagemagnifieridevice import ImageMagnifierIdevice
from exe.engine.listaidevice import ListaIdevice
from exe.engine.multiselectidevice import MultiSelectIdevice
from exe.engine.orientacionesalumnadofpdidevice import OrientacionesalumnadofpdIdevice
from exe.engine.orientacionestutoriafpdidevice import OrientacionestutoriafpdIdevice
from exe.engine.parasabermasfpdidevice import ParasabermasfpdIdevice
from exe.engine.recomendacionfpdidevice import RecomendacionfpdIdevice
from exe.engine.reflectionfpdidevice import ReflectionfpdIdevice
from exe.engine.reflectionfpdmodifidevice import ReflectionfpdmodifIdevice
from exe.engine.reflectionidevice import ReflectionIdevice
from exe.engine.seleccionmultiplefpdidevice import SeleccionmultiplefpdIdevice
from exe.engine.verdaderofalsofpdidevice import VerdaderofalsofpdIdevice
from exe.engine.persist import Persistable, encodeObject, decodeObjectRaw
from exe import globals as G
from exe.engine.resource import Resource
from twisted.persisted.styles import doUpgrade
from twisted.spread.jelly import Jellyable, Unjellyable
from exe.engine.beautifulsoup import BeautifulSoup
from exe.engine.field import Field, TextAreaField
from exe.engine.persistxml import encodeObjectToXML, decodeObjectFromXML
from exe.engine.lom import lomsubs
from exe.engine.checker import Checker
from exe.webui import common
log = logging.getLogger(__name__)
def clonePrototypeIdevice(title):
idevice = None
for prototype in G.application.ideviceStore.getIdevices():
if prototype.get_title() == title:
log.debug('have prototype of:' + prototype.get_title())
idevice = prototype.clone()
idevice.edit = False
break
return idevice
def burstIdevice(idev_type, i, node):
# given the iDevice type and the BeautifulSoup fragment i, burst it:
idevice = clonePrototypeIdevice(idev_type)
if idevice is None:
log.warn("unable to clone " + idev_type + " idevice")
freetext_idevice = clonePrototypeIdevice('Free Text')
if freetext_idevice is None:
log.error("unable to clone Free Text for " + idev_type
+ " idevice")
return
idevice = freetext_idevice
# For idevices such as GalleryImage, where resources are being attached,
# the idevice should already be attached to a node before bursting it open:
node.addIdevice(idevice)
idevice.burstHTML(i)
return idevice
def loadNodesIdevices(node, s):
soup = BeautifulSoup(s)
body = soup.find('body')
if body:
idevices = body.findAll(name='div',
attrs={'class' : re.compile('Idevice$') })
if len(idevices) > 0:
for i in idevices:
# WARNING: none of the idevices yet re-attach their media,
# but they do attempt to re-attach images and other links.
if i.attrMap['class']=="activityIdevice":
idevice = burstIdevice('Activity', i, node)
elif i.attrMap['class']=="objectivesIdevice":
idevice = burstIdevice('Objectives', i, node)
elif i.attrMap['class']=="preknowledgeIdevice":
idevice = burstIdevice('Preknowledge', i, node)
elif i.attrMap['class']=="readingIdevice":
idevice = burstIdevice('Reading Activity', i, node)
# the above are all Generic iDevices;
# below are all others:
elif i.attrMap['class']=="RssIdevice":
idevice = burstIdevice('RSS', i, node)
elif i.attrMap['class']=="WikipediaIdevice":
# WARNING: Wiki problems loading images with accents, etc:
idevice = burstIdevice('Wiki Article', i, node)
elif i.attrMap['class']=="ReflectionIdevice":
idevice = burstIdevice('Reflection', i, node)
elif i.attrMap['class']=="GalleryIdevice":
# WARNING: Gallery problems with the popup html:
idevice = burstIdevice('Image Gallery', i, node)
elif i.attrMap['class']=="ImageMagnifierIdevice":
# WARNING: Magnifier missing major bursting components:
idevice = burstIdevice('Image Magnifier', i, node)
elif i.attrMap['class']=="AppletIdevice":
# WARNING: Applet missing file bursting components:
idevice = burstIdevice('Java Applet', i, node)
elif i.attrMap['class']=="ExternalUrlIdevice":
idevice = burstIdevice('External Web Site', i, node)
elif i.attrMap['class']=="ClozeIdevice":
idevice = burstIdevice('Cloze Activity', i, node)
elif i.attrMap['class']=="FreeTextIdevice":
idevice = burstIdevice('Free Text', i, node)
elif i.attrMap['class']=="CasestudyIdevice":
idevice = burstIdevice('Case Study', i, node)
elif i.attrMap['class']=="MultichoiceIdevice":
idevice = burstIdevice('Multi-choice', i, node)
elif i.attrMap['class']=="MultiSelectIdevice":
idevice = burstIdevice('Multi-select', i, node)
elif i.attrMap['class']=="QuizTestIdevice":
                    idevice = burstIdevice('SCORM Quiz', i, node)
elif i.attrMap['class']=="TrueFalseIdevice":
idevice = burstIdevice('True-False Question', i, node)
else:
# NOTE: no custom idevices burst yet,
# nor any deprecated idevices.
|
miptliot/edx-platform
|
lms/djangoapps/teams/api_urls.py
|
Python
|
agpl-3.0
| 1,464
| 0
|
"""Defines the URL routes for the Team API."""
from django.conf import settings
from django.conf.urls import patterns, url
from .views import (
MembershipDetailView,
MembershipListView,
TeamsDetailView,
TeamsListView,
TopicDetailView,
TopicListView
)
TEAM_ID_PATTERN = r'(?P<team_id>[a-z\d_-]+)'
TOPIC_ID_PATTERN = r'(?P<topic_id>[A-Za-z\d_.-]+)'
urlpatterns = patterns(
'',
url(
r'^v0/teams/$',
TeamsListView.as_view(),
name="teams_list"
),
url(
r'^v0/teams/{team_id_pattern}$'.format(
team_id_pattern=TEAM_ID_PATTERN,
),
TeamsDetailView.as_view(),
name="teams_detail"
),
url(
r'^v0/topics/$',
TopicListView.as_view(),
name="topics_list"
),
url(
r'^v0/topics/{topic_id_pattern},{course_id_pattern}$'.format(
topic_id_pattern=TOPIC_ID_PATTERN,
            course_id_pattern=settings.COURSE_ID_PATTERN,
),
TopicDetailView.as_view(),
name="topics_detail"
),
url(
r'^v0/team_membership/$',
MembershipListView.as_view(),
name="team_membership_list"
),
url(
        r'^v0/team_membership/{team_id_pattern},{username_pattern}$'.format(
team_id_pattern=TEAM_ID_PATTERN,
username_pattern=settings.USERNAME_PATTERN,
),
MembershipDetailView.as_view(),
name="team_membership_detail"
)
)
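# Illustrative paths these routes would match (the IDs here are hypothetical,
# and the course/username fragments depend on patterns defined in Django
# settings):
#   /v0/teams/                          -> teams_list
#   /v0/teams/my-team_1                 -> teams_detail
#   /v0/team_membership/my-team_1,alice -> team_membership_detail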
|
Ninjakow/TrueSkill
|
lib/pytba/test/api_test.py
|
Python
|
gpl-3.0
| 1,080
| 0.001852
|
import unittest
from pytba import VERSION
from pytba import api as client
class TestApiMethods(unittest.TestCase):
def setUp(self):
client.set_api_key("WesJordan", "PyTBA-Unit-Test", VERSION)
def test__tba_get(self):
# Query with proper key should succeed
team = client.tba_get('team/frc2363')
self.assertEqual(team['key'], 'frc2363')
# Query with invalid key should fail
with self.assertRaises(TypeError):
client.tba_get('team/frc2363', app_id='invalid key')
def test__event_get(self):
event = client.event_get('2016tes')
self.assertEqual(len(event.teams), 75)
self.assertEqual(event.info['name'], 'Tesla Division')
self.assertEqual(len(event.matches), 140)
self.assertEqual(event.rankings[1][1], '2056')
def test__team_matches(self):
matches = client.team_matches('frc2363', 2016)
self.assertEqual(len(matches), 62)
self.assertEqual(matches[-1]['alliances']['opponent']['score'], 89)
if __name__ == '__main__':
unittest.main()
|
veltri/DLV2
|
tests/parser/aggregates.max.propagation.7.test.py
|
Python
|
apache-2.0
| 213
| 0
|
input = """
a(1).
a(2) | a(3).
ok1 :- #max{V:a(V)} = 3.
b(3).
b(1) | b(2).
ok2 :- #max{V:b(V)} = 3.
"""
output = ""
|
"
a(1).
a(2) | a(3).
ok1 :- #max{V:a(V)} = 3.
b(3).
b(1) | b(2).
ok2 :- #max{V:b(V)} = 3.
"""
|
DayGitH/Python-Challenges
|
DailyProgrammer/20120209C.py
|
Python
|
mit
| 1,271
| 0.004721
|
'''
We all know the classic "guessing game" with higher-or-lower prompts. Let's do
a role reversal: you create a program that will guess numbers between 1 and 100,
and respond appropriately based on whether users say that the number is too high
or too low. Try to make a program that can guess your number based on user input
and great code!
'''
import random
import numpy
got_answer = False
max = 100
min = 0
try_count = 0
while not got_answer:
try_count += 1
num = -1
while (num > 1) or (num < 0):
num = .125 * numpy.random.randn() + 0.5
print(num)
guess = int(((max - min) * num) + min)
print('1. Higher')
print('2. Correct!')
print('3. Lower')
print('\nIs your number {}'.format(guess))
response = input('> ')
if response == '2':
got_answer = True
if try_count > 1:
print('\nHurray! I guessed {} in {} tries!!!'.format(guess, try_count))
else:
            print('\nHurray! I guessed {} in the first try!!! WOOHOO!'.format(guess))
elif response == '1':
min = guess + 1
elif response == '3':
max = guess - 1
if min > max:
got_answer = True
print('ERROR! ERROR! ERROR! Master did not answer the questions properly!')
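# (Note: each guess splits [min, max] at a point drawn from a normal
# distribution centered at 0.5 with std 0.125, scaled into the range, so the
# program behaves like a randomized binary search and should need on the order
# of log2(100), roughly 7, guesses.)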
|
BollMose/daynote
|
test_sht.py
|
Python
|
apache-2.0
| 132
| 0.015152
|
import sht21
with sht21.SHT21(1) as sht21:
print "temp: %s"%sht2
|
1.read_temperature()
print "humi: %s"%sht21
|
.read_humidity()
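# (Note: the SHT21 is an I2C humidity/temperature sensor; the argument 1 above
# presumably selects I2C bus 1, e.g. on a Raspberry Pi.)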
|
swapnilgt/percPatternDiscovery
|
rlcs/preAnalysisRun.py
|
Python
|
agpl-3.0
| 1,024
| 0.06543
|
import os
import sys
from src import impl as rlcs
import utils as ut
import analysis as anls
import matplotlib.pyplot as plt
import logging
import pickle as pkl
import time
config = ut.loadConfig('config')
sylbSimFolder=config['sylbSimFolder']
transFolder=config['transFolder']
lblDir=config['lblDir']
onsDir=config['onsDir']
resultDir=config['resultDir']
queryList = [['DHE','RE','DHE','RE','KI','TA','TA','KI','NA','TA','TA','KI','TA','TA','KI','NA'],
             ['TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA','TA','KI','TA'],
             ['TA','KI','TA','TA','KI','TA','TA','KI'],
             ['TA','TA','KI','TA','TA','KI'],
             ['TA', 'TA','KI', 'TA'],
             ['KI', 'TA', 'TA', 'KI'],
             ['TA','TA','KI','NA'],
             ['DHA','GE','TA','TA']]
queryLenCheck = [4,6,8,16]
for query in queryList:
if len(query) not in queryLenCheck:
print 'The query is not of correct length!!'
sys.exit()
masterData = ut.getAllSylbData(tPath = transFolder, lblDir = lblDir, onsDir = onsDir)
res = anls.getPatternsInTransInGTPos(masterData, queryList)
|
tqchen/tvm
|
python/tvm/tir/buffer.py
|
Python
|
apache-2.0
| 9,009
| 0.001332
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Abstraction for array data structures."""
from numbers import Integral
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.runtime import Object, convert
from tvm.ir import PrimExpr, PointerType, PrimType
from . import _ffi_api
@tvm._ffi.register_object("tir.Buffer")
class Buffer(Object):
"""Symbolic data buffer in TVM.
    Buffer provides a way to represent the data layout
    specialization of a data structure in TVM.
Do not construct directly, use :py:func:`~decl_buffer` instead.
See the documentation of :py:func:`decl_buffer` for more details.
See Also
--------
decl_buffer : Declare a buffer
"""
READ = 1
WRITE = 2
def access_ptr(self, access_mask, ptr_type="handle", content_lanes=1, offset=0):
"""Get an access pointer to the head of buffer.
This is the recommended method to get buffer data
        address when interacting with external functions.
Parameters
----------
access_mask : int
The access pattern MASK. Indicate whether the
access will read or write to the data content.
ptr_type : str, optional
The data type of the result pointer. Do not specify
unless we want to cast pointer to specific type.
content_lanes: int, optional
The number of lanes for the data type. This value
is greater than one for vector types.
offset: Expr, optional
The offset of pointer. We can use it to offset by
the number of elements from the address of ptr.
Examples
--------
.. code-block:: python
# Get access ptr for read
buffer.access_ptr("r")
# Get access ptr for read/write with bitmask
buffer.access_ptr(Buffer.READ | Buffer.WRITE)
# Get access ptr for read/write with str flag
buffer.access_ptr("rw")
# Get access ptr for read with offset
buffer.access_ptr("r", offset = 100)
"""
if isinstance(access_mask, string_types):
mask = 0
for value in access_mask:
if value == "r":
mask = mask | Buffer.READ
elif value == "w":
mask = mask | Buffer.WRITE
else:
raise ValueError("Unknown access_mask %s" % access_mask)
access_mask = mask
offset = convert(offset)
return _ffi_api.BufferAccessPtr(self, access_mask, ptr_type, content_lanes, offset)
def vload(self, begin, dtype=None):
"""Generate an Expr that loads dtype from begin index.
Parameters
----------
begin : Array of Expr
The beginning index in unit of Buffer.dtype
dtype : str
The data type to be loaded,
            can be a vector type whose lanes are a multiple of Buffer.dtype's
Returns
-------
load : Expr
The corresponding load expression.
"""
begin = (begin,) if isinstance(begin, (int, PrimExpr)) else begin
dtype = dtype if dtype else self.dtype
return _ffi_api.BufferVLoad(self, begin, dtype)
def vstore(self, begin, value):
"""Generate a Stmt that store value into begin index.
Parameters
----------
begin : Array of Expr
The beginning index in unit of Buffer.dtype
value : Expr
The value to be stored.
Returns
-------
store : Stmt
The corresponding store stmt.
"""
begin = (begin,) if isinstance(begin, (int, PrimExpr)) else begin
return _ffi_api.BufferVStore(self, begin, value)
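    # A short illustrative sketch of the two methods above, assuming `buf` is a
    # declared float32 buffer (the names here are hypothetical):
    #   value = buf.vload([0], "float32x4")  # load expression for 4 lanes at index 0
    #   stmt = buf.vstore([0], value)        # store statement writing them back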
def decl_buffer(
shape,
dtype=None,
name="buffer",
data=None,
strides=None,
elem_offset=None,
scope="",
data_alignment=-1,
offset_factor=0,
buffer_type="",
):
"""Declare a new symbolic buffer.
    Normally a buffer is created automatically during lower and build.
    This is only needed if users want to specify their own buffer layout.
See the note below for detailed discussion on usage of buffer.
Parameters
----------
shape : tuple of Expr
The shape of the buffer.
dtype : str, optional
The data type of the buffer.
name : str, optional
The name of the buffer.
data : Var, optional
The data pointer in the buffer.
strides: array of Expr
The stride of the buffer.
elem_offset: Expr, optional
The beginning offset of the array to data.
In terms of number of elements of dtype.
scope: str, optional
The storage scope of the buffer, if not global.
If scope equals empty string, it means it is global memory.
data_alignment: int, optional
The alignment of data pointer in bytes.
If -1 is passed, the alignment will be set to TVM's internal default.
offset_factor: int, optional
The factor of elem_offset field, when set,
elem_offset is required to be multiple of offset_factor.
        If 0 is passed, the alignment will be set to 1.
        If non-zero is passed, we will create a Var for elem_offset if elem_offset is not None.
buffer_type: str, optional, {"", "auto_broadcast"}
auto_broadcast buffer allows one to implement broadcast computation
without considering whether dimension size equals to one.
TVM maps buffer[i][j][k] -> buffer[i][0][k] if dimension j's shape equals 1.
Returns
-------
buffer : Buffer
The created buffer
Example
-------
Here's an example of how broadcast buffer can be used to define a symbolic broadcast operation,
.. code-block:: python
m0, m1, m2 = te.var("m0"), te.var("m1"), te.var("m2")
n0, n1, n2 = te.var("n0"), te.var("n1"), te.var("n2")
o0, o1, o2 = te.var("o0"), te.var("o1"), te.var("o2")
A = te.placeholder((m0, m1, m2), name='A')
B = te.placeholder((n0, n1, n2), name='B')
C = te.compute((o0, o1, o2), lambda i, j, k: A[i, j, k] + B[i, j, k], name='C')
Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], target='llvm', name='bcast_add', binds={A:Ab, B:Bb})
ctx = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(2, 4, 3)).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=(2, 1, 3)).astype(B.dtype), ctx)
c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), ctx)
fadd(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
Note
----
Buffer data structure reflects the DLTensor structure in dlpack.
While DLTensor data structure is very general, it is usually helpful
to create function that only handles specific case of data structure
and make compiled function benefit from it.
    If strides and elem_offset are passed as None
    when constructing the function, then the function will be specialized
    for a DLTensor that is compact and aligned.
    If the user passes a fully generic symbolic array as the strides,
    then the resulting function becomes fully generic.
|
perfidia/selearea
|
tests/seleareaTest.py
|
Python
|
mit
| 2,350
| 0.00383
|
#-*- coding: utf-8 -*-
'''
Created on 23 mar 2014
@author: mariusz
@author: tomasz
'''
import unittest
from selearea import get_ast, get_workareas
class seleareaTest(unittest.TestCase):
def get_fc_pages(self):
urls = {
"http://fc.put.poznan.pl",
"http://fc.put.poznan.pl/rekrutacja/post-powanie-kwalifikacyjne%2C29.html",
"http://fc.put.poznan.pl/o-wydziale/witamy%2C39.html"
}
return [get_ast(url) for url in urls]
def get_fce_pages(self):
urls = {
"http://www.bis.put.poznan.pl/",
"http://www.bis.put.poznan.pl/evPages/show/id/182"
}
return [get_ast(url) for url in urls]
def get_identical_pages(self):
urls = {
"http://www.bis.put.poznan.pl/",
"http://www.bis.put.poznan.pl/"
}
return [get_ast(url) for url in urls]
def test_get_wrong_page(self):
url = "putpoznan.pl"
with self.assertRaises(ValueError):
get_ast(url)
def test_get_none_page(self):
with self.assertRaises(ValueError):
get_ast(None)
def test_get_workarea_identical_pages(self):
asts = self.get_identical_pages()
workareas = get_workareas(asts)
        self.assertEqual(0, len(workareas), "AssertionFailed: work area found on identical pages.")
def test_get_ast_fc_count(self):
asts = self.get_fc_pages()
self.assertEqual(3, len(asts), "AssertionFailed: count for fc pages.")
def test_get_workarea_fc_content(self):
asts = self.get_fc_pages()
workareas = get_workareas(asts)
xpath = str("//html[@class='js']/body/div[@id='right']/div[@id='content']")
self.assertEqual(xpath, workareas[0], "AssertionFailed: xpaths for fc pages.")
def test_get_ast_fce_count(self):
asts = self.get_fce_pages()
        self.assertEqual(2, len(asts), "AssertionFailed: count for fce pages.")
def test_get_workarea_fce_content(self):
asts = self.get_fce_pages()
workareas = get_workareas(asts)
xpath = str("//html/body/div[@id='main']/div/div[@id='left_menu']/div[@id='left_menu_box']")
        self.assertEqual(xpath, workareas[1], "AssertionFailed: xpaths for fce pages.")
if __name__ == "__main__":
unittest.main()
|
SoftwareDefinedBuildings/smap
|
python/smap/smapconf.py
|
Python
|
bsd-2-clause
| 3,347
| 0.008664
|
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
import sys
import os
import logging
from twisted.python import log
# default configuration
SERVER = {
'port' : 8080,
}
LOGGING = {
# configure logging to sentry via raven
# 'raven': {
# 'dsn': 'twisted+http://a888206fd60f4307a7b1a880d1fe04fe:15ecf70787b0490880c712d8469459bd@localhost:9000/2'
# },
'console': {
'level': 'INFO'
}
}
# guess where the html might be...
try:
    if 'docroot' not in SERVER:
path = os.path.dirname(sys.modules[__name__].__file__)
path = os.path.join(path, "data")
SERVER['docroot'] = path
except:
SERVER['docroot'] = None
class InverseFilter(logging.Filter):
    def filter(self, record):
return not logging.Filter.filter(self, record)
def start_logging():
observer = log.PythonLoggingObserver()
observer.start()
for logtype, config in LOGGING.iteritems():
if logtype == "raven":
from raven.handlers.logging import SentryHandler
lvl = getattr(logging, config.get('level', 'info').upper())
handler = SentryHandler(config["dsn"])
handler.setLevel(lvl)
# don't try to log sentry errors with sentry
handler.addFilter(InverseFilter('sentry'))
logging.getLogger().addHandler(handler)
print "Starting sentry logging [%s] with destination %s"% (
config.get('level', 'info').upper(), config["dsn"])
elif logtype == 'console':
console = logging.StreamHandler()
lvl = getattr(logging, config.get('level', 'info').upper())
console.setLevel(lvl)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
print "Starting console logging [%s]" % config.get('level', 'info').upper()
|
liangdas/mqantserver
|
client/mqtt_chat_client.py
|
Python
|
apache-2.0
| 2,416
| 0.021183
|
# -*- coding: utf-8 -*-
'''
Created on 17/2/16.
@author: love
'''
import paho.mqtt.client as mqtt
import json
import ssl
def on_connect(client, userdata, flags, rc):
print("Connected with result code %d"%rc)
client.publish("Login/HD_Login/1", json.dumps({"userName": user, "passWord": "Hello,anyone!"}),qos=0,retain=False)
def on_message(client, userdata, msg):
print ('---------------')
print ("topic :"+msg.topic)
print ("payload :"+msg.payload)
client.subscribe([("chat",2),("aaa",2)])
client.unsubscribe(["chat"])
#client.publish("login/addUser", json.dumps({"user": user, "say": "Hello,anyone!"}),qos=2,retain=False)
#print(msg.topic+":"+str(msg.payload.decode()))
#print(msg.topic+":"+msg.payload.decode())
#payload = json.loads(msg.payload.decode())
#print(payload.get("user")+":"+payload.get("say"))
def mylog(self,userdata,level, buf):
print buf
if __name__ == '__main__':
client = mqtt.Client(protocol=mqtt.MQTTv31)
    client.username_pw_set("admin", "password")  # must be set, otherwise the broker returns "Connected with result code 4"
client.on_connect = on_connect
client.on_message = on_message
    # Connecting to the test server requires TLS; Python's TLS support here is fairly limited.
    # A CA certificate is needed; this one comes from https://curl.haxx.se/docs/caextract.html
HOST = "mqant.com"
    # client.tls_set(ca_certs="caextract.pem", certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED,
# tls_version=ssl.PROTOCOL_TLSv1, ciphers=None)
client.connect(HOST, 3563, 60)
#client.loop_forever()
    user = raw_input("Enter user name: ")
client.user_data_set(user)
client.loop_start()
while True:
        s = raw_input("First type 'join' to enter the room, then type any chat message:\n")
if s:
if s=="join":
client.publish("Chat/HD_JoinChat/2", json.dumps({"roomName": "mqant"}),qos=0,retain=False)
elif s=="start":
client.publish("Master/HD_Start_Process/2", json.dumps({"ProcessID": "001"}),qos=0,retain=False)
elif s=="stop":
client.publish("Master/HD_Stop_Process/2", json.dumps({"ProcessID": "001"}),qos=0,retain=False)
else:
client.publish("Chat/HD_Say/2", json.dumps({"roomName": "mqant","from":user,"target":"*","content": s}),qos=0,retain=False)
|