repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ajnelson/dfxmlfs | dfxmlfs.py | Python | lgpl-2.1 | 10,784 | 0.006677 | #!/usr/bin/env python
# HelloFS Copyright (C) 2006 Andrew Straw <strawman@astraw.com>
#
# DFXMLFS Copyright (C) 2015 Prometheus Computing, LLC.
# Implemented by Alex Nelson <a.nelson@prometheuscomputing.com>
#
# This program can be distributed under the terms of the GNU LGPL v2.1.
# See the file COPYING.
#
__version__ = "0.0.1"
import os
import stat
import errno
import logging
import collections
import fuse
import Objects
_logger = logging.getLogger(os.path.basename(__file__))
if not hasattr(fuse, '__version__'):
raise RuntimeError, \
"your fuse-py doesn't know of fuse.__version__, probably it's too old."
fuse.fuse_python_api = (0, 2)
#This list is for debug purposes.
_stat_fields = ['st_atime', 'st_ctime', 'st_dev', 'st_gid', 'st_ino', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid']
def obj_to_stat(obj):
st = fuse.Stat()
#for field in _stat_fields:
# _logger.debug("st.%s = %r." % (field, getattr(st, field)))
st.st_ino = obj.inode
st.st_dev = 0
st.st_nlink = obj.nlink or 0 #In case of None
st.st_size = obj.filesize
#Don't try translating user IDs for now - complicated in NTFS.
st.st_uid = 0
st.st_gid = 0
if obj.name_type == "r":
st.st_mode = 0o0444 | stat.S_IFREG
elif obj.name_type == "d":
st.st_mode = 0o0555 | stat.S_IFDIR
else:
st.st_mode = 0o0444
#_logger.debug("Setting timestamps.")
if obj.atime is None:
st.st_atime = 0
else:
st.st_atime = obj.atime.timestamp
#_logger.debug("Set a timestamp.")
if obj.mtime is None:
st.st_mtime = 0
else:
st.st_mtime = obj.mtime.timestamp
if obj.crtime is None:
st.st_ctime = 0
else:
st.st_ctime = obj.crtime.timestamp
#_logger.debug("st = %r." % st)
#for field in _stat_fields:
# _logger.debug("st.%s = %r." % (field, getattr(st, field)))
return st
class DFXMLFS(fuse.Fuse):
def __init__(self, *args, **kw):
self._referenced_inodes = set()
self._last_assigned_inode_number = 2
fuse.Fuse.__init__(self, *args, **kw)
def _next_inode_number(self):
while self._last_assigned_inode_number < 2**32:
self._last_assigned_inode_number += 1
if not self._last_assigned_inode_number in self.referenced_inodes:
break
if self._last_assigned_inode_number == 2**32:
raise ValueError("Out of inode numbers.")
return self._last_assigned_inode_number
def main(self):
#_logger.debug("dir(self) = %r." % dir(self))
if not hasattr(self, "imgfile"):
self.imgfile = None
else:
#_logger.debug("Getting real imgfile path.")
self.imgfile = os.path.realpath(self.imgfile)
#_logger.debug("self.imgfile = %r." % self.imgfile)
if not hasattr(self, "xmlfile"):
raise RuntimeError("-o xmlfile must be passed on the command line.")
_logger.info("Parsing DFXML file...")
#Key: Absolute path, including partition designation
#Value: Objects.FileObject
self.objects_by_path = dict()
self.dir_lists_by_path = collections.defaultdict(list)
self.volumes = dict()
objects_without_inode_numbers = []
for (tup_no, (event, obj)) in enumerate(Objects.iterparse(self.xmlfile)):
if not isinstance(obj, Objects.FileObject):
continue
#_logger.debug("obj.filename = %r." % obj.filename)
alloc = obj.is_allocated()
if alloc is None:
#_logger.debug("Assuming allocated.")
pass
elif alloc == False:
#_logger.debug("Not allocated.")
continue
if obj.filename is None:
#_logger.debug("Null filename.")
continue
if obj.filename.endswith(("/.", "/..")) or obj.filename in [".", ".."]:
#_logger.debug("Dot-dir filename.")
continue
partition_dir = "partition_" + ("null" if obj.partition is None else str(obj.partition))
if obj.partition not in self.volumes:
self.volumes[obj.partition] = obj.volume_object #Might be null.
#Every file should end up with an inode number; but they should be assigned after the stream is all visited.
if obj.inode is None:
objects_without_inode_numbers.append(obj)
filepath = partition_dir + "/" + obj.filename
self.objects_by_path["/" + filepath] = obj
basename = os.pa | th.basename(filepath)
dirname = os.path.dirname(filepath)
self.dir_lists_by_path["/" + dirname].append(basename)
#Shorten reading DFXML files in debug settings
if "debug" in self.fuse_args.optlist and tup_no > 50:
| _logger.debug("Shortening object parsing while in debug mode: Only 50 file objects read from XML.")
break
#Assign inode numbers for objects that were in the stream first
for obj in objects_without_inode_numbers:
obj.inode = self._next_inode_number()
#Creating the top-level partition directories a loop ago means they need to be created again for the root directory.
for partition_number in self.volumes:
partition_dir = "partition_" + ("null" if partition_number is None else str(partition_number))
partition_obj = Objects.FileObject()
partition_obj.filename = partition_dir
partition_obj.filesize = 0
partition_obj.name_type = "d"
partition_obj.alloc = True
partition_obj.inode = self._next_inode_number()
partition_obj.nlink = 2 #This should be adjusted to be 1 + # of directory children.
self.objects_by_path["/" + partition_dir] = partition_obj
self.dir_lists_by_path["/"].append(partition_dir)
_logger.info("Parsed DFXML file.")
#_logger.debug("self.objects_by_path = %r." % self.objects_by_path)
#_logger.debug("self.dir_lists_by_path = %r." % self.dir_lists_by_path)
#_logger.debug("self.volumes = %r." % self.volumes)
return fuse.Fuse.main(self)
def getattr(self, path):
if path == '/':
st = fuse.Stat()
st.st_mode = stat.S_IFDIR | 0o555
st.st_nlink = len(self.dir_lists_by_path["/"])
else:
obj = self.objects_by_path.get(path)
if obj is None:
return -errno.ENOENT
st = obj_to_stat(obj)
#for field in _stat_fields:
# _logger.debug("st.%s = %r." % (field, getattr(st, field)))
return st
def readdir(self, path, offset):
dir_list = self.dir_lists_by_path.get(path)
if dir_list is None:
_logger.error("readdir failed to find a directory: %r." % path)
else:
for r in '.', '..':
yield fuse.Direntry(r)
for filename in dir_list:
yield fuse.Direntry(filename)
def open(self, path, flags):
#Existence check
#TODO Isn't this handled by getattr?
if path == "/":
pass
elif not path in self.objects_by_path:
return -errno.ENOENT
#Access check - read-only
accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
if (flags & accmode) != os.O_RDONLY:
return -errno.EACCES
return 0
def read(self, path, size, offset):
_logger.debug("read(%r, %r, %r)" % (path, size, offset))
if self.imgfile is None:
_logger.error("Cannot read file without backing disk image.")
return -errno.EIO
#Existence check
#TODO Isn't this handled by getattr?
obj = self.objects_by_path.get(path)
if obj is None:
_logger.debug("Could not get file for reading: %r." % path)
return -errno.ENOENT
_logger.debug("Found object at path: %r." % path)
#File type check
if obj.n |
saurabh6790/medsyn-app1 | selling/doctype/patient_encounter_entry/patient_encounter_entry.py | Python | agpl-3.0 | 25,080 | 0.016627 | from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt, fmt_money, cstr, cint
from selling.doctype.customer.customer import DocType
import datetime
from webnotes import msgprint, _
from selling.doctype.lead.lead import create_contact
from webnotes.model.code import get_obj
from webnotes.model.bean import getlist, copy_doclist
from selling.doctype.patient_encounter_entry.notification_schedular import get_encounters
from webnotes.model.doc import Document, make_autoname
class DocType():
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def autoname(self):
entry = make_autoname(webnotes.conn.get_value('DocType', 'Patient Encounter Entry', 'autoname'))
company = webnotes.conn.sql(""" select name from tabCompany
where name = (select value from tabSingles
where doctype = 'Global Defaults' and field = 'default_company') """)[0][0]
self.doc.name = company + ' ' + entry
def validate(self):pass
# if not webnotes.conn.sql("select patient from `tabPatient Encounter Entry` where name = '%s'"%self.doc.name):
# self.send_notification()
def on_update(self):
patient_id = None
from datetime import datetime
if self.doc.status == 'Canceled':
webnotes.conn.sql("update `tabPatient Encounter Entry` set docstatus = '1' where name = '%s'"%(self.doc.name))
s1=(self.doc.start_time).split(':')
s2=(self.doc.end_time).split(':')
# date_a=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(s1[0]+":"+s1[1],'%H:%M').time()))
# date_b=cstr(datetime.combine(datetime.strptime(self.doc.encounter_date,'%Y-%m-%d').date(),datetime.strptime(s2[0]+":"+s2[1],'%H:%M').time()))
#webnotes.errprint(self.doc.entry_in_child)
if self.doc.new_user == 1 and not self.doc.new_patient:
patient_id = self.make_patient()
self.doc.new_patient=patient_id
self.create_new_contact()
self.create_customer(patient_id)
self.create_account_head(patient_id)
self.doc.save()
if self.doc.entry_in_child == 'False':
self.make_child_entry(patient_id)
#self.make_event()
if not self.doc.eventid:
self.create_child()
else:
webnotes.conn.sql("update `tabSlot Child` set slot='"+self.doc.appointment_slot+"', start_time='"+cstr(datetime.strptime(date_a,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"', end_time='"+cstr(datetime.strptime(date_b,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"' where encounter='"+self.doc.name+"'")
# webnotes.errprint(date_a)
webnotes.conn.sql("update `tabEvent` set starts_on='"+cstr(datetime.strptime(date_a,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"', ends_on='"+cstr(datetime.strptime(date_b,'%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M'))+"' where name='"+self.doc.eventid+"'")
if cint(self.doc.checked_in)==1: pass
# check_confirmed=webnotes.conn.sql("select true from `tabSlot Child` where slot='"+self.doc.appointment_slot+"' and modality='"+self.doc.encounter+"' and study='"+self.doc.study+"' and date_format(start_time,'%Y-%m-%d %H:%M')=date_format('"+date_a+"','%Y-%m-%d %H:%M') and date_format(end_time,'%Y-%m-%d %H:%M')=date_format('"+date_b+"','%Y-%m-%d %H:%M') and status='Confirm'",debug=1)
# if not check_confirmed:
# webnotes.conn.sql("update tabEvent set event_type='Confirm' where name='%s'"%self.doc.eventid)
# webnotes.conn.sql("update `tabSlot Child` set status='Confirm' where encounter='%s'"%self.doc.name)
# else:
# webnotes.msgprint("Selected slot is not available",raise_exception=1)
# get_encounters()
def send_notification(self):
mail_list = []
number = []
msg = """Hi %(patient)s, Your appointment has been schedule on %(encounter_date)s at time %(start_time)s
for study %(study)s on modality %(modality)s"""%{'patient': self.doc.patient, 'encounter_date':self.doc.encounter_date,
'start_time':self.doc.start_time, 'study':self.doc.study, 'modality':self.doc.modality}
technologiest_contact = webnotes.conn.sql("select cell_number, personal_email from tabEmployee where name = '%s'"%(self.doc.technologist),as_list=1)
patient_contact = webnotes.conn.sql("select mobile, email from `tabPatient Register` where name = '%s'"%(self.doc.patient),as_list=1)
# webnotes.errprint([technologiest_contact, patient_contact])
if mail_list:
mail_list.append(technologiest_contact[0][1])
mail_list.append(patient_contact[0][1])
if number:
number.append(technologiest_contact[0][0])
number.append(patient_contact[0][0])
self.send_mail(msg, mail_list)
self.send_sms(msg, number)
def send_mail(self, msg, mail_list):
from webnotes.utils.email_lib import sendmail
for id in mail_list:
if id:
sendmail(id, subject='Appoiontment Scheduling', msg = msg)
def send_sms(self, msg, number):
ss = get_obj('SMS Settings', 'SMS Settings', with_children=1)
# webnotes.errprint(ss)
for num in number:pass
# webnotes.errprint(['number',num])
args = {}
for d in getlist(ss.doclist, 'static_parameter_details'):
args[d.parameter] = d.value
sms_url=webnotes.conn.get_value('SMS Settings', None, 'sms_gateway_url')
msg_parameter=webnotes.conn.get_value('SMS Settings', None, 'message_parameter')
receiver_parameter=webnotes.conn.get_value('SMS Settings', None, 'receiver_parameter')
for num in number:
if num:
url = sms_url +"?user="+ args["user"] +"&senderID="+ args["sender ID"] +"&receipientno="+ num +"\
| &dcs="+ args["dcs"]+ "&msgtxt=" + msg +"&state=" +args["state"]
# webnotes.errprint(url)
import requests
r = requests.get(url)
def create_new_contact(self):
details = {}
details['first_name'] = self.doc.first_name
details['email_id'] = self.doc.email or ''
details['mobile_no'] = self.doc | .mobile or ''
details['doc']='Customer'
details['link']=self.doc.name or ''
create_contact(details)
def create_customer(self, patient_id):
from webnotes.model.doc import Document
d = Document('Customer')
d.customer_name = patient_id
d.full_name = self.doc.first_name
d.save()
def get_company_abbr(self):
return webnotes.conn.get_value('Company', self.doc.company, 'abbr')
def get_receivables_group(self):
g = webnotes.conn.sql("select receivables_group from tabCompany where name=%s", self.doc.company)
g = g and g[0][0] or ''
if not g:
msgprint("Update Company master, assign a default group for Receivables")
raise Exception
return g
def create_accou |
shalomeir/snippod-boilerplate | snippod_boilerplate/wsgi.py | Python | mit | 471 | 0.006369 | """
WSGI config for thinkster_django_angular_boilerpla | te project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "snippod_boilerplate.settings.prod")
from django | .core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
|
Germanika/plover | test/test_key_combo.py | Python | gpl-2.0 | 4,119 | 0.005098 |
from plover.key_combo import parse_key_combo
from . import TestCase
class KeyComboParserTest(TestCase):
def test_noop(self):
for combo_string in ('', ' '):
self.assertEqual(parse_key_combo(combo_string), [])
def test_syntax_error(self):
for combo_string in (
# Invalid character.
'Return,',
'Return&',
'Ret. urn <',
'exclam ! foo',
'shift[a]',
# Unbalanced )
') arg',
'arg )',
'arg())',
'arg(x) )',
# Unbalanced (
'test(',
'( grr',
'foo ( bar',
'foo (bar ( ',
'foo ((',
):
msg = 'parse_key_combo(%r): SyntaxError not raised' % (
combo_string,
)
with self.assertRaisesWithMessage(SyntaxError, msg):
parse_key_combo(combo_string)
def test_already_pressed(self):
for combo_string in (
# Pressing an already pressed key.
'foo(foo)',
'Foo(foO)',
'foo(fOo(arg))',
'foo(bar(Foo))',
'foo(bar(foo(x)))',
):
msg = 'parse_key_combo(%r): ValueError not raised' % (
combo_string,
)
with self.assertRaisesWithMessage(ValueError, msg):
parse_key_combo(combo_string)
def test_stacking(self):
for combo_string_variants, expected in (
# + press, - release
# 1 is not a valid identifier, but still a valid key name.
(('1',) , '+1 -1' ),
(('Shift_l', 'SHIFT_L') , '+shift_l -shift_l' ),
# Case does not matter.
(('a', ' A ') , '+a -a' ),
(('a(b c)', 'a ( b c )') , '+a +b -b +c -c -a' ),
(('a(bc)', ' a( Bc )') , '+a +bc -bc -a' ),
(('a(bc(d)e f(g) h())i j',), '+a +bc +d -d -bc +e -e +f +g -g -f +h -h -a +i -i +j -j'),
(('foo () bar ( foo a b c (d))',
'fOo () Bar ( FOO a B c (D))'),
'+foo -foo +bar +foo -foo +a -a +b -b +c +d -d -c -bar'),
):
expected = [s.strip() for s in expected.split()]
for combo_string in combo_string_variants:
result = ['%s%s' % ('+' if pressed else '-', key)
for key, pressed in parse_key_combo(combo_string)]
msg = (
'parse_key_combo(%r):\n'
' result : %r\n'
' expected: %r\n'
% (combo_string, result, expected)
)
self.assertEqual(result, expected, msg=msg)
def test_bad_keyname(self):
name2code = { c: c for c in '123abc' }
combo_string = '1 (c) 2 bad 3 (a b c)'
msg = 'parse_key_combo(%r): ValueError not raised' % (
combo_string,
)
with self.assertRaisesWithMessage(ValueError, msg):
parse_key_combo(combo_string, key_name_to_key_code=name2code.get)
def test_aliasing(self):
name2code = {
'1' : 10,
'exclam': 10,
}
self.assertListEqual(list(parse_key_combo('1 exclam', key_name_to_key_code=name2code.get)),
| [(10, True), (10, False),
(10, True), (10, False)])
for combo_string in (
'1 ( exclam )',
'exclam(1)',
):
msg = 'parse_key_combo(%r): ValueError not raised' % (
combo_string,
)
with self.assertRaisesWithMessage(ValueError, msg):
# Yielding the first key event should
# only happen after full validation.
parse_key_combo(combo_str | ing, key_name_to_key_code=name2code.get)
|
ResearchSoftwareInstitute/MyHPOM | hs_app_netCDF/tests/test_netcdf_metadata.py | Python | bsd-3-clause | 34,897 | 0.003754 | import os
import tempfile
import shutil
from django.test import TransactionTestCase
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import UploadedFile
from django.contrib.auth.models import Group
from django.db import IntegrityError
from hs_core import hydroshare
from hs_core.hydroshare import utils
from hs_core.models import CoreMetaData, Creator, Contributor, Coverage, Rights, Title, Language, \
Publisher, Identifier, Type, Subject, Description, Date, Format, Relation, Source
from hs_core.testing import MockIRODSTestCaseMixin, TestCaseCommonUtilities
from hs_app_netCDF.models import NetcdfResource, Variable, OriginalCoverage
class TestNetcdfMetaData(MockIRODSTestCaseMixin, TestCaseCommonUtilities, TransactionTestCase):
def setUp(self):
super(TestNetcdfMetaData, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.user = hydroshare.create_account(
'user1@nowhere.com',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
self.resNetcdf = hydroshare.create_resource(
resource_type='NetcdfResource',
owner=self.user,
title='Snow water equivalent estimation at TWDEF site from Oct 2009 to June 2010'
)
self.temp_dir = tempfile.mkdtemp()
self.netcdf_file_name = 'netcdf_valid.nc'
self.netcdf_file = 'hs_app_netCDF/tests/{}'.format(self.netcdf_file_name)
| target_temp_netcdf_file = os.path.join(self.temp_dir, self.netcdf_file_name)
shutil.copy(self.netcdf_file, target_temp_netcdf_file)
sel | f.netcdf_file_obj = open(target_temp_netcdf_file, 'r')
self.temp_dir = tempfile.mkdtemp()
self.netcdf_file_name_crs = 'netcdf_valid_crs.nc'
self.netcdf_file_crs = 'hs_app_netCDF/tests/{}'.format(self.netcdf_file_name_crs)
target_temp_netcdf_file_crs = os.path.join(self.temp_dir, self.netcdf_file_name_crs)
shutil.copy(self.netcdf_file_crs, target_temp_netcdf_file_crs)
self.netcdf_file_obj_crs = open(target_temp_netcdf_file_crs, 'r')
self.netcdf_bad_file_name = 'netcdf_invalid.nc'
self.netcdf_bad_file = 'hs_app_netCDF/tests/{}'.format(self.netcdf_bad_file_name)
target_temp_bad_netcdf_file = os.path.join(self.temp_dir, self.netcdf_bad_file_name)
shutil.copy(self.netcdf_bad_file, target_temp_bad_netcdf_file)
self.netcdf_bad_file_obj = open(target_temp_bad_netcdf_file, 'r')
temp_text_file = os.path.join(self.temp_dir, 'netcdf_text.txt')
text_file = open(temp_text_file, 'w')
text_file.write("NetCDF records")
self.text_file_obj = open(temp_text_file, 'r')
def tearDown(self):
super(TestNetcdfMetaData, self).tearDown()
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def test_allowed_file_types(self):
# test allowed file type is '.nc'
self.assertIn('.nc', NetcdfResource.get_supported_upload_file_types())
self.assertEqual(len(NetcdfResource.get_supported_upload_file_types()), 1)
# there should not be any content file
self.assertEqual(self.resNetcdf.files.all().count(), 0)
# trying to add a text file to this resource should raise exception
files = [UploadedFile(file=self.text_file_obj, name=self.text_file_obj.name)]
with self.assertRaises(utils.ResourceFileValidationException):
utils.resource_file_add_pre_process(resource=self.resNetcdf, files=files,
user=self.user,
extract_metadata=False)
# trying to add bad .nc file should raise file validation error
files = [UploadedFile(file=self.netcdf_bad_file_obj, name=self.netcdf_bad_file_name)]
with self.assertRaises(utils.ResourceFileValidationException):
utils.resource_file_add_pre_process(resource=self.resNetcdf, files=files,
user=self.user,
extract_metadata=False)
# trying to add valid .nc file should pass the file check
files = [UploadedFile(file=self.netcdf_file_obj, name=self.netcdf_file_name)]
utils.resource_file_add_pre_process(resource=self.resNetcdf, files=files, user=self.user,
extract_metadata=False)
utils.resource_file_add_process(resource=self.resNetcdf, files=files, user=self.user,
extract_metadata=False)
# there should be 2 content file: with ncdump file created by system
self.assertEqual(self.resNetcdf.files.all().count(), 2)
# file pre add process should raise validation error if we try to add a 2nd file
# when the resource has already 2 content files
with self.assertRaises(utils.ResourceFileValidationException):
utils.resource_file_add_pre_process(resource=self.resNetcdf, files=files,
user=self.user,
extract_metadata=False)
def test_metadata_extraction_on_resource_creation(self):
# passing the file object that points to the temp dir doesn't work - create_resource
# throws error open the file from the fixed file location
self._create_netcdf_resource()
super(TestNetcdfMetaData, self).netcdf_metadata_extraction()
def test_metadata_extraction_on_content_file_add(self):
# test the core metadata at this point
self.assertEqual(
self.resNetcdf.metadata.title.value,
'Snow water equivalent estimation at TWDEF site from Oct 2009 to June 2010')
# there shouldn't any abstract element
self.assertEqual(self.resNetcdf.metadata.description, None)
# there shouldn't any coverage element
self.assertEqual(self.resNetcdf.metadata.coverages.all().count(), 0)
# there shouldn't any format element
self.assertEqual(self.resNetcdf.metadata.formats.all().count(), 0)
# there shouldn't any subject element
self.assertEqual(self.resNetcdf.metadata.subjects.all().count(), 0)
# there shouldn't any contributor element
self.assertEqual(self.resNetcdf.metadata.contributors.all().count(), 0)
# there shouldn't any source element
self.assertEqual(self.resNetcdf.metadata.sources.all().count(), 0)
# there shouldn't any relation element
self.assertEqual(self.resNetcdf.metadata.relations.all().filter(type='cites').count(), 0)
# there should be 1 creator
self.assertEqual(self.resNetcdf.metadata.creators.all().count(), 1)
# there shouldn't any extended metadata
self.assertEqual(self.resNetcdf.metadata.ori_coverage.all().count(), 0)
self.assertEqual(self.resNetcdf.metadata.variables.all().count(), 0)
# adding a valid netcdf file should generate some core metadata and all extended metadata
files = [UploadedFile(file=self.netcdf_file_obj, name=self.netcdf_file_name)]
utils.resource_file_add_pre_process(resource=self.resNetcdf, files=files, user=self.user,
extract_metadata=False)
utils.resource_file_add_process(resource=self.resNetcdf, files=files, user=self.user,
extract_metadata=False)
super(TestNetcdfMetaData, self).netcdf_metadata_extraction(expected_creators_count=2)
def test_metadata_on_content_file_delete(self):
# test that some of the metadata is not deleted on content file deletion
# adding a valid netcdf file should generate some core metadata and all extended metadata
files = [UploadedFile(file=self.netcdf_file_obj, name=self.netcdf_file_name)]
utils.resource_file_add_pre_process(resource=self.resNetcdf, files=files, user=self.user,
|
mbrucher/AudioTK | tests/Python/EQ/PyATKEQ_custom_test.py | Python | bsd-3-clause | 1,779 | 0.023609 | #!/usr/bin/env python
from ATK.EQ import DoubleCustomFIRFilter, DoubleCustomIIRFilter
import numpy as np
from nose.tools import raises
sampling = 48000
@raises(ValueError)
def customFIR_bad_dim_test():
import numpy as np
filter = DoubleCustomFIRFilter()
filter.coefficients_in = np.ones(())
@raises(ValueError)
def customFIR_bad_size_test():
import numpy as np
filter = DoubleCustomFIRFilter()
filter.coefficients_in = np.ones((0))
def customFIR_check_coeffs_test():
import numpy as np
from numpy.testing import assert_almost_equal
filter = DoubleCustomFIRFilter()
f | ilter.coefficients_in = np.ones((10))
assert_almost_equal(filter.coefficients_in, np.ones((10)))
@raises(ValueError)
def customIIR_bad_dim_test():
import numpy as np
filter = DoubleCustomIIRFilter()
filter.coefficients_in = np.ones(())
@raises(ValueError)
def customIIR_bad_size_test():
import numpy as np
filter = DoubleCustomIIRFilter()
filter.coefficients_in = np.ones((0))
def customIIR_check_coeffs_test( | ):
import numpy as np
from numpy.testing import assert_almost_equal
filter = DoubleCustomIIRFilter()
filter.coefficients_in = np.ones((10))
assert_almost_equal(filter.coefficients_in, np.ones((10)))
@raises(ValueError)
def customIIR_bad_dim2_test():
import numpy as np
filter = DoubleCustomIIRFilter()
filter.coefficients_out = np.ones(())
@raises(ValueError)
def customIIR_bad_size2_test():
import numpy as np
filter = DoubleCustomIIRFilter()
filter.coefficients_out = np.ones((0))
def customIIR_check_coeffs2_test():
import numpy as np
from numpy.testing import assert_almost_equal
filter = DoubleCustomIIRFilter()
filter.coefficients_out = np.ones((10))
assert_almost_equal(filter.coefficients_out, np.ones((10)))
|
hswhite33/picturegame-bot | src/utils/MarkdownUtils.py | Python | mit | 279 | 0.007168 | def escapeChar | s(name):
'''Escape markdown characters to avoid usernames getting rendered weirdly in som | e cases.
The only known case is with underscores, and that is only in a special case, but we'll do a generalized fix here.
'''
return name.replace('_', '\\_')
|
florian-f/sklearn | sklearn/cluster/tests/test_hierarchical.py | Python | bsd-3-clause | 6,780 | 0 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012
# License: BSD-like
import warnings
from tempfile import mkdtemp
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster.hierarchical import _hc_cut
from sklearn.feature_extraction.image import grid_to_graph
def test_structured_ward_tree():
"""
Check that we obtain the correct solution for structured ward tree.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_components, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError, ward_tree, X.T, np.ones((4, 4)))
def test_unstructured_ward_tree():
"""
Check that we obtain the correct solution for unstructured ward tree.
"""
rnd = np.random.RandomState(0)
X = rnd.randn(50, 100)
for this_X in (X, X[0]):
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
children, n_nodes, n_leaves, parent = ward_tree(this_X.T,
n_clusters=10)
assert_equal(len(warning_list), 1)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_ward_tree():
"""
Check that the height of ward tree is sorted.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_nodes, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_ward_clustering():
"""
Check that we obtain the correct number of clusters with Ward clustering.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(100, 50)
connectivity = grid_to_graph(*mask.shape)
clustering = Ward(n_clusters=10, connectivity=connectivity)
clustering.fit(X)
# test caching
clustering = Ward(n_clusters=10, connectivity=connectivity,
memory=mkdtemp())
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
np.testing.assert_array_equal(clustering.labels_, labels)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == | 10)
# Check that we raise a TypeError on dense matrices
clustering = Ward(n_clusters=10,
connectivity=connectivity.todense())
assert_raises(TypeError, clustering.fit, X)
clustering = Ward(n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.todense()[:10, :10]))
assert_raises(ValueError, clustering.fit, X)
def test_ward_agglomeration():
"""
Check that | we obtain the correct solution in a simplistic case
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
assert_true(np.size(np.unique(ward.labels_)) == 5)
Xred = ward.transform(X)
assert_true(Xred.shape[1] == 5)
Xfull = ward.inverse_transform(Xred)
assert_true(np.unique(Xfull[0]).size == 5)
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit ward with full connectivity (i.e. unstructured) vs scipy
"""
from scipy.sparse import lil_matrix
n, p, k = 10, 5, 3
rnd = np.random.RandomState(0)
connectivity = lil_matrix(np.ones((n, n)))
for i in range(5):
X = .1 * rnd.normal(size=(n, p))
X -= 4 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = ward_tree(X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_popagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
from sklearn.neighbors import NearestNeighbors
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
nn = NearestNeighbors(n_neighbors=10).fit(X)
connectivity = nn.kneighbors_graph(X)
ward = Ward(n_clusters=4, connectivity=connectivity)
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_connectivity_fixing_non_lil():
"""
Check non regression of a bug if a non item assignable connectivity is
provided with more than one component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = Ward(connectivity=c)
with warnings.catch_warnings(record=True):
w.fit(x)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
akx/shoop | shoop/core/taxing/_context.py | Python | agpl-3.0 | 720 | 0.002778 | # This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
class TaxingContext(object):
def __init__(self, customer_tax_group=None, customer_tax_number=None, location=None):
self.customer_tax_group = customer_tax_group
self.c | ustomer_tax_number = customer_tax_number
self.country_code = getattr(location, "country_code", None) or getattr(location, "country", None)
self.region_code = geta | ttr(location, "region_code", None)
self.postal_code = getattr(location, "postal_code", None)
self.location = location
|
nathanbjenx/cairis | cairis/gui/OMPanel.py | Python | apache-2.0 | 3,444 | 0.01626 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import os
from cairis.core.armid import *
from EditorBase import EditorBase
from ObstaclesGrid import ObstaclesGrid
from datetime import datetime
__author__ = 'Shamal Faily'
NAME_POS = 0
DEFINITION_POS = 1
CATEGORY_POS = 2
ORIGINATOR_POS = 3
class OMPanel(EditorBase):
def __init__(self,parent,id):
self.objectDimension = 'obstacle'
self.objectLabel = 'Obstacles'
self.statusBar = parent.statusBar
EditorBase.__init__(self,parent,id)
self.grid = ObstaclesGrid(self,self.modCombo,self.envCombo)
self.sizer.Add( self.grid,1,wx.EXPAND )
self.resizeColumns()
self.SetSizeHints(1150,400)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_COMBOBOX, self.onObjectChange,id = RMFRAME_TOOL_COMBOOBJECT)
self.Bind(wx.EVT_COMBOBOX, self.onEnvironmentChange,id = RMFRAME_TOOL_COMBOENVIRONMENT)
def onObjectChange(self,evt):
obsName = self.modCombo.GetValue()
self.updateObjectSelection(obsName)
self.refresh()
def updateEnvironments(self):
obsName = self.modCombo.GetValue()
envs = self.dbProxy.obstacleEnvironments(obsName)
self.envCombo.SetItems(envs)
if (len(envs) > 0):
self.envCombo.SetValue(envs[1])
def resizeColumns(self):
self.grid.SetColSize(NAME_POS,200)
self.grid.SetColSize(DEFINITION_POS,450)
self.grid.SetColSize(CATEGORY_POS,150)
self.grid.SetColSize(ORIGINATOR_POS,150)
self.grid.SetDefaultRowSize(35)
def updateObjectSelection(self,selectedObs = ''):
obsName = self.modCombo.GetValue()
if (obsName != ''):
self.updateEnvironments()
else:
self.envCombo.Clear()
self.envCombo.SetItems([''])
self.envCombo.SetValue('')
self.modCombo.Clear()
envName = self.envCombo.GetValue()
if (selectedObs == ''):
obsMods = self.dbProxy.getDimensionNames(self.objectDimension,envName)
else:
obsMods = self.dbProxy.getSubObstacleNames(obsName,envName)
obsMods.sort()
self.modCombo.SetItems(obsMods)
if (selectedObs != ''):
self.modCombo.SetStringSelection(selectedObs)
self.refresh()
def onEnvironmentChange(self,evt):
obsName = self.modCombo.GetValue()
envName = self.envCombo.GetValue()
obsMods = self.dbProxy.getSubObstacleNames(obsName,envName)
self.modCombo.SetItems(obsMods)
self.refresh()
def relabel(self):
envName = self.envCombo.GetValue()
self.dbProxy.relabelObstacles(envName)
self.statusBar.SetStatusText(str(datetime.now())[:19] + ' : obstacles relabelled | ')
def addObject(self):
grid = self.FindWindowById(ID_REQGRID)
pos = grid.GetGridCursorR | ow()
grid.InsertRows(pos)
|
xaxa89/mitmproxy | test/mitmproxy/addons/test_streamfile.py | Python | mit | 1,682 | 0 | import pytest
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy import io
from mitmproxy import exceptions
from mitmproxy import options
from mitmproxy.addons import streamfile
def test_configure(tmpdir):
sa = streamfile.StreamFile()
with taddons.context(options=options.Options()) as tctx:
with pytest.raises(exceptions.OptionsError):
tctx.configure(sa, streamfile=str(tmpdir))
with pytest.raises(Exception, match="Invalid filter"):
tctx.configure(
sa, streamfile=str(tmpdir.join("foo")), streamfile_filter="~~"
)
tctx.configure(sa, streamfile_filter="foo")
assert sa.filt
tctx.configure(sa, streamfile_filter=None)
assert not s | a.filt
def rd(p):
x = io.FlowReader(open(p, "rb"))
return list(x.stream())
def test_tcp(tmpdir):
sa = streamfile.StreamFile()
with taddons.context() as tctx:
p = str | (tmpdir.join("foo"))
tctx.configure(sa, streamfile=p)
tt = tflow.ttcpflow()
sa.tcp_start(tt)
sa.tcp_end(tt)
tctx.configure(sa, streamfile=None)
assert rd(p)
def test_simple(tmpdir):
sa = streamfile.StreamFile()
with taddons.context() as tctx:
p = str(tmpdir.join("foo"))
tctx.configure(sa, streamfile=p)
f = tflow.tflow(resp=True)
sa.request(f)
sa.response(f)
tctx.configure(sa, streamfile=None)
assert rd(p)[0].response
tctx.configure(sa, streamfile="+" + p)
f = tflow.tflow()
sa.request(f)
tctx.configure(sa, streamfile=None)
assert not rd(p)[1].response
|
valhallasw/phabricator-tools | py/abd/abdt_rbranchnaming__t.py | Python | apache-2.0 | 3,352 | 0 | """Test suite for abdt_rbranchnaming."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concer | ns.
#
# Concerns:
# [XB] review names that are globally known to be bad are not accepted
# [XB] tracker names that are globally known to be bad are not accepted
# [XC] names that are known to be potential reviews aren't accepted as trackers
# [XC] names that | are known to be potential trackers aren't accepted as reviews
# [XD] ReviewBranches created by the scheme have the expected attributes
# [XD] ReviewBranches created by the scheme can create expected TrackerBranches
# [XD] TrackerBranches created by the scheme have the expected attributes
# [XD] there is a 1-1 relationship between tracker params and tracker names
# -----------------------------------------------------------------------------
# Tests:
# [ A] XXX: test_A_Breathing
# [XA] check_XA_Breathing
# [XB] check_XB_globally_invalid_review_tracker_names
# [XC] check_XC_potentially_valid_review_tracker_names
# [XD] check_XD_valid_reviews
# =============================================================================
from __future__ import absolute_import
import unittest
import abdt_namingtester
import abdt_rbranchnaming
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_naming(self):
return abdt_rbranchnaming.Naming()
def test_A_Breathing(self):
pass
def test_XA_Breathing(self):
abdt_namingtester.check_XA_Breathing(self)
def test_XB_globally_invalid_review_tracker_names(self):
abdt_namingtester.check_XB_globally_invalid_review_tracker_names(
self, self.make_naming())
def test_XC_potentially_valid_review_tracker_names(self):
abdt_namingtester.check_XC_potentially_valid_review_tracker_names(
self, self.make_naming())
def test_XD_valid_reviews(self):
names_to_properties = {}
for properties in abdt_namingtester.VALID_REVIEW_PROPERTIES:
name = 'r/{base}/{description}'.format(
description=properties.description,
base=properties.base)
assert name not in names_to_properties
names_to_properties[name] = properties
abdt_namingtester.check_XD_valid_reviews(
self, self.make_naming(), names_to_properties)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
ildelusion/JSBot | hangupsbot/handlers/commands.py | Python | gpl-3.0 | 2,643 | 0.001933 | import re, shlex
import hangups
from hangupsbot.utils import text_to_segments
from hangupsbot.handlers import handler, StopEventHandling
from hangupsbot.commands import command
default_bot_alias = '/bot'
def find_bot_alias(aliases_list, text):
"""Return True if text starts with bot alias"""
command = text.split()[0].lower()
for alias in aliases_list:
if alias.lower().startswith('regex:') and re.search(alias[6:], command, re.IGNORECASE):
return True
elif command == alias.lower():
return True
return False
def is_bot_alias_too_long(text):
"""check whether the bot alias is too long or not"""
if default_bot_alias in text:
return True
else:
return False
@handler.r | egister(priority=5, event=hangups.ChatMessageEvent)
def handle_command(bot, event):
"""Handle command messages"""
# Test if message is not empty
if not event.text:
return
# Get list of bot aliases
aliases_li | st = bot.get_config_suboption(event.conv_id, 'commands_aliases')
if not aliases_list:
aliases_list = [default_bot_alias]
# Test if message starts with bot alias
if not find_bot_alias(aliases_list, event.text):
return
# Test if command handling is enabled
if not bot.get_config_suboption(event.conv_id, 'commands_enabled'):
raise StopEventHandling
# Parse message
line_args = shlex.split(event.text, posix=False)
# Test if command length is sufficient
if len(line_args) < 2:
yield from event.conv.send_message(
text_to_segments(_('{}: 무엇을 도와드릴까요?').format(event.user.full_name))
)
raise StopEventHandling
# Test if user has permissions for running command
commands_admin_list = command.get_admin_commands(bot, event.conv_id)
if commands_admin_list and line_args[1].lower() in commands_admin_list:
admins_list = bot.get_config_suboption(event.conv_id, 'admins')
if event.user_id.chat_id not in admins_list:
yield from event.conv.send_message(
text_to_segments(_('{}: 권한이 없습니다.').format(event.user.full_name))
)
raise StopEventHandling
# Run command
yield from command.run(bot, event, *line_args[1:])
#Check whether the bot alias is too long or not
if is_bot_alias_too_long(event.text):
yield from event.conv.send_message(
text_to_segments(_('**Tip**: /bot 대신에 /b, /, ! 등을 사용할 수 있어요'))
)
# Prevent other handlers from processing event
raise StopEventHandling
|
zstackio/zstack-woodpecker | integrationtest/vm/virt_plus/image_store/test_crt_2_vms_imgs_from_same_img.py | Python | apache-2.0 | 2,414 | 0.006628 | '''
Create 2 VMs with same image. Then commit 2 new images from 2 VMs.
@author: Youyk
'''
import time
import os
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import | zstackwoodpecker.zstack_test.zstack_test_image as test_image
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm
test_stub = test_lib.lib_get_specific_stub()
test_obj_dict = test_state.TestStateDict()
def test():
vm1 = test_stub.create_vm(vm_name = 'basic-test-vm')
test_obj_dict.add_vm(vm1)
#vm1.check()
image_creation_option = test_util.ImageOption()
backup_storage_list | = test_lib.lib_get_backup_storage_list_by_vm(vm1.vm)
for bs in backup_storage_list:
if bs.type in [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE, inventory.CEPH_BACKUP_STORAGE_TYPE]:
image_creation_option.set_backup_storage_uuid_list([backup_storage_list[0].uuid])
break
else:
vm1.destroy()
test_util.test_skip('Not find image store or ceph type backup storage.')
vm2 = test_stub.create_vm(vm_name = 'basic-test-vm')
test_obj_dict.add_vm(vm2)
image_creation_option.set_root_volume_uuid(vm1.vm.rootVolumeUuid)
image_creation_option.set_name('test_create_vm_images_with_same_name')
#image_creation_option.set_platform('Linux')
image1 = test_image.ZstackTestImage()
image1.set_creation_option(image_creation_option)
image1.create()
test_obj_dict.add_image(image1)
image1.check()
vm1.destroy()
image_creation_option.set_root_volume_uuid(vm2.vm.rootVolumeUuid)
image_creation_option.set_name('test_create_vm_images_with_same_name')
image2 = test_image.ZstackTestImage()
image2.set_creation_option(image_creation_option)
image2.create()
test_obj_dict.add_image(image2)
image2.check()
vm3 = test_stub.create_vm(image_name = 'test_create_vm_images_with_same_name')
test_obj_dict.add_vm(vm3)
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Create 2 VM images from same origin Image Successfully')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
|
obsidianforensics/unfurl | unfurl/tests/integration/test_api.py | Python | apache-2.0 | 729 | 0.004115 | import unittest
from unfurl.core import app as my_app
class TestApi(unittest.TestCase):
def setUp(self):
my_app.config["testing"] = True
self.client = my_app.test_client()
def test_home_ok(self):
| self.assertEqual(self.client.get("/", follow_redirects=True).status_code, 200)
def test_api_without_url_ok | (self):
self.assertEqual(self.client.get("/api/", follow_redirects=True).status_code, 200)
def test_api_call_ok(self):
response = self.client.get("/https://mastodon.cloud/@TimDuran/103453805855961797", follow_redirects=True)
self.assertEqual(response.status_code, 200)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
jxta/cc | vendor/Twisted-10.0.0/twisted/web/_auth/digest.py | Python | apache-2.0 | 1,700 | 0.001765 | # -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of RFC2617: HTTP Digest Authentication
@see: U{http://www.faqs.org/rfcs/rfc2617.html}
"""
from zope.interface import implements
from twisted.cred import credentials
from twisted.web.iweb import ICredentialFact | ory
class DigestCredentialFactory(object):
"""
Wrapper for L{digest.DigestCredentialFactory} that implements the
L{ICredentialFactory} interface.
"""
implements(ICredentialFactory)
scheme = 'digest'
def __init__(self, algorithm, authenticationRealm):
"""
Create the digest credential factory that this object wraps.
| """
self.digest = credentials.DigestCredentialFactory(algorithm,
authenticationRealm)
def getChallenge(self, request):
"""
Generate the challenge for use in the WWW-Authenticate header
@param request: The L{IRequest} to with access was denied and for the
response to which this challenge is being generated.
@return: The C{dict} that can be used to generate a WWW-Authenticate
header.
"""
return self.digest.getChallenge(request.getClientIP())
def decode(self, response, request):
"""
Create a L{twisted.cred.digest.DigestedCredentials} object from the
given response and request.
@see: L{ICredentialFactory.decode}
"""
return self.digest.decode(response,
request.method,
request.getClientIP())
|
ScOrPiOn9/FIT | ISJ/Projekt - automatické stahování titulků k filmům/Aplikace.py | Python | mit | 17,098 | 0.017838 | import xmlrpc.client
import sys, re, base64, gzip, os , io
# Autor Jan Hrivnak
# ISJ projekt
# Soubor: Aplikace.py
# spoustet s : export LC_ALL=cs_CZ.UTF-8;
class Aplikace:
def __init__(self):
self.IMDBid=0 #IMDB id filmu
self.subID=0 #ID zadanych titulku
self.nazevFilmu=""
self.auto=0
self.url="" # zadana url adresa ceskeho titulku
self.ost= xmlrpc.client.ServerProxy("http://api.opensubtitles.org/xml-rpc")
self.login = self.ost.LogIn('', '', 'cs', 'xhrivn01-isj')
self.token = self.login['token']
self.vybraneCZ=[]
self.vsechnyEN=[]
self.czTitulky=[] #vyhledane ceske titulky .
self.enTitulky=[] # vyhledane anglicke titulky
self.vysledek=[] # pole pro ulozeni jednotlivych zarovnanych vysledku
self.uspechy=[]
##############################################################################################################################
# NACTI TITULKY
##############################################################################################################################
def nactiTitulky(self, lang):
#print("dotaz na server na ziskani seznamu titulku ("+lang+")")
dotazNaOst = self.ost.SearchSubtitles(self.token, [{'sublanguageid':lang,'imdbid':self.IMDBid}])
if dotazNaOst['status'] != "200 OK":
sys.stderr.write("Server vratil chybu")
sys.exit(1)
elif dotazNaOst['data']==False:
sys.stderr.write("Zadne titulky nenalezeny")
sys.exit(1)
if lang=="cze":
print("Zakladni udaje o filmu:")
print("\tFilm:", dotazNaOst['data'][0]['MovieName'])
print("\tRok:", dotazNaOst['data'][0]['MovieYear'])
self.czTitulky=dotazNaOst['data']
else:
self.enTitulky=dotazNaOst['data']
print("uspesne nacteno", len(dotazNaOst['data']), "titulku ("+lang+")")
return len(dotazNaOst['data'])
##############################################################################################################################
# VYBER CZ TITULEK
##############################################################################################################################
def vyberCZtitulek(self):
nacteneTitulky = []
i=0
print("vyber ceskych titulku:")
print("----------------------------")
for titulek in self.czTitulky:
print(str(i)+'\t'+titulek['SubFileName'])
nacteneTitulky.append([{'SubtitleName':titulek['SubFileName'],'imdbID':self.IMDBid,'SubtitleID':titulek['IDSubtitleFile']}])
i+=1
print("----------------------------")
ok=0
if self.auto==0:
while(ok==0):
try:
vyber=input('Zadejte cislo vybraneho titulku: ')
ok=1
except:
sys.stderr.write("Nespravny vyber")
sys.exit(1)
if int(vyber) >= i:
sys.stderr.write("Nespravny vyber")
sys.exit(1)
else:
vyber=0
print("podle parametru --auto byly vybrany prvni titulky")
self.vybraneCZ=nacteneTitulky[int(vyber)]
print("uspesne vybrano")
##############################################################################################################################
# VYBER EN TITULEK
##############################################################################################################################
def vyberENtitulek(self):
nacteneTitulky = []
i=0
for titulek in self.enTitulky:
#print(str(i)+'\t'+titulek['SubFileName'])
nacteneTitulky.append([{'SubtitleName':titulek['SubFileName'],'imdbID':self.IMDBid,'SubtitleID':titulek['IDSubtitleFile']}])
self.vsechnyEN+=nacteneTitulky[i]
i+=1
##############################################################################################################################
# STAHNOUT
##############################################################################################################################
def stahnout(self, idTitulku, nazev, lang):
slozka="ISJ-xhrivn01-"+lang
#print("Nachystani slozky pro ulozeni titulku:", slozka)
if not os.path.exists(slozka):
try:
os.makedirs(slozka)
print("vytvorime slozku "+slozka)
except:
sys.stderr.write("Chyba pri vytvareni adresare")
sys.exit(1)
stahnuto=self.ost.DownloadSubtitles(self.token, [idTitulku])
if stahnuto['status']!="200 OK":
sys.stderr.write("Nepodarilo se stahnout titulky")
sys.exit(1)
print("Stahuji titulky", nazev,"v",lang)
try:
dekodovane=base64.b64decode(bytes(stahnuto['data'][0]['data'], "cp1250"))
except:
sys.stderr.write("Nepodarilo se dekodovat titulky")
sys.exit(1)
text=str(gzip.decompress(dekodovane), "cp1250")
try:
file=open(slozka+'/'+nazev,'w', encoding="cp1250") #otevreme soubor pro zapis
except:
sys.stderr.write("Nepodarilo se otevrit soubor pro zapis")
sys.exit(1)
file.write(text)
file.close()
#print("titulky uspesne stazeny a ulozeny")
############################################################################################################ | ##################
# | POROVNEJ
##############################################################################################################################
def porovnej(self):
czech=self.nacti(self.vybraneCZ[0]['SubtitleName'], "cze")
posledniCZtit=czech[-1][0]['konec']
for titulek in self.vsechnyEN:
eng=self.nacti(titulek['SubtitleName'], "eng")
#bereme v uvahu pouze ceske a anglicke titulky ktere maji cas posledniho titulku (=delky filmu) lisici se maximalne o 15 minut (mel jsem zde 5 minuty, ale nektere titulky jsou delsi ptze maji i neco na konci pod "skutecnymi" titulkami ve filmu")
posledniENGtit=eng[-1][0]['konec']
if (posledniENGtit > posledniCZtit-900) and (posledniENGtit < posledniCZtit+900): # cislo je povolena tolerance ve vterinach
print("Porovnavame titulky:",titulek['SubtitleName'])
promluvy=self.seradPromluvy(czech, eng)
if len(promluvy)!=0 and len(promluvy)>len(self.vysledek): # je toto reseni lepsi nez predchozi?
self.vysledek=promluvy # uloz ho
##############################################################################################################################
# NACTI
##############################################################################################################################
def nacti(self, nazev, lang):
slozka="ISJ-xhrivn01-"+lang
try:
file=open(slozka+'/'+nazev,'r', encoding="cp1250")
except:
sys.stderr.write("Nepodarilo se otevrit soubor pro cteni")
sys.exit(1)
text=file.read()
regSrt=re.compile("^.*\.srt$", re.DOTALL)
regSub=re.compile("^.*\.sub$", re.DOTALL)
if regSrt.match(nazev.lower()):
#print("nacteme", nazev,"jako .srt titulky ("+lang+")")
slovnik=self.srtTOslovnik(text)
elif regSub.match(nazev.lower()):
#print("nacteme", nazev,"jako .sub titulky ("+lang+")")
slovnik=self.subTOslovnik(text)
else:
sys.stderr.write("Chybny format titulku. Podporovany jsou pouze formaty .srt a .sub")
sys.exit(1)
if len(slovnik)!=0:
return slo |
ewanbarr/reynard | reynard/__init__.py | Python | mit | 31 | 0 | impo | rt mo | nitors
import servers
|
freedesktop-unofficial-mirror/gstreamer__cerbero | test/test_packages_common.py | Python | lgpl-2.1 | 3,804 | 0.000263 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.config import Platform, Distro, DistroVersion
from cerbero.packages import package
from cerbero.packages.packagesstore import PackagesStore
from test.test_build_common import create_cookbook
class Package1(package.Package):
name = 'gstreamer-test1'
shortdesc = 'GStreamer Test'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
deps = ['gstreamer-test2']
files = ['recipe1:misc:libs:bins']
platform_files = {
Platform.WINDOWS: ['recipe5:libs']
}
class Package2(package.Package):
name = 'gstreamer-test2'
shortdesc = 'GStreamer Test 2'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
files = ['recipe2:misc']
class Package3(package.Package):
name = 'gstreamer-test3'
shortdesc = 'GStreamer Test 3'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
files = ['recipe3:misc']
class Package4(package.Package):
name = 'gstreamer-test-bindings'
shortdesc = 'GStreamer Bindings'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
sys_deps = {Distro.DEBIAN: ['python'],
DistroVersion.FEDORA_16: ['python27']}
files = ['recipe4:misc']
class MetaPackage(package.MetaPackage):
name = "gstreamer-runtime"
shortdesc = "GStreamer runtime"
longdesc = "GStreamer runtime"
title = "GStreamer runtime"
url = "http://www.gstreamer.net"
version = '1.0'
uuid = '3ffe67b2-4565-411f-8287-e8faa892f853'
vendor = "GStreamer Project"
org = 'net.gstreamer'
packages = [
('gstreamer-test1', True, True),
('gstreamer-test3', False, True),
('gstreamer-test-bindings', False, False)]
platform_packages = {
Platform.LINUX: [('gstreamer-test2', False, False)]}
icon = "gstreamer.ico"
class App(package.App):
name = "gstreamer-app"
shortdesc = "GStreamer sample app"
longdesc = "GStreamer sample app"
title = "GStreamer sample app"
url = "http://www.gstreamer.net"
version = '1.0'
uuid = '3ffe67b2-4565-411f-8287-e8faa892f853'
vendor | = "GStreamer Project"
org = 'net.gstreamer'
app_recipe = 'recipe3'
deps = ['gstreamer-test1']
icon = "share/images/gstreamer.png"
embed_deps = True
class DummyConfig(object):
| pass
def create_store(config):
cookbook = create_cookbook(config)
store = PackagesStore(config, False)
for klass in [Package1, Package2, Package3, Package4, App]:
package = klass(config, store, cookbook)
package.__file__ = 'test/test_packages_common.py'
store.add_package(package)
for klass in [MetaPackage]:
package = klass(config, store)
package.__file__ = 'test/test_packages_common.py'
store.add_package(package)
return store
|
emsrc/daeso-dutch | lib/daeso_nl/string/ascii.py | Python | gpl-3.0 | 3,263 | 0.006384 | # -*- coding: utf-8 -*-
"""
encode a unicode string as an ascii byte string with smart replacement
Support for encoding in ascii where iso-88591-1 characters (i.e. those code
points which can be encoded in iso-8859-1 aka latin-1) beyond ascii chars
(i.e. those code points which can be encoded in ascii) are replaced with the
closest ascii equivalant. For example, 'e-accent-grave' is replaced by plain
'e', 'o-umlaut' is replaced by plain 'o', etc.
Importing this module will register an error handler called "smart_replace"
which can be used in combination with the "encode" method of unicode strings.
Example:
>>> import daeso_nl.string.ascii
>>> utf_8_string = "één"
>>> print utf_8_string.decode("utf-8").encode("ascii", "replace")
??n
>>> print utf_8_string.decode("utf-8").encode("ascii", "smart_replace")
een
"""
from codecs import register_error
# the mapping of iso-8859-1 chars to ascii chars
# encoded as utf-8
_iso_to_ascii_map = """
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161 ¡
162 ¢
163 £
164 ¤
165 ¥
166 ¦
167 §
168 ¨
169 © c
170 ª
171 « "
172 ¬
173
174 ® r
175 ¯
176 °
177 ±
178 ²
179 ³
180 ´
181 µ u
182 ¶
183 ·
184 ¸
185 ¹
186 º
187 »
188 ¼
189 ½
190 ¾
191 ¿
192 À A
193 Á A
194 Â A
195 Ã A
196 Ä A
197 Å A
198 Æ A
199 Ç C
200 È E
201 É E
202 Ê E
203 Ë E
204 Ì I
205 Í I
206 Î I
207 Ï I
208 Ð D
209 Ñ N
210 Ò O
211 Ó O
212 Ô O
213 Õ O
214 Ö O
215 × x
216 Ø O
217 Ù U
218 Ú U
219 Û U
220 Ü U
221 Ý | Y
222 Þ P
223 ß B
224 à a
225 á a
226 â a
227 ã a
228 ä a
229 å a
230 æ a
231 ç c
232 è e
233 é e
234 ê e
235 ë e
236 ì i
237 í i
238 î i
239 ï i
240 ð o
241 ñ n
242 ò o
243 ó o
244 ô o
245 õ o
246 ö o
247 ÷
248 ø o
249 ù u
250 ú u
251 û u
252 ü u
253 ý y
254 þ p
255 ÿ y
"""
# the unicode char that will be subsituted if no translation is available
_unknown = u"?"
def _make_translation_table():
# create a table mapping unicode points which can be | encoded in iso-8859-1
# to unicode points which can be encoded in ascii
translation = {}
for l in _iso_to_ascii_map.split("\n"):
try:
n, iso_char, ascii_char = l.strip().split()
except ValueError:
continue
# this file is utf-8 encoded, so we have to decode
translation[iso_char.decode("utf-8")] = ascii_char.decode("utf-8")
return translation
def _error_handler(exception):
assert exception.encoding == "ascii"
s = "".join([ _translation.get(c, _unknown)
for c in exception.object[exception.start:exception.end] ])
return (s, exception.end)
_translation = _make_translation_table()
register_error("smart_replace", _error_handler)
if __name__ == "__main__":
for l in _iso_to_ascii_map.split("\n"):
fields = l.strip().split()
try:
n, iso_char = fields[:2]
except ValueError:
continue
print n, iso_char, iso_char.decode("utf8").encode("ascii", "smart_replace")
|
onepercentclub/onepercentclub-site | apps/cowry/factory.py | Python | bsd-3-clause | 3,526 | 0.002836 | import sys
from apps.cowry.exceptions import PaymentMethodNotFound
from django.utils.importlib import import_module
def _load_from_module(path):
package, attr = path.rsplit('.', 1)
module = import_module(package)
return getattr(module, attr)
# TODO read django settings to find out what adapters to load.
# TODO Ensure not duplicate payment method names.
# ADAPTERS = getattr(settings, 'COWRY_ADAPTERS')
ADAPTERS = ('apps.cowry_docdata.adapters.DocDataPaymentAdapter',)
_adapters = []
for adapter_str in ADAPTERS:
adapter_class = _load_from_module(adapter_str)
_adapters.append(adapter_class())
def _adapter_for_payment_method(payment_method_id):
for adapter in _adapters:
for pmi in adapter.get_payment_methods():
if payment_method_id == pmi:
return adapter
raise PaymentMethodNotFound('', payment_method_id)
def create_payment_object(order, payment_method_id, payment_submethod='', amount='', currency=''):
adapter = _adapter_for_payment_method(payment_method_id)
payment = adapter.create_payment_object(order, payment_method_id, payment_submethod, amount, currency)
payment.save()
return payment
def get_payment_methods(amount=None, currency='', country='', recurring=None, pm_ids=None):
payment_methods = []
for adapter in _adapters:
cur_payment_methods = adapter.get_payment_methods()
for pm_id in cur_payment_methods:
if pm_ids is None or pm_id in pm_ids:
# Extract values from the configuration.
pm_config = cur_payment_methods[pm_id]
max_amount = pm_config.get('max_amount', sys.maxint)
min_amount = pm_config.get('min_amount', 0)
restricted_currencies = pm_config.get('restricted_currencies', (currency,))
restricted_countries = pm_config.get('restricted_countries', (country,))
supports_recurring = pm_config.get('supports_recurring', True)
supports_single = pm_config.get('supports_single', True)
# See if we need to exclude the current payment_method (pm).
add_pm = True
if amount and (amount > max_amount or amount < min_amount):
add_pm = False
if country not in restricted_countries:
add_pm = False
if currency not in restricted_currencies:
add_pm = False
if recurring and not supports_recurring:
add_pm = False
if not recurring and not supports_single:
add_pm = False
# For now we only return a few params. Later on we might want to return the entire object.
if add_pm:
payment_methods.append({'id': pm_id, 'name': pm_config.get('name')})
return payment_methods
def get_payment_method_ids(amount=None, currency='', country='', recurring=None, pm_ids=None):
payment_method_ids = []
for pm in get_payment_methods(amount=am | ount, currency=currency, country=country, recurring=recurring, pm_ids=pm_ids):
payment_method_ids.append(pm['id'])
return payment_method_ids
def get_payment_submethods(payment_method_id):
adapter = _adapter_for_payment_method(payment_method_id)
for payment_methods in adapter.get_payment_met | hod_config():
for pmi in payment_methods.keys():
config = payment_methods[pmi]
return config.get('submethods')
|
ctrlaltdel/neutrinator | vendor/keystoneauth1/loading/adapter.py | Python | gpl-3.0 | 14,041 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import adapter
from keystoneauth1.loading import _utils
from keystoneauth1.loading import base
__all__ = ('register_argparse_arguments',
'register_service_argparse_arguments',
'register_conf_options',
'load_from_conf_options',
'get_conf_options')
class Adapter(base.BaseLoader):
    @property
    def plugin_class(self):
        """The class this loader builds: :class:`keystoneauth1.adapter.Adapter`."""
        return adapter.Adapter
    def get_options(self):
        """Return the loader options for this plugin (none for Adapter)."""
        return []
    @staticmethod
    def get_conf_options(include_deprecated=True, deprecated_opts=None):
        """Get oslo_config options that are needed for a :py:class:`.Adapter`.

        These may be useful without being registered for config file generation
        or to manipulate the options before registering them yourself.

        The options that are set are:
            :service_type:      The default service_type for URL discovery.
            :service_name:      The default service_name for URL discovery.
            :interface:         The default interface for URL discovery.
                                (deprecated)
            :valid_interfaces:  List of acceptable interfaces for URL
                                discovery. Can be a list of any of
                                'public', 'internal' or 'admin'.
            :region_name:       The default region_name for URL discovery.
            :endpoint_override: Always use this endpoint URL for requests
                                for this client.
            :version:           The minimum version restricted to a given Major
                                API. Mutually exclusive with min_version and
                                max_version.
            :min_version:       The minimum major version of a given API,
                                intended to be used as the lower bound of a
                                range with max_version. Mutually exclusive with
                                version. If min_version is given with no
                                max_version it is as if max version is
                                'latest'.
            :max_version:       The maximum major version of a given API,
                                intended to be used as the upper bound of a
                                range with min_version. Mutually exclusive with
                                version.

        :param include_deprecated: If True (the default, for backward
                                   compatibility), deprecated options are
                                   included in the result. If False, they are
                                   excluded.
        :param dict deprecated_opts: Deprecated options that should be included
             in the definition of new options. This should be a dict from the
             name of the new option to a list of oslo.DeprecatedOpts that
             correspond to the new option. (optional)

             For example, to support the ``api_endpoint`` option pointing to
             the new ``endpoint_override`` option name::

                 old_opt = oslo_cfg.DeprecatedOpt('api_endpoint', 'old_group')
                 deprecated_opts={'endpoint_override': [old_opt]}

        :returns: A list of oslo_config options.
        """
        cfg = _utils.get_oslo_config()
        if deprecated_opts is None:
            deprecated_opts = {}
        # This is goofy, but need to support hyphens *or* underscores
        deprecated_opts = {name.replace('_', '-'): opt
                           for name, opt in deprecated_opts.items()}
        opts = [cfg.StrOpt('service-type',
                           deprecated_opts=deprecated_opts.get('service-type'),
                           help='The default service_type for endpoint URL '
                                'discovery.'),
                cfg.StrOpt('service-name',
                           deprecated_opts=deprecated_opts.get('service-name'),
                           help='The default service_name for endpoint URL '
                                'discovery.'),
                cfg.ListOpt('valid-interfaces',
                            deprecated_opts=deprecated_opts.get(
                                'valid-interfaces'),
                            help='List of interfaces, in order of preference, '
                                 'for endpoint URL.'),
                cfg.StrOpt('region-name',
                           deprecated_opts=deprecated_opts.get('region-name'),
                           help='The default region_name for endpoint URL '
                                'discovery.'),
                cfg.StrOpt('endpoint-override',
                           deprecated_opts=deprecated_opts.get(
                               'endpoint-override'),
                           help='Always use this endpoint URL for requests '
                                'for this client. NOTE: The unversioned '
                                'endpoint should be specified here; to '
                                'request a particular API version, use the '
                                '`version`, `min-version`, and/or '
                                '`max-version` options.'),
                cfg.StrOpt('version',
                           deprecated_opts=deprecated_opts.get('version'),
                           help='Minimum Major API version within a given '
                                'Major API version for endpoint URL '
                                'discovery. Mutually exclusive with '
                                'min_version and max_version'),
                cfg.StrOpt('min-version',
                           deprecated_opts=deprecated_opts.get('min-version'),
                           help='The minimum major version of a given API, '
                                'intended to be used as the lower bound of a '
                                'range with max_version. Mutually exclusive '
                                'with version. If min_version is given with '
                                'no max_version it is as if max version is '
                                '"latest".'),
                cfg.StrOpt('max-version',
                           deprecated_opts=deprecated_opts.get('max-version'),
                           help='The maximum major version of a given API, '
                                'intended to be used as the upper bound of a '
                                'range with min_version. Mutually exclusive '
                                'with version.'),
                ]
        if include_deprecated:
            opts += [
                cfg.StrOpt('interface',
                           help='The default interface for endpoint URL '
                                'discovery.',
                           deprecated_for_removal=True,
                           deprecated_reason='Using valid-interfaces is'
                                             ' preferrable because it is'
                                             ' capable of accepting a list of'
                                             ' possible interfaces.'),
            ]
        return opts
def register_conf_options(self, conf, group, include_deprecated=True,
deprecated_opts=None):
"""Register the oslo_config options that are needed for an Adapter.
The options that are set are:
:service_type: The default service_type for URL discovery.
:service_name: The default service_name for URL discovery.
:interface: The default i |
riking/youtube-dl | test/test_youtube_signature.py | Python | unlicense | 4,167 | 0.00504 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re
import string
from youtube_dl.extractor import YoutubeIE
from youtube_dl.utils import compat_str, compat_urlretrieve
_TESTS = [
(
'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
'js',
86,
'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
'js',
85,
'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
'js',
90,
']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
'js',
84,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
'js',
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
'swf',
86,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
),
(
'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
'swf',
'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
'9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
'js',
84,
'123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
'js',
83,
'123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
'js',
'4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
'82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
)
]
class TestSignature(unittest.TestCase):
    """Container for the signature tests generated dynamically by make_tfunc."""
    def setUp(self):
        # Downloaded player files are cached under test/testdata; create on demand.
        TEST_DIR = os.path.dirname(os.path.abspath(__file__))
        self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
        if not os.path.exists(self.TESTDATA_DIR):
            os.mkdir(self.TESTDATA_DIR)
def make_tfunc(url, stype, sig_input, expected_sig):
    """Build one signature-decryption test from a spec tuple and attach it
    to TestSignature.

    :param url: player URL; its basename becomes the test id
    :param stype: 'js' or 'swf' — selects the extractor's parser
    :param sig_input: either an int (take that many chars of
                      string.printable) or the literal scrambled signature
    :param expected_sig: the signature the parsed function must produce
    """
    m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
    assert m, '%r should follow URL format' % url
    test_id = m.group(1)

    def test_func(self):
        basename = 'player-%s.%s' % (test_id, stype)
        fn = os.path.join(self.TESTDATA_DIR, basename)
        # Download the player once; subsequent runs use the cached copy.
        if not os.path.exists(fn):
            compat_urlretrieve(url, fn)
        ie = YoutubeIE()
        if stype == 'js':
            with io.open(fn, encoding='utf-8') as testf:
                jscode = testf.read()
            func = ie._parse_sig_js(jscode)
        else:
            assert stype == 'swf'
            with open(fn, 'rb') as testf:
                swfcode = testf.read()
            func = ie._parse_sig_swf(swfcode)
        src_sig = (
            compat_str(string.printable[:sig_input])
            if isinstance(sig_input, int) else sig_input)
        got_sig = func(src_sig)
        self.assertEqual(got_sig, expected_sig)

    # str() keeps the name a native str on Python 2 (unicode_literals is on).
    test_func.__name__ = str('test_signature_' + stype + '_' + test_id)
    setattr(TestSignature, test_func.__name__, test_func)
# Generate one test method on TestSignature per specification tuple.
for test_spec in _TESTS:
    make_tfunc(*test_spec)
if __name__ == '__main__':
    unittest.main()
|
class Warrior(object):
    """A corewar warrior: a named, authored program with a running score."""

    def __init__(self, name, author, source, score=0):
        """Create a warrior.

        :param name: display name of the warrior
        :param author: who wrote it
        :param source: program source text
        :param score: initial score (defaults to 0)
        """
        self.name = name
        self.author = author
        self.source = source
        self.score = score

    def __repr__(self):
        # Handy when inspecting tournament state in a debugger/REPL.
        return "Warrior(name=%r, author=%r, score=%r)" % (
            self.name, self.author, self.score)
|
jobovy/flexgp | flexgp/trainingSet.py | Python | bsd-3-clause | 5,727 | 0.015191 | ###############################################################################
# trainingSet: training-set class for training a GP
#
# main routine: __init__
#
# example usage:
#
# trainingSetObject= trainingSet(listx=,listy=,noise=)
#
# where listx is a list of abcissae, listy is the corresponding set ordinates
# and noise is the noise in listy
#
# actual example: trainSet= trainingSet(listx=mjd[band],
# listy=m[band]-numpy.mean(m[band]),
# noise=err_m[band])
###############################################################################
import numpy
def _coerce_noise(value, argname):
    """Normalize a noise specification into (value, uniformFlag).

    Floats (or float-convertible scalars) count as 'uniform' noise; lists
    and other indexables become float64 ndarrays of per-point noise.
    NOTE(review): an ndarray input is flagged uniform here, mirroring the
    original code; that looks inconsistent with the list branch — confirm.

    :raises trainingSetError: if *value* is neither indexable nor a scalar.
    """
    if isinstance(value, float):
        return value, True
    if isinstance(value, list):
        return numpy.array(value, dtype=numpy.float64), False
    if isinstance(value, numpy.ndarray):
        return value, True
    try:
        value[0]
    except TypeError:
        # Not indexable: fall back to interpreting it as a scalar.
        try:
            scalar = float(value)
        except ValueError:
            raise trainingSetError("'%s=' noise parameter should be a float, list of floats, or numpy array" % argname)
        return scalar, True
    return numpy.array(value, dtype=numpy.float64), False


class trainingSet:
    """
    trainingSet: Class representing a set of training
                 points to train the (SP)GP
    """
    def __init__(self, **kwargs):
        """
        NAME:
           __init__
        PURPOSE:
           Initialize a training set object
        INPUT:
           Either:
              listx= a list of ndarrays of training set inputs [N,dim]
              listy= a list or ndarray of training set outputs [N]
              noise= list of noise in y
              or noiseCovar= list of noise**2. in y
        OUTPUT:
        HISTORY:
           2010-02-12 - Written - Bovy (NYU)
        """
        # dict.has_key was removed in Python 3; 'in' works on both 2 and 3.
        if 'listx' in kwargs:
            listx = kwargs['listx']
            if not isinstance(listx, list) and not isinstance(listx, numpy.ndarray):
                raise trainingSetError("Your 'listx=' object is not a list or ndarray")
            elif isinstance(listx, list):
                self.listx = listx
            elif len(listx.shape) == 1:  # one-dimensional ndarray
                self.listx = list(listx)
            else:  # multi-dimensional ndarray: one entry per row
                self.listx = [numpy.array(listx[ii, :]) for ii in range(listx.shape[0])]
        if 'listy' in kwargs:
            listy = kwargs['listy']
            if not isinstance(listy, list) and not isinstance(listy, numpy.ndarray):
                raise trainingSetError("Your 'listy=' object is not a list or ndarray")
            elif isinstance(listy, list):
                self.listy = numpy.array(listy)
            else:  # ndarray is stored as-is
                self.listy = listy
        # NOTE(review): as in the original, this raises AttributeError when
        # no 'listy=' was supplied.
        self.nTraining = len(self.listy)
        if 'noiseCovar' in kwargs:
            self.noiseCovar, self.uniformNoise = _coerce_noise(kwargs['noiseCovar'], 'noiseCovar')
            self.hasNoise = True
        elif 'noise' in kwargs:
            self.noise, self.uniformNoise = _coerce_noise(kwargs['noise'], 'noise')
            self.noiseCovar = self.noise**2.
            self.hasNoise = True
        else:
            self.hasNoise = False

    def __copy__(self):
        """Shallow copy via re-construction from the stored attributes."""
        if self.hasNoise:
            return self.__class__(listx=self.listx, listy=self.listy,
                                  noiseCovar=self.noiseCovar)
        return self.__class__(listx=self.listx, listy=self.listy)

    def __deepcopy__(self, memo=None):
        """Deep copy via re-construction from deep-copied attributes.

        Fixes from the original: the mutable default ``memo={}`` is replaced
        by None, and two unreachable statements after the returns (which
        referenced an undefined ``result``) were removed.
        """
        from copy import deepcopy
        if memo is None:
            memo = {}
        newlistx = deepcopy(self.listx, memo)
        newlisty = deepcopy(self.listy, memo)
        if self.hasNoise:
            return self.__class__(listx=newlistx, listy=newlisty,
                                  noiseCovar=deepcopy(self.noiseCovar, memo))
        return self.__class__(listx=newlistx, listy=newlisty)
class trainingSetError(Exception):
    """Raised when a training set is constructed from invalid input."""

    def __init__(self, value):
        # Keep the offending value so callers can inspect it programmatically.
        self.value = value

    def __str__(self):
        # Show the repr of the stored value, matching how it was passed in.
        return "%r" % (self.value,)
|
mlecours/fake-switches | tests/cisco/test_cisco_auto_enabled_switch.py | Python | apache-2.0 | 1,464 | 0.002049 | import unittest
from flexmock import flexmock_teardown
from tests.util.global_reactor import cisco_switch_ip, \
cisco_auto_enabled_switch_ssh_port, cisco_auto_enabled_switch_telnet_port
from tests.util.protocol_util import SshTester, TelnetTester, with_protocol
class TestCiscoAutoEnabledSwitchProtocol(unittest.TestCase):
    """Shared scenario for a Cisco switch that starts out already enabled.

    Transport-specific subclasses supply the client via create_client();
    __test__ = False keeps this abstract base out of test collection.
    """
    __test__ = False

    def setUp(self):
        self.protocol = self.create_client()

    def tearDown(self):
        flexmock_teardown()

    @with_protocol
    def test_enable_command_requires_a_password(self, t):
        # On an auto-enabled switch, "enable" drops straight to the priv prompt.
        t.write("enable")
        t.read("my_switch#")
        t.write("terminal length 0")
        t.read("my_switch#")
        t.write("terminal width 0")
        t.read("my_switch#")
        t.write("configure terminal")
        t.readln("Enter configuration commands, one per line.  End with CNTL/Z.")
        t.read("my_switch(config)#")
        t.write("exit")
        t.read("my_switch#")

    def create_client(self):
        # Fix: `raise NotImplemented()` fails with TypeError (NotImplemented
        # is not callable and not an exception); use the proper exception.
        raise NotImplementedError()
class TestCiscoSwitchProtocolSSH(TestCiscoAutoEnabledSwitchProtocol):
    """Run the shared auto-enabled scenario over SSH."""
    __test__ = True

    def create_client(self):
        # Repairs a corrupted token in the original source ("'root', | 'root'").
        return SshTester("ssh", cisco_switch_ip, cisco_auto_enabled_switch_ssh_port, 'root', 'root')
class Test | CiscoSwitchProtocolTelnet(TestCiscoAutoEnabledSwitchProtocol):
__test__ = True
def create_client(self):
return TelnetTester("telnet", cisco_switch_ip, cisco_auto_enabled_switch_telnet_port, 'root', 'root')
|
Cloudzero/cloudzero-reactor-aws | reactor/common/plugin_loader.py | Python | bsd-3-clause | 2,965 | 0.005059 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-present, CloudZero, Inc. All rights reserved.
# Licensed under the BSD-style license. See LICENSE file in the project root for full license information.
"""
Module that provides the core functionality used by various plugins.
"""
import re
import importlib
import os.path as path
import glob
import lambda_tools
logger = lambda_tools.setup_logging('reactor')
def list_available_plugins_for_loader(loader_module):
    """
    Obtain a list of all plugins in a directory relative to a given loader module

    Args:
        loader_module (module): The loader module that is located adjacent to the plugins directory

    Returns:
        list(tuple) - The list of plugins. item[0] is the top-level key (usually service_type)
                      and item[1] is the sub-key, usually resource_type.
    """
    base_dir = path.dirname(loader_module.__file__)
    # NOTE: glob is called without recursive=True, so "**" matches exactly
    # one directory level (plugins one folder below the loader).
    candidates = glob.glob(base_dir + "/**/*.py")
    plugins = []
    for candidate in candidates:
        if not path.isfile(candidate) or candidate.endswith('__init__.py'):
            continue
        service_type = path.dirname(candidate).split('/')[-1]
        resource_type = path.basename(candidate)[:-3].replace('_', '-')
        plugins.append((service_type, resource_type))
    return plugins
def find_plugin(import_template, *args):
    """
    Args:
        import_template (str): A string template used to build an import expression, for example
                               'reactor.plugins.{}.{}'
        *args (iterable(str)): The keys used to load a plugin given the import_template. For example
                               given a template 'reactor.plugins.{}.{}', ['foo', 'bar'] will attempt
                               to import 'reactor.plugins.foo.bar'.
                               Keys can only be comprised of alphanumerics and hyphens.
    Returns:
        A module if a matching plugin could be loaded or None if it couldn't be loaded
    """
    for arg in args:
        # Raw string so the regex escape is explicit (was a plain '...\-...').
        if not re.match(r'^[a-z0-9A-Z\-]+$', arg):
            # Protect against someone getting sneaky and trying to import
            # modules outside the plugin folder.
            logger.warning(f'Non-Alphanumeric plugin key {arg} received')
            return None
    if not all(args):
        return None
    # Hyphenated keys map onto underscore module names.
    normalized_keys = [arg.replace('-', '_') for arg in args]
    try:
        module = importlib.import_module(import_template.format(*normalized_keys))
        logger.debug(f'Found plugin for {normalized_keys}: {module}')
        return module
    except ModuleNotFoundError:
        message = f'No plugin found for {normalized_keys}, attempting to load default plugin for {normalized_keys[0]}'
        logger.debug(message)
        try:
            # Fall back to <service>/default before giving up entirely.
            default_plugin_keys = [normalized_keys[0], 'default']
            module = importlib.import_module(import_template.format(*default_plugin_keys))
            logger.debug(f'Loaded default plugin for {normalized_keys[0]}')
            return module
        except ModuleNotFoundError:
            logger.debug(f'No default plugin found for {normalized_keys[0]}')
            return None
optima-ict/odoo | addons/professional_templates/controllers/__init__.py | Python | agpl-3.0 | 58 | 0.017241 | # -*- coding: utf-8 -*-
impo | rt controllers
#imp | ort models
|
jason-weirather/AlignQC | scripts/alignqc_annotation_to_bed_depth.py | Python | apache-2.0 | 2,431 | 0.032086 | #!/usr/bin/env python
import sys, argparse, gzip
from Bio.Format.GPD import GPD
from Bio.Range import ranges_to_coverage
def main():
  """Convert AlignQC best-annotation output plus a genePred into a
  BED-like per-base coverage file (chrom, start0, end, depth)."""
  parser = argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('input',help="Use - for STDIN")
  parser.add_argument('genepred',help="the genepred used for this alignqc")
  parser.add_argument('--min_exons',type=int,default=1,help="At least this number of exons")
  parser.add_argument('--full',action='store_true',help="only use full matches")
  parser.add_argument('-o','--output',help="OUTPUT file or nothing for STDOUT")
  args = parser.parse_args()
  # Input may be STDIN, gzip, or plain text.
  inf = sys.stdin
  if args.input != '-':
    if args.input[-3:]=='.gz':
      inf = gzip.open(args.input)
    else: inf = open(args.input)
  # genes[gene]['transcripts'][tx] counts annotation hits per transcript.
  genes = {}
  sys.stderr.write("Reading annotation file\n")
  for line in inf:
    f = line.rstrip().split("\t")
    gene = f[2]
    tx = f[3]
    type = f[4]  # (shadows the builtin `type` within this loop)
    if args.full and type != 'full': continue
    if gene not in genes:
      genes[gene] = {}
      genes[gene]['transcripts'] = {}
      genes[gene]['cnt'] = 0
    if tx not in genes[gene]['transcripts']:
      genes[gene]['transcripts'][tx] = 0
      genes[gene]['cnt'] += 1
    genes[gene]['transcripts'][tx] += 1
  inf.close()
  # txs maps transcript name -> list of exon ranges from the genePred.
  txs = {}
  sys.stderr.write("Reading genepred file\n")
  z = 0
  with open(args.genepred) as inf:
    for line in inf:
      z +=1
      if z%1000==0: sys.stderr.write(str(z)+"  \r")
      gpd = GPD(line)
      exs = []
      for ex in gpd.exons:
        exs.append(ex.range)
      txs[gpd.get_transcript_name()] = exs
  sys.stderr.write("\n")
  # Repeat each transcript's exons once per observed read to weight coverage.
  vals = []
  sys.stderr.write("Traversing annotation file\n")
  for gene in genes:
    for tx in genes[gene]['transcripts']:
      v = genes[gene]['transcripts'][tx]
      exons = txs[tx]
      if len(exons) < args.min_exons: continue
      for i in range(0,v):
        vals += exons[:]
  sys.stderr.write("Generating coverage file "+str(len(vals))+"\n")
  of = sys.stdout
  if args.output:
    if args.output[-3:]=='.gz':
      of = gzip.open(args.output,'w')
    else:
      of = open(args.output,'w')
  # ranges_to_coverage collapses the weighted ranges into depth intervals.
  covs = ranges_to_coverage(vals)
  for v in covs:
    # BED-style: start is converted to 0-based, end stays 1-based inclusive.
    of.write(v.chr+"\t"+str(v.start-1)+"\t"+str(v.end)+"\t"+str(v.get_payload())+"\n")
  # of.write(tx+"\t"+gene+"\t"+str(genes[gene]['transcripts'][tx])+"\t"+str(genes[gene]['cnt'])+"\n")
  of.close()
main()
|
geekone/mitiyu | mitiyu/wsgi.py | Python | apache-2.0 | 387 | 0.002584 | """
WSGI c | onfig for mitiyu project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings before the WSGI app is constructed.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mitiyu.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
|
cocosli/antlr4 | runtime/Python3/src/antlr4/PredictionContext.py | Python | bsd-3-clause | 24,709 | 0.008256 | #
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain | the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS | IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#/
from io import StringIO
from antlr4.RuleContext import RuleContext
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNState import ATNState
class PredictionContext(object):
    """Base class for ATN prediction contexts: immutable linked stacks of
    rule-invocation return states, compared via precomputed hash codes."""
    # Represents {@code $} in local context prediction, which means wildcard.
    # {@code#+x =#}.
    #/
    EMPTY = None
    # Represents {@code $} in an array in full context mode, when {@code $}
    # doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
    # {@code $} = {@link #EMPTY_RETURN_STATE}.
    #/
    EMPTY_RETURN_STATE = 0x7FFFFFFF
    globalNodeCount = 1
    id = globalNodeCount
    # Stores the computed hash code of this {@link PredictionContext}. The hash
    # code is computed in parts to match the following reference algorithm.
    #
    # <pre>
    #  private int referenceHashCode() {
    #      int hash = {@link MurmurHash#initialize MurmurHash.initialize}({@link #INITIAL_HASH});
    #
    #      for (int i = 0; i &lt; {@link #size()}; i++) {
    #          hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getParent getParent}(i));
    #      }
    #
    #      for (int i = 0; i &lt; {@link #size()}; i++) {
    #          hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getReturnState getReturnState}(i));
    #      }
    #
    #      hash = {@link MurmurHash#finish MurmurHash.finish}(hash, 2# {@link #size()});
    #      return hash;
    #  }
    # </pre>
    #/
    def __init__(self, cachedHashCode:int):
        # Hash is computed once at construction; contexts are immutable.
        self.cachedHashCode = cachedHashCode
    # This means only the {@link #EMPTY} context is in set.
    def isEmpty(self):
        return self is self.EMPTY
    def hasEmptyPath(self):
        # True when the last slot is the $ (empty) return state.
        return self.getReturnState(len(self) - 1) == self.EMPTY_RETURN_STATE
    def __hash__(self):
        return self.cachedHashCode
def calculateHashCode(parent:PredictionContext, returnState:int):
    """Combine a parent context and a return state into one hash value by
    hashing the concatenation of their string forms."""
    return hash("%s%s" % (parent, returnState))
def calculateEmptyHashCode():
    """Hash value for a context with no parent: the hash of the empty string."""
    return hash(str())
# Used to cache {@link PredictionContext} objects. Its used for the shared
# context cash associated with contexts in DFA states. This cache
# can be used for both lexers and parsers.
class PredictionContextCache(object):
    """Cache of PredictionContext objects shared by DFA states, usable by
    both lexers and parsers, so equal contexts are represented once."""

    def __init__(self):
        self.cache = dict()

    # Add a context to the cache and return it. If the context already exists,
    # return that one instead and do not add a new context to the cache.
    # Protect shared cache from unsafe thread access.
    #
    def add(self, ctx:PredictionContext):
        if ctx==PredictionContext.EMPTY:
            # The shared EMPTY singleton is never stored.
            return PredictionContext.EMPTY
        # setdefault performs lookup-or-insert in a single dict operation.
        return self.cache.setdefault(ctx, ctx)

    def get(self, ctx:PredictionContext):
        """Return the cached instance equal to *ctx*, or None."""
        return self.cache.get(ctx, None)

    def __len__(self):
        return len(self.cache)
class SingletonPredictionContext(PredictionContext):
    """A prediction context holding exactly one (parent, returnState) pair."""
    @staticmethod
    def create(parent:PredictionContext , returnState:int ):
        # Factory that collapses (None, EMPTY_RETURN_STATE) to the shared EMPTY.
        if returnState == PredictionContext.EMPTY_RETURN_STATE and parent is None:
            # someone can pass in the bits of an array ctx that mean $
            return SingletonPredictionContext.EMPTY
        else:
            return SingletonPredictionContext(parent, returnState)
    def __init__(self, parent:PredictionContext, returnState:int):
        assert returnState!=ATNState.INVALID_STATE_NUMBER
        # Precompute the hash once; contexts are immutable.
        hashCode = calculateHashCode(parent, returnState) if parent is not None else calculateEmptyHashCode()
        super().__init__(hashCode)
        self.parentCtx = parent
        self.returnState = returnState
    def __len__(self):
        # A singleton context always has exactly one slot.
        return 1
    def getParent(self, index:int):
        assert index == 0
        return self.parentCtx
    def getReturnState(self, index:int):
        assert index == 0
        return self.returnState
    def __eq__(self, other):
        if self is other:
            return True
        elif other is None:
            return False
        elif not isinstance(other, SingletonPredictionContext):
            return False
        elif hash(self) != hash(other):
            return False # can't be same if hash is different
        else:
            return self.returnState == other.returnState and self.parentCtx==other.parentCtx
    def __hash__(self):
        return self.cachedHashCode
    def __str__(self):
        # Render as "<returnState> <parent chain>"; "$" marks the empty state.
        up = "" if self.parentCtx is None else str(self.parentCtx)
        if len(up)==0:
            if self.returnState == self.EMPTY_RETURN_STATE:
                return "$"
            else:
                return str(self.returnState)
        else:
            return str(self.returnState) + " " + up
class EmptyPredictionContext(SingletonPredictionContext):
    """The singleton context for ``$`` — an empty invocation stack."""
    def __init__(self):
        super().__init__(None, self.EMPTY_RETURN_STATE)
    def isEmpty(self):
        return True
    def getParent(self, index:int):
        return None
    def getReturnState(self, index:int):
        return self.returnState
    def __eq__(self, other):
        # Only one instance ever exists, so identity equality suffices.
        return self is other
    def __hash__(self):
        # Fix: a Python 3 class that overrides __eq__ without __hash__ gets
        # __hash__ = None, making the EMPTY singleton unhashable and breaking
        # dict/set-based context caches; restore the inherited hash value.
        return self.cachedHashCode
    def __str__(self):
        return "$"
# The canonical shared instance of the empty context.
PredictionContext.EMPTY = EmptyPredictionContext()
class ArrayPredictionContext(PredictionContext):
# Parent can be null only if full ctx mode and we make an array
# from {@link #EMPTY} and non-empty. We merge {@link #EMPTY} by using null parent and
# returnState == {@link #EMPTY_RETURN_STATE}.
def __init__(self, parents:list, returnStates:list):
super().__init__(calculateHashCode(parents, returnStates))
assert parents is not None and len(parents)>0
assert returnStates is not None and len(returnStates)>0
self.parents = parents
self.returnStates = returnStates
def isEmpty(self):
# since EMPTY_RETURN_STATE can only appear in the last position, we
# don't need to verify that size==1
return self.returnStates[0]==PredictionContext.EMPTY_RETURN_STATE
def __len__(self):
return len(self.returnStates)
def getParent(self, index:int):
return self.parents[index]
def getReturnState(self, index:int):
return self.returnStates[index]
def __eq__(self, other):
if self is other:
return True
elif not isinstance(other, ArrayPredictionContext):
return False
elif hash(self) != hash(other):
return False # can't be same if hash is different
else:
return self.returnStates==other.returnStates and self.parents==other.parents
def __str__(self):
if self.isEmpty():
return "[]"
|
zhyq0826/test-lab | tornado-lab/main4.py | Python | mit | 2,107 | 0.009732 | #-*- coding:utf-8 -*-
from tornado.httpclient import HTTPClient, AsyncHTTPClient
import tornado.ioloop
from tornado.concurrent import Future
from tornado import gen
import time
from concurrent.fut | ures import ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=4)
# Synchronous fetch: blocks until the response is available.
def synchronous_fetch(url, callback):
    """Fetch *url* with the blocking HTTPClient and pass the response to *callback*.

    Fix: the synchronous ``HTTPClient.fetch`` takes no ``callback`` keyword —
    it returns the response directly — so invoke the callback ourselves.
    """
    http_client = HTTPClient()
    response = http_client.fetch(url)
    callback(response)
# Asynchronous fetch: invokes the callback once the request completes.
def asynchronous_fetch(url, callback):
    """Fetch *url* with AsyncHTTPClient (old callback style) and forward the
    response to *callback* when it arrives."""
    http_client = AsyncHTTPClient()
    def handle_response(response):
        callback(response)
    http_client.fetch(url, callback=handle_response)
# Wrap the asynchronous fetch result in a Future.
def asyn_fetch_feture(url):
    """Fetch *url* and expose the result through a manually managed Future.

    NOTE(review): besides resolving the returned future, completion also
    schedules the module-level asyn_fetch_feture_callback on the IOLoop —
    the two completion paths are coupled; confirm before reuse.
    """
    http_client = AsyncHTTPClient()
    my_future = Future()
    fetch_future = http_client.fetch(url)
    def handler_feture(feture):
        # Propagate the resolved value to our own future...
        my_future.set_result(feture.result())
        # ...and also notify the global demo callback on the loop.
        tornado.ioloop.IOLoop.current().add_callback(asyn_fetch_feture_callback, feture.result())
    fetch_future.add_done_callback(handler_feture)
    return my_future
@gen.coroutine
def fetch_coroutine(url):
    """Coroutine-style fetch; delivers the response via gen.Return
    (the pre-`return`-in-generators style old Tornado requires)."""
    http_client = AsyncHTTPClient()
    response = yield http_client.fetch(url)
    raise gen.Return(response)
def blocking_func(t):
    """Sleep *t* seconds; stands in for blocking work to run on the executor."""
    print('call blocking func')
    time.sleep(t)
@gen.coroutine
def call_blocking():
    """Run blocking_func on the thread pool so the IOLoop is not blocked.

    Fix: ``executor.submit(blocking_func, (4, ))`` passed the *tuple* ``(4,)``
    as the argument, so ``time.sleep((4,))`` raised TypeError; submit the
    bare number instead.
    """
    yield executor.submit(blocking_func, 4)
def synchronous_fetch_callback(result):
    """Demo callback for synchronous_fetch: print the response body."""
    print('synchronous_callback')
    print(result.body)
def asynchronous_fetch_callback(result):
    """Demo callback for asynchronous_fetch: print the response body."""
    print('asynchronous_callback')
    print(result.body)
def asyn_fetch_feture_callback(result):
    """Demo callback for asyn_fetch_feture: print the response headers."""
    print('future_callback')
    print(result.headers)
def fetch_coroutine_callback(result):
    """Demo callback for fetch_coroutine: print the response body."""
    print('coroutine callback')
    print(result.body)
# Demo entry point: kick off one sync and two async fetches, then run the loop.
if __name__ == '__main__':
    url = 'http://baidu.com'
    synchronous_fetch(url, synchronous_fetch_callback)
    asynchronous_fetch(url, asynchronous_fetch_callback)
    result = asyn_fetch_feture('https://apple.com')
    ioloop = tornado.ioloop.IOLoop.current()
    ioloop.start()
|
GregorioDiStefano/integration | tests/MenderAPI/inventory.py | Python | apache-2.0 | 3,224 | 0.003102 | #!/usr/bin/python
# Copyright 2017 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from MenderAPI import *
class Inventory():
    """Thin client for the Mender inventory management REST API."""
    auth = None

    def __init__(self, auth):
        self.reset()
        self.auth = auth

    def reset(self):
        # Reset all temporary values (none currently held).
        pass

    def get_inv_base_path(self):
        """Base URL of the inventory API on the current gateway."""
        return "https://%s/api/management/%s/inventory/" % (get_mender_gateway(), api_version)

    def get_devices(self, has_group=None):
        """get_devices API. has_group can be True/False/None string."""
        # Fix: a `headers` local was computed here and then never used; the
        # request always fetched a fresh auth token, so the dead local is gone.
        params = {}
        if has_group is not None:
            params = {"has_group": has_group}
        ret = requests.get(self.get_inv_base_path() + "devices", params=params, headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.ok
        return ret.json()

    def get_device(self, device_id):
        """Return the raw response for a single device (not decoded)."""
        devurl = "%s%s/%s" % (self.get_inv_base_path(), "device", device_id)
        ret = requests.get(devurl, headers=self.auth.get_auth_token(), verify=False)
        return ret

    def get_groups(self):
        """Return the list of inventory groups."""
        ret = requests.get(self.get_inv_base_path() + "groups", headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.ok
        return ret.json()

    def get_devices_in_group(self, group):
        """Return the device ids belonging to *group*."""
        req = "groups/%s/devices" % group
        ret = requests.get(self.get_inv_base_path() + req, headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.ok
        return ret.json()

    def get_device_group(self, device):
        """Return the group membership object for *device*."""
        req = "devices/%s/group" % device
        ret = requests.get(self.get_inv_base_path() + req, headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.ok
        return ret.json()

    def put_device_in_group(self, device, group):
        """Assign *device* to *group*; expects a 204 No Content reply."""
        headers = {"Content-Type": "application/json"}
        headers.update(self.auth.get_auth_token())
        body = '{"group":"%s"}' % group
        req = "devices/%s/group" % device
        ret = requests.put(self.get_inv_base_path() + req, data=body, headers=headers, verify=False)
        assert ret.status_code == requests.status_codes.codes.no_content

    def delete_device_from_group(self, device, group):
        """Remove *device* from *group*; expects a 204 No Content reply."""
        req = "devices/%s/group/%s" % (device, group)
        ret = requests.delete(self.get_inv_base_path() + req, headers=self.auth.get_auth_token(), verify=False)
        assert ret.status_code == requests.status_codes.codes.no_content
erjohnso/ansible | lib/ansible/playbook/play_context.py | Python | gpl-3.0 | 27,712 | 0.00232 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS | FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import random
import re
import string
import sys
from ansibl | e import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.plugins import get_plugin_class
from ansible.utils.ssh_functions import check_for_controlpersist
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayContext']
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
MAGIC_VARIABLE_MAPPING = dict(
# base
connection=('ansible_connection', ),
module_compression=('ansible_module_compression', ),
shell=('ansible_shell_type', ),
executable=('ansible_shell_executable', ),
remote_tmp_dir=('ansible_remote_tmp', ),
# connection common
remote_addr=('ansible_ssh_host', 'ansible_host'),
remote_user=('ansible_ssh_user', 'ansible_user'),
password=('ansible_ssh_pass', 'ansible_password'),
port=('ansible_ssh_port', 'ansible_port'),
pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
timeout=('ansible_ssh_timeout', 'ansible_timeout'),
private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
# networking modules
network_os=('ansible_network_os', ),
connection_user=('ansible_connection_user',),
# ssh TODO: remove
ssh_executable=('ansible_ssh_executable', ),
ssh_common_args=('ansible_ssh_common_args', ),
sftp_extra_args=('ansible_sftp_extra_args', ),
scp_extra_args=('ansible_scp_extra_args', ),
ssh_extra_args=('ansible_ssh_extra_args', ),
ssh_transfer_method=('ansible_ssh_transfer_method', ),
# docker TODO: remove
docker_extra_args=('ansible_docker_extra_args', ),
# become
become=('ansible_become', ),
become_method=('ansible_become_method', ),
become_user=('ansible_become_user', ),
become_pass=('ansible_become_password', 'ansible_become_pass'),
become_exe=('ansible_become_exe', ),
become_flags=('ansible_become_flags', ),
# deprecated
sudo=('ansible_sudo', ),
sudo_user=('ansible_sudo_user', ),
sudo_pass=('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe=('ansible_sudo_exe', ),
sudo_flags=('ansible_sudo_flags', ),
su=('ansible_su', ),
su_user=('ansible_su_user', ),
su_pass=('ansible_su_password', 'ansible_su_pass'),
su_exe=('ansible_su_exe', ),
su_flags=('ansible_su_flags', ),
)
# TODO: needs to be configurable
b_SU_PROMPT_LOCALIZATIONS = [
to_bytes('Password'),
to_bytes('암호'),
to_bytes('パスワード'),
to_bytes('Adgangskode'),
to_bytes('Contraseña'),
to_bytes('Contrasenya'),
to_bytes('Hasło'),
to_bytes('Heslo'),
to_bytes('Jelszó'),
to_bytes('Lösenord'),
to_bytes('Mật khẩu'),
to_bytes('Mot de passe'),
to_bytes('Parola'),
to_bytes('Parool'),
to_bytes('Pasahitza'),
to_bytes('Passord'),
to_bytes('Passwort'),
to_bytes('Salasana'),
to_bytes('Sandi'),
to_bytes('Senha'),
to_bytes('Wachtwoord'),
to_bytes('ססמה'),
to_bytes('Лозинка'),
to_bytes('Парола'),
to_bytes('Пароль'),
to_bytes('गुप्तशब्द'),
to_bytes('शब्दकूट'),
to_bytes('సంకేతపదము'),
to_bytes('හස්පදය'),
to_bytes('密码'),
to_bytes('密碼'),
to_bytes('口令'),
]
TASK_ATTRIBUTE_OVERRIDES = (
'become',
'become_user',
'become_pass',
'become_method',
'become_flags',
'connection',
'docker_extra_args', # TODO: remove
'delegate_to',
'no_log',
'remote_user',
)
RESET_VARS = (
'ansible_connection',
'ansible_user',
'ansible_host',
'ansible_port',
# TODO: ???
'ansible_docker_extra_args',
'ansible_ssh_host',
'ansible_ssh_pass',
'ansible_ssh_port',
'ansible_ssh_user',
'ansible_ssh_private_key_file',
'ansible_ssh_pipelining',
'ansible_ssh_executable',
)
OPTION_FLAGS = ('connection', 'remote_user', 'private_key_file', 'verbosity', 'force_handlers', 'step', 'start_at_task', 'diff',
'ssh_common_args', 'docker_extra_args', 'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args')
class PlayContext(Base):
'''
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
'''
# base
_module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
_shell = FieldAttribute(isa='string')
_executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
_remote_addr = FieldAttribute(isa='string')
_remote_tmp_dir = FieldAttribute(isa='string', default=C.DEFAULT_REMOTE_TMP)
_password = FieldAttribute(isa='string')
_timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
_connection_user = FieldAttribute(isa='string')
_private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
_pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_PIPELINING)
# networking modules
_network_os = FieldAttribute(isa='string')
# docker FIXME: remove these
_docker_extra_args = FieldAttribute(isa='string')
# ssh # FIXME: remove these
_ssh_executable = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_EXECUTABLE)
_ssh_args = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_ARGS)
_ssh_common_args = FieldAttribute(isa='string')
_sftp_extra_args = FieldAttribute(isa='string')
_scp_extra_args = FieldAttribute(isa='string')
_ssh_extra_args = FieldAttribute(isa='string')
_ssh_transfer_method = FieldAttribute(isa='string', default=C.DEFAULT_SSH_TRANSFER_METHOD)
# ???
_connection_lockfd = FieldAttribute(isa='int')
# privilege escalation fields
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
_become_exe = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_EXE)
_become_flags = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_FLAGS)
_prompt = FieldAttribute(isa='string')
# DEPRECATED: backwards compatibility fields for sudo/su
_sudo_exe = FieldAttribute(isa='string', default=C.DEFAULT_SUDO_EXE)
_sudo_flags = FieldAttribute(isa='string', default=C.DEFAULT_SUDO_FLAGS)
_sudo_pass = FieldAttribute(isa='string')
_su_exe = FieldAttribute(isa='string', default=C.DEFAULT_SU_EXE)
_su_flags = FieldAttribute(isa='string', default=C.DEFAULT_SU_FLAGS)
_su_pass = FieldAttribute(isa='string')
# general flags
_verbosity = FieldAttribute(isa='int', default=0)
_only_tags = FieldAttribute(isa='set', default=set())
_skip_tags = FieldAttri |
skynetera/skynet | skynet-agent/plugins/linux/iostats.py | Python | apache-2.0 | 1,028 | 0.001953 | #!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: skutil@gmail.com
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: disk.py
@time: 2015-11-28 下午1:53
"""
import time
import psutil
def monitor(frist_invoke=2):
    """Collect aggregate disk I/O statistics via psutil.

    Note: the ``frist_invoke`` parameter name (sic) is kept for
    backward compatibility with the plugin loader; it is unused here.

    Returns a dict with a single 'iostats' key mapping metric names to:
    bytes read/written scaled to MB, read/write call counts scaled by
    1024*1024 (presumably intended as "millions of ops" -- TODO confirm
    units), and read/write/busy times in seconds.
    """
    sdiskio = psutil.disk_io_counters()

    value_dic = {
        'iostats': {
            'io.disks_read': sdiskio.read_bytes/(1024*1024),
            'io.disks_write': sdiskio.write_bytes/(1024*1024),
            'io.disks_read_count': sdiskio.read_count/(1024 * 1024),
            'io.disks_write_count': sdiskio.write_count/(1024 * 1024),
            'io.disks_read_time': sdiskio.read_time/1000,
            'io.disks_write_time': sdiskio.write_time/1000,
            # Bug fix: this metric previously duplicated write_time.
            # Use the real busy_time counter when the platform provides
            # it (Linux), falling back to the old value otherwise.
            'io.disks_busy_time': getattr(sdiskio, 'busy_time', sdiskio.write_time)/1000,
        }
    }
    return value_dic


if __name__ == '__main__':
    # print(...) with a single argument behaves the same on py2 and py3
    print(monitor())
|
opcode81/ProbCog | src/main/python/configMLN.py | Python | gpl-3.0 | 3,791 | 0.011606 | # configuration script for the MLN query & parameter learning tools
#
# If you want to use PyMLNs with the Alchemy System (http://alchemy.cs.washington.edu),
# set the path where it is installed on your system in the alchemy_versions dictionary below.
#
# Depending on your naming convention for mln and database files, you may need to change
# query_db_filemask, query_mln_filemask, learnwts_mln_filemask and learnwts_db_filemask
# to suit your needs.
#
# You can use os.getenv("VARIABLE") to retrieve the value of an environment variable
from configGUI import *
coloring = True
# --- settings for the parameter learning tool ---
learnwts_mln_filemask = "*.mln"
learnwts_db_filemask = "*.db"
def learnwts_output_filename(infile, engine, method, dbfile): # formats the output filename
    """Build the name of the learnt-weights output MLN file."""
    # strip a leading "in." or "wts." marker from the MLN filename
    if infile.startswith("in."):
        infile = infile[3:]
    elif infile.startswith("wts."):
        infile = infile[4:]
    # drop the file extensions before recombining the pieces
    if infile.endswith(".mln"):
        infile = infile[:-4]
    if dbfile.endswith(".db"):
        dbfile = dbfile[:-3]
    return "wts.%s%s.%s-%s.mln" % (engine, method, dbfile, infile)
learnwts_full_report = True # if True, add all the printed output to the Alchemy output file, otherwise (False) use a short report
learnwts_report_bottom = True # if True, the comment with the report is appended to the end of the file, otherwise it is inserted at the beginning
learnwts_edit_outfile_when_done = False # if True, open the learnt output file that is generated in the editor defined in configGUI
# --- settings for the query tool ---
query_mln_filemask = "*.mln"
query_db_filemask = ["*.db", "*.blogdb"]
def query_output_filename(mlnfile, dbfile):
    """Derive the results filename from the MLN and database filenames."""
    # drop a learnt-weights prefix and the file extensions
    if mlnfile.startswith("wts."):
        mlnfile = mlnfile[4:]
    if mlnfile.endswith(".mln"):
        mlnfile = mlnfile[:-4]
    if dbfile.endswith(".db"):
        dbfile = dbfile[:-3]
    return "%s-%s.results" % (dbfile, mlnfile)
query_edit_outfile_when_done = False # if True, open the output file that is generated by the Alchemy system in the editor defined above
keep_alchemy_conversions = True
# --- Alchemy settings ---
# define how the Alchemy system is to be used, i.e. what certain command line switches are
old_usage = {
"openWorld": "-o",
"maxSteps": "-mcmcMaxSteps",
"numChains": "-mcmcNumChains",
}
new_usage = {
"openWorld": "-ow",
"maxSteps": "-maxSteps",
"numChains": | "-numChains"
}
default_infer_usage = new_usage # the usage that is to apply when the "usage" of an Alchemy installation is not set explicitl | y in the dictionary below
# installed Alchemy versions:
# - Keys are names of the installations as they should appear in the two tools.
# - Values should be either paths to the Alchemy root or "bin" directory or
# a dictionary with at least the key "path" set to the Alchemy root or bin directory.
# The dictionary can additionally set "usage" to one of the above mappings
alchemy_versions = {
#"Alchemy - current (AMD64)": {"path": os.getenv("ALCHEMY_HOME"), "usage": new_usage},
#"Alchemy - July 2009 (AMD64)": {"path": r"/usr/wiss/jain/work/code/alchemy-2009-07-07/bin", "usage": new_usage},
#"Alchemy - June 2008 (AMD64)": {"path": r"/usr/wiss/jain/work/code/alchemy-2008-06-30/bin/amd64", "usage": new_usage},
#"Alchemy - August 2010 (AMD64)": {"path": os.getenv("ALCHEMY_HOME").replace("\\", "/"), "usage": new_usage},
#"Alchemy - June 2008 (i386)": {"path": r"/usr/wiss/jain/work/code/alchemy-2008-06-30/bin/i386", "usage": new_usage},
#"Alchemy (Win32 desktop)": {"path": r"c:\users\Domini~1\Research\code\alchemy-2010-08-23\bin", "usage": new_usage},
#"Alchemy (Win32 laptop)": {"path": r"c:\research\code\alchemy\bin", "usage": new_usage},
}
alchemy_home = os.getenv("ALCHEMY_HOME")
if alchemy_home is not None:
alchemy_versions["Alchemy"] = {"path": alchemy_home, "usage": new_usage}
|
CaterHatterPillar/wolfy | lw.py | Python | mit | 2,977 | 0.00739 | #!/bin/python
# lw.py
# Create a Lone Wolf character.

# System errors:
# 1 - Erronous arguments given.
# 2 - Command line syntax error.
# 3 - Specified file already exists.

import os
import sys
import argparse

import wolfy_util
import wolfy_config

argparser = argparse.ArgumentParser(prog='lw', description=
                                    'Create a new wolfy character. The COMBAT'
                                    ' SKILL and ENDURANCE options may be'
                                    ' utilized if the user prefers randomizing'
                                    ' his/her numbers using the original'
                                    ' Random Number Tables.')
argparser.add_argument('name', type = str, help =
                       'Desired character name. This'
                       ' will primarily act as the name of your character file'
                       ' which you may, in turn, version control.')
argparser.add_argument('-cs', '--combatskill', type = int, help =
                       'Your initial COMBAT SKILL. If this option is not'
                       ' specified, your initial COMBAT SKILL will be'
                       ' randomized for you.')
argparser.add_argument('-en', '--endurance', type = int, help =
                       'Your initial ENDURANCE. If this option is not given,'
                       ' your initial ENDURANCE will be automatically'
                       ' randomized.')
argparser.add_argument('-ac', '--active', action='store_true', help =
                       'Whather or not to set the newly created character as'
                       ' active in the wolfy repository configuration.')

args = argparser.parse_args(sys.argv[1:])
name = args.name
combat_skill = args.combatskill
endurance = args.endurance
set_as_active = args.active

filename = name.lower() + '.lw'
if os.path.isfile(filename):
    # argparser.error() always exits with status 2 (command-line syntax
    # error), which made the old sys.exit(3) unreachable and contradicted
    # the documented exit code 3 for an existing file. Report the problem
    # ourselves and exit with the documented status.
    sys.stderr.write('There already exists a character with that name!'
                     ' Please remove ' + filename + ' or give a different'
                     ' character name.\n')
    sys.exit(3)

# Randomize any stats the user did not supply on the command line.
if combat_skill is None:
    combat_skill = str(wolfy_util.castD10() + 10)
    print('Character COMBAT SKILL not given.'
          'COMBAT SKILL is randomized to: ' + combat_skill)
if endurance is None:
    endurance = str(wolfy_util.castD10() + 20)
    print('Character ENDURANCE not given. ENDURANCE is randomized to: '
          + endurance)

# "with" guarantees the file handle is closed even if a write fails
# (previously the handle was opened early and only closed at the end).
with open(filename, 'w') as file:
    file.write('# ' + filename + '\n'
               '# wolfy character file for Joe Dever\'s Lone Wolf series.\n'
               '# See https://github.com/CaterHatterPillar/wolfy\n'
               '# v. ' + wolfy_util.getVersionNumber() + '\n'
               '# Character: ' + name + '\n')
print('Created character ' + name + ' with filename ' + filename + '.')

if set_as_active:
    wolfy_config.setActiveChar(name)
    print('Set ' + name + ' as the active Lone Wolf character in the wolfy'
          ' repository configuration.')
|
iafan/zing | pootle/apps/pootle_store/unit/filters.py | Python | gpl-3.0 | 5,933 | 0.000169 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
# Copyright (C) Zing contributors.
#
# This file is a part of the Zing project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.db.models import Q
from pootle_statistics.models import SubmissionTypes
from pootle_store.constants import FUZZY, TRANSLATED, UNTRANSLATED
from pootle_store.util import SuggestionStates
class FilterNotFound(Exception):
    """Raised when no ``filter_<name>`` handler exists for a requested filter name."""
    pass
class BaseUnitFilter(object):
    """Base class for Unit queryset filters.

    Subclasses provide ``filter_<name>`` methods; :meth:`filter`
    dispatches to them based on the requested filter name.
    """

    def __init__(self, qs, *args_, **kwargs_):
        # qs: the Unit queryset to be filtered; extra args are ignored here
        # and consumed by subclasses.
        self.qs = qs

    def filter(self, unit_filter):
        # Map e.g. "my-suggestions" to a ``filter_my_suggestions`` method;
        # a missing handler surfaces as FilterNotFound for the dispatcher.
        try:
            return getattr(
                self, "filter_%s" % unit_filter.replace("-", "_"))()
        except AttributeError:
            raise FilterNotFound()
class UnitChecksFilter(BaseUnitFilter):
    """Filter a Unit qs down to units with (specific) quality checks."""

    def __init__(self, qs, *args, **kwargs):
        super(UnitChecksFilter, self).__init__(qs, *args, **kwargs)
        # Optional narrowing criteria: a list of check names, or a check
        # category; check names take precedence when both are given.
        self.checks = kwargs.get("checks")
        self.category = kwargs.get("category")

    def filter_checks(self):
        # Checks marked as false positives are never reported.
        criteria = {"qualitycheck__false_positive": False}
        if self.checks:
            criteria["qualitycheck__name__in"] = self.checks
        elif self.category:
            criteria["qualitycheck__category"] = self.category
        return self.qs.filter(**criteria).distinct()
class UnitStateFilter(BaseUnitFilter):
    """Filter a Unit qs based on unit state"""

    def filter_all(self):
        # No restriction: every unit.
        return self.qs.all()

    def filter_translated(self):
        return self.qs.filter(state=TRANSLATED)

    def filter_untranslated(self):
        return self.qs.filter(state=UNTRANSLATED)

    def filter_fuzzy(self):
        return self.qs.filter(state=FUZZY)

    def filter_incomplete(self):
        # "Incomplete" covers anything still needing work: untranslated
        # or fuzzy units.
        return self.qs.filter(
            Q(state=UNTRANSLATED) | Q(state=FUZZY))
class UnitContributionFilter(BaseUnitFilter):
    """Filter a Unit qs based on user contributions"""

    def __init__(self, qs, *args, **kwargs):
        super(UnitContributionFilter, self).__init__(qs, *args, **kwargs)
        # The user whose contributions are filtered on; when None, the
        # user-specific filters below return an empty queryset.
        self.user = kwargs.get("user")

    def filter_suggestions(self):
        # Units with at least one pending suggestion by any user.
        return self.qs.filter(
            suggestion__state=SuggestionStates.PENDING).distinct()

    def filter_user_suggestions(self):
        if not self.user:
            return self.qs.none()
        return self.qs.filter(
            suggestion__user=self.user,
            suggestion__state=SuggestionStates.PENDING).distinct()

    def filter_my_suggestions(self):
        # Alias backing the "my-suggestions" filter name.
        return self.filter_user_suggestions()

    def filter_user_suggestions_accepted(self):
        if not self.user:
            return self.qs.none()
        return self.qs.filter(
            suggestion__user=self.user,
            suggestion__state=SuggestionStates.ACCEPTED).distinct()

    def filter_user_suggestions_rejected(self):
        if not self.user:
            return self.qs.none()
        return self.qs.filter(
            suggestion__user=self.user,
            suggestion__state=SuggestionStates.REJECTED).distinct()

    def filter_user_submissions(self):
        if not self.user:
            return self.qs.none()
        # Units the user edited directly (only "edit"-type submissions).
        return self.qs.filter(
            submitted_by=self.user,
            submission__type__in=SubmissionTypes.EDIT_TYPES).distinct()

    def filter_my_submissions(self):
        # Alias backing the "my-submissions" filter name.
        return self.filter_user_submissions()

    def filter_user_submissions_overwritten(self):
        if not self.user:
            return self.qs.none()
        # NOTE(review): filtering on submitted_by=self.user and then
        # excluding submitted_by=self.user looks contradictory (an
        # always-empty queryset) -- verify the intended semantics.
        qs = self.qs.filter(
            submitted_by=self.user,
            submission__type__in=SubmissionTypes.EDIT_TYPES)
        return qs.exclude(submitted_by=self.user).distinct()

    def filter_my_submissions_overwritten(self):
        return self.filter_user_submissions_overwritten()
class UnitSearchFilter(object):
    """Dispatch a named unit filter to whichever filter class handles it."""

    filters = (UnitChecksFilter, UnitStateFilter, UnitContributionFilter)

    def filter(self, qs, unit_filter, *args, **kwargs):
        # Probe each filter class in turn until one knows how to handle
        # ``unit_filter``; fall back to an empty queryset when none does.
        for filter_class in self.filters:
            try:
                return filter_class(qs, *args, **kwargs).filter(unit_filter)
            except FilterNotFound:
                continue
        return qs.none()
class UnitTextSearch(object):
    """Search Unit's fields for text strings"""

    search_fields = (
        "source_f", "target_f", "locations",
        "translator_comment", "developer_comment")
    search_mappings = {
        "notes": ["translator_comment", "developer_comment"],
        "source": ["source_f"],
        "target": ["target_f"]}

    def __init__(self, qs):
        self.qs = qs

    def get_search_fields(self, sfields):
        # Expand the aliases ("notes", "source", "target") and keep any
        # directly named searchable fields; anything else is dropped.
        fields = set()
        for sfield in sfields:
            mapped = self.search_mappings.get(sfield)
            if mapped is not None:
                fields.update(mapped)
            elif sfield in self.search_fields:
                fields.add(sfield)
        return fields

    def get_words(self, text, exact):
        # An exact search matches the phrase as a whole; otherwise each
        # space-separated word is matched independently.
        if exact:
            return [text]
        return [word.strip() for word in text.split(" ") if word.strip()]

    def search(self, text, sfields, exact=False):
        # OR together the per-field results.
        words = self.get_words(text, exact)
        matches = self.qs.none()
        for field in self.get_search_fields(sfields):
            matches = matches | self.search_field(field, words)
        return matches

    def search_field(self, field, words):
        # Every word must appear (case-insensitively) in the field.
        found = self.qs
        for word in words:
            found = found.filter(
                **{("%s__icontains" % field): word})
        return found
|
AliZafar120/NetworkStimulatorSPl3 | rapidnet_visualizer/runner.py | Python | gpl-2.0 | 884 | 0.03733 | #!/usr/bin/python
import os, sys

log_file = 'py_ns3_visualizer.log'

# Remove any stale log file; os.remove avoids spawning a shell for a
# simple unlink (and any quoting problems with the path).
if os.path.exists(log_file):
    os.remove(log_file)

if len(sys.argv) == 2 and sys.argv[1] == 'help':
    print('Usage: runner [<events.log>] [<interval>]')
    sys.exit(0)

if len(sys.argv) >= 2:
    events_log = sys.argv[1]
else:
    # No log given: pick any events.log found under traces/ (the find
    # pipeline is kept; only the tmp-file handling is cleaned up so the
    # handle is closed and the tmp file removed without a shell).
    os.system('find traces/ -name "events.log" | head -1 > tmp.txt')
    with open('tmp.txt', 'r') as tmp:
        events_log = tmp.readline().strip()
    os.remove('tmp.txt')

if not os.path.exists(events_log):
    print('File does not exist: ' + events_log)
    sys.exit(0)

if len(sys.argv) >= 3:
    interval = sys.argv[2]
else:
    interval = '500'

# Rebuild argv the way rapidnet_visualizer.main() expects it.
sys.argv = [sys.argv[0], events_log, 'styles/node-styles010', 'styles/link-styles010', interval]

sys.path.append('src')
from ns3.rapidnet.rapidnet_visualizer import main
main()
|
messiasthi/Dduplicated | dduplicated/scans.py | Python | gpl-2.0 | 1,093 | 0.028362 | from os import path as opath, walk
from dduplicated import hashs
ignored = ['..']
files = {}
visited = []
def add_file(path):
    """Hash ``path`` and register it in the global ``files`` index.

    Symlinks are skipped so a target is never counted twice.
    """
    if opath.islink(path):
        return
    digest = hashs.get_hash(path)
    entries = files.get(digest)
    if entries is None:
        files[digest] = [path]
    elif path not in entries:
        # guard against registering the same path twice for one hash
        entries.append(path)
def scan_dir(path, verbose=False):
    """Walk ``path`` recursively and index every regular file found.

    Symlinked, ignored and already visited paths are skipped.
    """
    global visited
    if opath.islink(path) or path in ignored or path in visited:
        return
    visited.append(path)
    # os.walk already descends into subdirectories, so there is no need to
    # recurse manually here. The old explicit scan_dir() call per subdir
    # made every file at depth k be hashed k+1 times (add_file dedups the
    # results, so only time was wasted, not correctness).
    for (root, directories, dir_files) in walk(path, True):
        for d in directories:
            if verbose:
                # also fixes the missing path separator in the old message
                print("Analyse the directory: {}".format(opath.join(root, d)))
        for f in dir_files:
            if verbose:
                print("Analyse the file: {}/{}".format(root, f))
            add_file(opath.join(root, f))
def scan(paths, verbose=False):
    """Scan every path in ``paths`` and return only the duplicated files.

    Returns a dict mapping a content hash to the sorted list of paths
    sharing that hash; hashes seen for a single path only are dropped.
    """
    for target in paths:
        scan_dir(target, verbose)
    duplicates = {}
    for digest, matches in files.items():
        if len(matches) < 2:
            continue
        matches.sort()  # deterministic, in-place ordering
        duplicates[digest] = matches
    return duplicates
|
yuanzhao/gpdb | gpMgmt/bin/gppylib/gparray.py | Python | apache-2.0 | 94,119 | 0.009467 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
gparray.py:
Contains three classes representing configuration information of a
Greenplum array:
GpArray - The primary interface - collection of all GpDB within an array
GpDB - represents configuration information for a single dbid
Segment - collection of all GpDB with the same content id
"""
# ============================================================================
from datetime import date
import copy
import traceback
from gppylib.utils import checkNotNone, checkIsInt
from gppylib import gplog
from gppylib.db import dbconn
from gppylib.gpversion import GpVersion
from gppylib.commands.unix import *
import os
SYSTEM_FILESPACE = 3052 # oid of the system filespace
logger = gplog.get_default_logger()
DESTINATION_FILE_SPACES_DIRECTORY = "fs_directory"
ROLE_PRIMARY = 'p'
ROLE_MIRROR = 'm'
VALID_ROLES = [ROLE_PRIMARY, ROLE_MIRROR]
# Map gp_segment_configuration role values to values from gp_primarymirror.
ROLE_TO_MODE_MAP = {}
SEG_MODE_PRIMARY = "PrimarySegment"
SEG_MODE_MIRROR = "MirrorSegment"
ROLE_TO_MODE_MAP[ROLE_PRIMARY] = SEG_MODE_PRIMARY
ROLE_TO_MODE_MAP[ROLE_MIRROR] = SEG_MODE_MIRROR
STATUS_UP = 'u'
STATUS_DOWN = 'd'
VALID_STATUS = [STATUS_UP, STATUS_DOWN]
MODE_NOT_INITIALIZED = '' # no mirroring
MODE_CHANGELOGGING = 'c' # filerep logging
MODE_SYNCHRONIZED = 's' # filerep synchronized
MODE_RESYNCHRONIZATION = 'r' #
# Map gp_segment_configuration mode values to values retured from gp_primarymirror.
MODE_TO_DATA_STATE_MAP = {}
SEG_DATA_STATE_NOT_INITIALIZED = "NotInitialized"
SEG_DATA_STATE_IN_CHANGE_TRACKING = "InChangeTracking"
SEG_DATA_STATE_SYNCHRONIZED = "InSync"
SEG_DATA_STATE_IN_RESYNC = "InResync"
MODE_TO_DATA_STATE_MAP[MODE_NOT_INITIALIZED] = SEG_DATA_STATE_NOT_INITIALIZED
MODE_TO_DATA_STATE_MAP[MODE_CHANGELOGGING] = SEG_DATA_STATE_IN_CHANGE_TRACKING
MODE_TO_DATA_STATE_MAP[MODE_SYNCHRONIZED] = SEG_DATA_STATE_SYNCHRONIZED
MODE_TO_DATA_STATE_MAP[MODE_RESYNCHRONIZATION] = SEG_DATA_STATE_IN_RESYNC
# SegmentState values returned from gp_primarymirror.
SEGMENT_STATE_NOT_INITIALIZED = "NotInitialized"
SEGMENT_STATE_INITIALIZATION = "Initialization"
SEGMENT_STATE_IN_CHANGE_TRACKING_TRANSITION = "InChangeTrackingTransition"
SEGMENT_STATE_IN_RESYNCTRANSITION = "InResyncTransition"
SEGMENT_STATE_IN_SYNC_TRANSITION = "InSyncTransition"
SEGMENT_STATE_READY = "Ready"
SEGMENT_STATE_CHANGE_TRACKING_DISABLED = "ChangeTrackingDisabled"
SEGMENT_STATE_FAULT = "Fault"
SEGMENT_STATE_SHUTDOWN_BACKENDS = "ShutdownBackends"
SEGMENT_STATE_SHUTDOWN = "Shutdown"
SEGMENT_STATE_IMMEDIATE_SHUTDOWN = "ImmediateShutdown"
VALID_MODE = [
MODE_SYNCHRONIZED,
MODE_CHANGELOGGING,
MODE_RESYNCHRONIZATION,
]
MODE_LABELS = {
MODE_CHANGELOGGING: "Change Tracking",
MODE_SYNCHRONIZED: "Synchronized",
MODE_RESYNCHRONIZATION: "Resynchronizing"
}
# These are all the valid states primary/mirror pairs can
# be in. Any configuratio | n other than this will cause the
# FTS Prober to bring down the master postmaster until the
# con | figuration is corrected. Here, primary and mirror refer
# to the segments current role, not the preferred_role.
#
# The format of the tuples are:
# (<primary status>, <prmary mode>, <mirror status>, <mirror_mode>)
VALID_SEGMENT_STATES = [
(STATUS_UP, MODE_CHANGELOGGING, STATUS_DOWN, MODE_SYNCHRONIZED),
(STATUS_UP, MODE_CHANGELOGGING, STATUS_DOWN, MODE_RESYNCHRONIZATION),
(STATUS_UP, MODE_RESYNCHRONIZATION, STATUS_UP, MODE_RESYNCHRONIZATION),
(STATUS_UP, MODE_SYNCHRONIZED, STATUS_UP, MODE_SYNCHRONIZED)
]
def getDataModeLabel(mode):
    """Return the human-readable label for a segment data mode.

    Raises KeyError if ``mode`` is not one of the known MODE_* constants.
    """
    return MODE_LABELS[mode]
MASTER_CONTENT_ID = -1
class InvalidSegmentConfiguration(Exception):
    """Exception raised when an invalid gparray configuration is
    read from gp_segment_configuration or an attempt to save an
    invalid gparray configuration is made."""

    def __init__(self, array):
        # keep the offending array around so callers can inspect it
        self.array = array

    def __str__(self):
        return "Invalid GpArray: %s" % self.array
# ============================================================================
# ============================================================================
class GpDB:
"""
GpDB class representing configuration information for a single dbid
within a Greenplum Array.
"""
# --------------------------------------------------------------------
def __init__(self, content, preferred_role, dbid, role, mode, status,
hostname, address, port, datadir, replicationPort):
# Todo: replace all these fields with private alternatives:
# e.g. '_content' instead of 'content'.
#
# Other code should go through class interfaces for access, this
# will allow easier modifications in the future.
self.content=content
self.preferred_role=preferred_role
self.dbid=dbid
self.role=role
self.mode=mode
self.status=status
self.hostname=hostname
self.address=address
self.port=port
self.datadir=datadir
self.replicationPort=replicationPort
# Filespace mappings for this segment
# Todo: Handle self.datadir more cleanly
# Todo: Handle initialization more cleanly
self.__filespaces = { SYSTEM_FILESPACE: datadir }
# Pending filespace creation
self.__pending_filespace = None
# Catalog directory for each database in this segment
self.catdirs = None
# Todo: Remove old dead code
self.valid = (status == 'u')
# --------------------------------------------------------------------
def __str__(self):
"""
Construct a printable string representation of a GpDB
"""
return "%s:%s:content=%s:dbid=%s:mode=%s:status=%s" % (
self.hostname,
self.datadir,
self.content,
self.dbid,
self.mode,
self.status
)
#
# Note that this is not an ideal comparison -- it uses the string representation
# for comparison
#
def __cmp__(self,other):
left = repr(self)
right = repr(other)
if left < right: return -1
elif left > right: return 1
else: return 0
#
# Moved here from system/configurationImplGpdb.py
#
def equalIgnoringModeAndStatusAndReplicationPort(self, other):
"""
Return true if none of the "core" attributes (e.g. filespace)
of two segments differ, false otherwise.
This method is used by updateSystemConfig() to know when a catalog
change will cause removing and re-adding a mirror segment.
"""
firstMode = self.getSegmentMode()
firstStatus = self.getSegmentStatus()
firstReplicationPort = self.getSegmentReplicationPort()
try:
# make the elements we don't want to compare match and see if they are then equal
self.setSegmentMode(other.getSegmentMode())
self.setSegmentStatus(other.getSegmentStatus())
self.setSegmentReplicationPort(other.getSegmentReplicationPort())
return self == other
finally:
#
# restore mode and status after comaprison
#
self.setSegmentMode(firstMode)
self.setSegmentStatus(firstStatus)
self.setSegmentReplicationPort(firstReplicationPort)
# --------------------------------------------------------------------
def __repr__(self):
"""
Construct a string representation of class, must be sufficient
information to call initFromString on the result and deterministic
so it can be used for __cmp__ comparison
"""
# Note: this doesn't currently handle "pending filespaces", but
# this is not currently required since gpfiles |
roubert/python-phonenumbers | python/phonenumbers/data/region_IO.py | Python | apache-2.0 | 1,488 | 0.009409 | """Auto-generated file, do not edit by hand. IO metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IO = PhoneMetadata(id='IO', country_code=246, international_prefi | x='00',
general_desc=PhoneNumberDesc(national_number_pattern='3\\d{6}', possible_number_pattern='\\d{7}'),
fixed_line=PhoneNumberDesc(national_number_pattern='37\\d{5}', possible_number_pattern='\\d{7}', example_number='3709100'),
mobile=PhoneNumberDesc(national_number_pattern='38\\d{5}', possible_number_pattern='\\d{7}', example_number='3801234'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national | _number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2')])
|
Haddy1/ClusterMDS | lib/libSMACOF_theano.py | Python | gpl-3.0 | 10,508 | 0.0059 | #!/usr/bin/python
from __future__ import print_function
import numpy as np
from sklearn.decomposition import PCA
import theano
import theano.tensor as T
class SMACOF():
"""
Multidimensional Scaling
Multidimensional Scaling using the "Scaling by MAjorizing a COmplicated Function" (SMACOF) alghorithm
Parameters
---------
data: array-like
array containing high dimensional data points
n_components: int, optional, default: 2
number of dimensions to which the data should be transformed
maxiter: int, optional, default: 10000
Maximum number of iterations of the SMACOF Alghorithm
mode: string, optional, default: 'step'
Whether to call Theano for each Iteration step or let Theano handle Iterations
'step' or 'all'
floatX: string, optional, default: 'float32'
Precision which Theano uses
'float32' or 'float64'
force_cpu: bool, optional, default: False
Forces Theano to only use the CPU in calculations.
Helpful, when gpu memory is insufficient
References
---------
Borg, I.; Groenen, P. (1997), Modern Multidimensional Scaling: theory and applications, New York: Springer-Verlag.
"""
maxiter = 10000
mode = 'step'
fstep = None
floop = None
def __init__(self, data, n_components = 2, maxiter = 10000, mode='step', floatX='float32', force_cpu=False):
theano.config.floatX = floatX
if force_cpu:
theano.config.device='cpu'
theano.config.force_device=True
self.n_components = n_components
self.maxiter = 10000
self.mode = mode
self.initTheano()
self.delta = self.fdist(data)
self.size_inv = 1.0 / data.shape[0]
    def initTheano(self):
        """
        Compiles the Theano functions used by the solver:
        - ``self.fdist``: pairwise euclidean distance matrix of a point set
        - ``self.fsigma``: raw stress between two distance matrices
        - ``self.fstep`` or ``self.floop``: one iteration step, or the whole
          iteration loop, depending on the selected ``mode``.
        """
        #function for distance matrix calculation
        X = T.matrix()
        dist = T.matrix()  # NOTE(review): placeholder, immediately overwritten below
        dist = self.calcDist(X)
        self.fdist = theano.function(inputs=[X], outputs = dist, allow_input_downcast=True)
        #function for stress calculation
        distX = T.matrix()
        delta = T.matrix()
        s = T.scalar()  # NOTE(review): placeholder, immediately overwritten below
        s = self.sigma(distX, delta)
        self.fsigma = theano.function(inputs=[distX, delta], outputs = s, allow_input_downcast=True)
        #compile function for selected iteration mode
        if self.mode == 'all':
            self.init_loop()
        else:
            self.init_step()
    def init_step(self):
        """compiles step as fstep (single-iteration mode)"""
        Z = T.matrix()
        distZ = T.matrix()
        delta = T.matrix()
        size_inv = T.scalar()
        X = T.matrix()      # NOTE(review): placeholder, immediately overwritten below
        distX = T.matrix()  # NOTE(review): placeholder, immediately overwritten below
        s = T.scalar()      # NOTE(review): placeholder, immediately overwritten below
        s_old = T.scalar()  # NOTE(review): passed to step() but unused there
        X, distX, s = self.step(Z, distZ, s_old, delta, size_inv)
        self.fstep = theano.function(inputs=[Z, distZ, delta, size_inv], outputs=[X, distX, s], allow_input_downcast=True)
    def step(self, Z, distZ, s_old, delta, size_inv):
        """
        function for one iteration step
        Parameters
        ----------
        Z: array-like
            X of previous iteration
        distZ: array-like
            Distance Matrix distX of previous Iteration
        s_old: float
            stress of previous Iteration (unused here; kept for signature
            parity with loop_step)
        delta: array-like
            Distance Matrix of high dimensional input data
        size_inv: float
            1 / (number of data points)
        Returns
        -------
        X: array-like
            updated low dimensional data
        distX: array-like
            distance matrix of updated X
        s: float
            updated stress between distX and delta
        """
        #update X via the Guttman transform (the SMACOF majorization step)
        X = self.guttmanTrans(Z, distZ, delta, size_inv)
        distX = self.calcDist(X)
        # Normalise the stress by the norm of the embedding so its scale
        # is comparable across iterations.
        dist_norm = T.sqrt((X**2).sum(axis=1)).sum()
        s = self.sigma(distX, delta) / dist_norm
        return X, distX, s
    def init_loop(self):
        """compiles loop_step as floop (whole-loop mode via theano.scan)"""
        Z = T.matrix()
        distZ = T.matrix()
        delta = T.matrix()
        size_inv = T.scalar()
        X = T.matrix()      # NOTE(review): placeholder, rebound by scan below
        distX = T.matrix()  # NOTE(review): placeholder, rebound by scan below
        s = T.scalar()      # NOTE(review): placeholder, rebound by scan below
        s_old = T.scalar()
        eps = T.scalar()
        # scan iterates loop_step up to maxiter times; loop_step's `until`
        # condition terminates early once the stress improvement drops below eps.
        ([X, distX, s]), updates = theano.scan(fn = self.loop_step, outputs_info= [Z, distZ, s_old], non_sequences = [delta, size_inv, eps], n_steps=self.maxiter)
        # Only the final embedding X[-1] is returned to the caller.
        self.floop= theano.function(inputs=[Z, distZ, s_old, delta, size_inv, eps], outputs=[X[-1]], allow_input_downcast=True)
    def loop_step(self, Z, distZ, s_old, delta, size_inv, eps):
        """
        Body for Iteration loop
        This provides the Body for the Iteration Loop
        theano.scan uses this to create the actual Iteration loop
        Parameters
        ----------
        Z: array-like
            X of previous iteration
        distZ: array-like
            Distance Matrix distX of previous Iteration
        s_old: float
            stress of previous Iteration
        delta: array-like
            Distance Matrix of high dimensional input data
        size_inv: float
            1 / (number of data points)
        eps: float
            convergence threshold on the per-iteration stress improvement
        Returns
        -------
        return: list
            list containing the following return values
            X: array-like
                updated low dimensional data
            distX: array-like
                distance matrix of updated X
            s: float
                updated stress between distX and delta
        until: theano.scan_module.until
            break condition for the scan loop;
            fires when convergence is reached
        """
        #update X via the Guttman transform (the SMACOF majorization step)
        X = self.guttmanTrans(Z, distZ, delta, size_inv)
        distX = self.calcDist(X)
        dist_norm = T.sqrt((X**2).sum(axis=1)).sum()
        s = self.sigma(distX, delta) / dist_norm
        # Stop iterating once the stress improvement falls below eps.
        s_diff = s_old - s
        condition = T.lt(s_diff, eps)
        until = theano.scan_module.until(condition)
        return [X, distX, s], until
def getInitValues(self, data):
"""
Provides initial values
Parameters
----------
data: array-like
high dimensional data
Returns
-------
init_best: array-like
Guess for initial low dimensional data
dist_best: array-like
Initial Distance Matrix for init_best
s_best: double
Initial Result of Cost Function sigma
"""
n_init = 4
#first guess: pca_transform
pca = PCA(self.n_components)
init_best = pca.fit(data).transform(data)
dist_best = self.fdist(init_best)
s_best = self.fsigma(dist_best, self.delta)
#try random initial values
for k in range(0,4):
init = np.random.uniform(0.000001, 10, (data.shape[0], self.n_components))
dist = self.fdist(init)
s = self.fsigma(dist, self.delta)
if s < s_best:
init_best = init
dist_best = dist
s_best = s
return init_best, dist_best, s_best
    def sigma(self, distX, delta):
        """
        Cost Function to be minimized
        Computes the stress between high and low dimensional data,
        stress: the squared difference of high- and lowdimensional distances
        Parameters
        --------
        distX: array-like
            distance matrix of low dimensional X for current iteration step
        delta: array-like
            distance matrix of high dimensional input data
        Returns
        ------
        s: float
            stress between high and low dimensional data
        """
        s = T.sum(T.square(distX - delta))
        return s
#calculate the distance matrix
def calcDist(self, X):
"""
Calculates Distance Matrix
Parameters
---------
X: array-like
Input Array
Returns
--------
dist: array-like
squared symmetric array containing the euclidian distances between each row of X
"""
XX = T.dot(T.sum(X**2, axis=1).dimshuffle(0,'x') , T.ones((1, T.shape(X)[0])))
YY = XX.T
|
CSF-JH/crossbarexamples | rest/needs_cleanup/python/example/auth/__init__.py | Python | apache-2.0 | 2,784 | 0.023707 | ###############################################################################
##
## Copyright 2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
## Tavendo WebMQ Application Key and Secret for our Web app
APPKEY = 'foobar'
APPSECRET = 'secret'
## The "user database" of our Web app
USERDB = {'joe': 'secret', 'admin': 'hoho'}
import json, uuid, sys
from flask import Flask, url_for, Response, request, session, \
render_template, redirect, escape, flash
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
import hmac, hashlib, binascii
def authSignature(authChallenge, authSecret = None):
    """Return the base64-encoded HMAC-SHA256 of *authChallenge* keyed with
    *authSecret* (an empty key is used when no secret is given)."""
    key = authSecret if authSecret is not None else ""
    digest = hmac.new(key, authChallenge, hashlib.sha256).digest()
    return binascii.b2a_base64(digest).strip()
@app.route('/')
def index():
    # Authenticated users get the demo page; the WebMQ server address is
    # taken from the command line (sys.argv[1]).  Everyone else is sent
    # to the login form.
    if 'username' in session:
        return render_template('index.html',
                               server = sys.argv[1],
                               topic = "http://example.com/simple")
    else:
        return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    # Show the login form (GET) or validate credentials against the demo
    # USERDB (POST).  On success the username is stored in the session.
    error = None
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        # Bug fix: the original condition
        #   USERDB[username] != request.form['password'] != 'secret'
        # is a chained comparison meaning
        #   (stored != given) and (given != 'secret')
        # so the literal password 'secret' was accepted for ANY known user.
        # It also used the obsolete dict.has_key().
        if username not in USERDB or USERDB[username] != password:
            error = 'Invalid credentials'
        else:
            flash("You were successfully logged in as '%s'" % username)
            session['username'] = username
            return redirect(url_for('index'))
    return render_template('login.html', error = error)
@app.route('/logout')
def logout():
    # Drop the session entry (no error if absent) and return to the index.
    session.pop('username', None)
    return redirect(url_for('index'))
@app.route('/authsign', methods = ['POST'])
def authsign():
if 'username' in session:
try:
data = json.loads(request.data)
| print "Challenge:", data
if data['authkey'] == APP | KEY:
sig = authSignature(request.data, APPSECRET)
print "Signature:", sig
return sig
except Expection, e:
print e
return ""
if __name__ == "__main__":
app.run(host = "0.0.0.0", port = 8000, debug = True)
|
DailyActie/Surrogate-Model | 01-codes/tensorflow-master/tensorflow/models/rnn/ptb/ptb_word_lm.py | Python | mit | 10,596 | 0.000566 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn.ptb import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None, "data_path")
FLAGS = flags.FLAGS
class PTBModel(object):
    """The PTB model: an unrolled multi-layer LSTM language model."""

    def __init__(self, is_training, config):
        """Build the graph: embedding -> unrolled LSTM -> softmax loss,
        plus the SGD training op when *is_training* is set."""
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size
        self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
        # Slightly better results can be obtained with forget gate biases
        # initialized to 1 but the hyperparameters of the model would need to be
        # different than reported in the paper.
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
        if is_training and config.keep_prob < 1:
            lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
                lstm_cell, output_keep_prob=config.keep_prob)
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)
        self._initial_state = cell.zero_state(batch_size, tf.float32)
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [vocab_size, size])
            inputs = tf.nn.embedding_lookup(embedding, self._input_data)
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)
        # Simplified version of tensorflow.models.rnn.rnn.py's rnn().
        # This builds an unrolled LSTM for tutorial purposes only.
        # In general, use the rnn() or state_saving_rnn() from rnn.py.
        #
        # The alternative version of the code below is:
        #
        # from tensorflow.models.rnn import rnn
        # inputs = [tf.squeeze(input_, [1])
        #           for input_ in tf.split(1, num_steps, inputs)]
        # outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
        outputs = []
        state = self._initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)
        output = tf.reshape(tf.concat(1, outputs), [-1, size])
        # Fixed: these two variable declarations were mangled by a stray
        # line split in the source.
        softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        logits = tf.matmul(output, softmax_w) + softmax_b
        loss = tf.nn.seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(self._targets, [-1])],
            [tf.ones([batch_size * num_steps])])
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = state
        if not is_training:
            return
        # Gradient-clipped SGD; the learning rate lives in a variable so it
        # can be decayed between epochs via assign_lr().
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self.lr)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))

    def assign_lr(self, session, lr_value):
        """Set the learning-rate variable to *lr_value* inside *session*."""
        session.run(tf.assign(self.lr, lr_value))

    @property
    def input_data(self):
        return self._input_data

    @property
    def targets(self):
        return self._targets

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
class SmallConfig(object):
    """Small config (fastest to train; see the module table for expected perplexity)."""
    init_scale = 0.1     # uniform init range for weights
    learning_rate = 1.0  # initial SGD learning rate
    max_grad_norm = 5    # gradient-clipping threshold
    num_layers = 2       # stacked LSTM layers
    num_steps = 20       # unrolled time steps
    hidden_size = 200    # LSTM units per layer
    max_epoch = 4        # epochs at the initial learning rate
    max_max_epoch = 13   # total training epochs
    keep_prob = 1.0      # dropout keep probability (1.0 = no dropout)
    lr_decay = 0.5       # per-epoch decay applied after max_epoch
    batch_size = 20
    vocab_size = 10000
class MediumConfig(object):
    """Medium config (the 'medium' model of Zaremba et al., arXiv:1409.2329)."""
    init_scale = 0.05
    learning_rate = 1.0
    max_grad_norm = 5
    num_layers = 2
    num_steps = 35
    hidden_size = 650
    max_epoch = 6
    max_max_epoch = 39
    keep_prob = 0.5   # dropout enabled for the larger models
    lr_decay = 0.8
    batch_size = 20
    vocab_size = 10000
class LargeConfig(object):
    """Large config (the 'large' model of Zaremba et al., arXiv:1409.2329)."""
    init_scale = 0.04
    learning_rate = 1.0
    max_grad_norm = 10
    num_layers = 2
    num_steps = 35
    hidden_size = 1500
    max_epoch = 14
    max_max_epoch = 55
    keep_prob = 0.35
    lr_decay = 1 / 1.15  # gentler decay than the smaller configs
    batch_size = 20
    vocab_size = 10000
class TestConfig(object):
    """Tiny config, for testing (minimal network so tests run quickly)."""
    init_scale = 0.1
    learning_rate = 1.0
    max_grad_norm = 1
    num_layers = 1
    num_steps = 2
    hidden_size = 2
    max_epoch = 1
    max_max_epoch = 1
    keep_prob = 1.0
    lr_decay = 0.5
    batch_size = 20
    vocab_size = 10000
def run_epoch(session, m, data, eval_op, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = m.initial_state.eval()
for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
m.num_steps)):
cost, state, _ = session.run([m.cost, m.final_state, eval_op],
{m.input_data: x,
m.targets: y,
m.initial_state: state})
costs += cost
|
AlanZatarain/opencamlib | scripts/issues/issue20data.py | Python | gpl-3.0 | 4,416 | 0.009284 | import ocl
trilist = []
trilist.append(ocl.Triangle(ocl.Point(2.91337, 1, 3.93857),
ocl.Point(2.89783, -7.09741e-016, 4.07462),
ocl.Point(4, -9.79685e-016, 3)))
trilist.append(ocl.Triangle(ocl.Point(2.91337, 1, 3.93857),
ocl.Point(4,0.673874, 2.92334),
ocl.Point(4, 1, 2.82843)))
trilist.append(ocl.Triangle(ocl.Point(2.91337, 1, 3.93857),
ocl.Point(4,0.339111, 2.98077),
ocl.Point(4, 0.673874, 2.92334)))
trilist.append(ocl.Triangle(ocl.Point(2.91337, 1, 3.93857), ocl.Point(4,
-9.79685e-016, 3), ocl.Point(4, 0.339111, 2.98077)))
trilist.append(ocl.Triangle(ocl.Point(1.53379, 1, 4.65268),
ocl.Point(1.521, -3.72525e-016, 4.76304), ocl.Point(2.89783, -7.09741e-016,
4.07462)))
trilist.append(ocl.Triangle(ocl.Point(1.53379, 1, 4.65268),
ocl.Point(2.89783, -7.09741e-016, 4.07462), ocl.Point(2.91337, 1, 3.93857)))
trilist.append(ocl.Triangle(ocl.Point(0, 0.669179, 4.95502),
ocl.Point(1.53379, 1, 4.65268), ocl.Point(-1.19986e-015, 1, 4.89898)))
trilist.append(ocl.Triangle(ocl.Point(0, 0.335345, 4.98874),
ocl.Point(1.53379, 1, 4.65268), ocl.Point(0, 0.669179, 4.95502)))
trilist.append(ocl.Triangle(ocl.Point(3.06152e-016, -7.4983e-032, 5),
ocl.Point(1.521, -3.72525e-016, 4.76304), ocl.Point(1.53379, 1, 4.65268)))
trilist.append(ocl.Triangle(ocl.Point(3.06152e-016, -7.4983e-032, 5),
ocl.Point(1.53379, 1, 4.65268), ocl.Point(0, 0.335345, 4.98874)))
trilist.append(ocl.Triangle(ocl.Point(2, 1, 2.44949), ocl.Point(0, 1,
0), ocl.Point(-1.19986e-015, 1, 4.89898)))
trilist.append(ocl.Triangle(ocl.Point(2, 1, 2.44949),
ocl.Point(-1.19986e-015, 1, 4.89898), ocl.Point(1.53379, 1, 4.65268)))
trilist.append(ocl.Triangle(ocl.Point(2, 1, 2.44949), ocl.Point(1.53379,
1, 4.65268), ocl.Point(2.91337, 1, 3.93857)))
trilist.append(ocl.Triangle(ocl.Point(2, 1, 2.44949), ocl.Point(2.91337,
1, 3.93857), ocl.Point(4, 1, 2.82843)))
trilist.append(ocl.Triangle(ocl.Point(2, 1, 2.44949), ocl.Point(4, 1,
2.82843), ocl.Point(4, 1, 0)))
trilist.append(ocl.Triangle(ocl.Point(2, 1, 2.44949), ocl.Point(4, 1,
0), ocl.Point(0, 1, 0)))
trilist.append(ocl.Triangle(ocl.Point(4, 0.5, 1.5), ocl.Point(4,
-9.79685e-016, 3), ocl.Point(4, 0, 0)))
trilist.append(ocl.Triangle(ocl.Point(4, 0.5, 1.5), ocl.Point(4, 0, 0),
ocl.Point(4, 1, 0)))
trilist.append(ocl.Triangle(ocl.Point(4, 0.5, 1.5), ocl.Point(4, 1, 0),
ocl.Point(4, 1, 2.82843)))
trilist.append(ocl.Triangle(ocl.Point(4, 0.5, 1.5), ocl.Point(4, 1,
2.82843), ocl.Point(4, 0.673874, 2.92334)))
trilist.append(ocl.Triangle(ocl.Point(4, 0.5, 1.5), ocl.Point(4,
0.673874, 2.92334), ocl.Point(4, 0.339111, 2.98077)))
trilist.append(ocl.Triangle(ocl.Point(4, 0.5, 1.5), ocl.Point(4,
0.339111, 2.98077), ocl.Point(4, -9.79685e-016, 3)))
trilist.append(ocl.Triangle(ocl.Point(2, 0, 2.5),
ocl.Point(3.06152e-016, -7.4983e-032, 5), ocl.Point(0, 0, 0)))
trilist.append(ocl.Triangle(ocl.Point(2, 0, 2.5), ocl.Point(4,
-9.79685e-016, 3), ocl.Point(2.89783, -7.09741e-016, 4.07462)))
trilist.append(ocl.Triangle(ocl.Point(2, 0, 2.5), ocl.Point(2.89783,
-7.09741e-016, 4.07462), ocl.Point(1.521, -3.72525e-016, 4.76304)))
trilist.append(ocl.Triangle(ocl.Point(2, 0, 2.5), ocl.Point(1.521,
-3.72525e-016, 4.76304), ocl.Point(3.06152e-016, -7.4983e-032, 5)))
trilist.append(ocl.Triangle(ocl.Point(2, 0, 2.5), ocl.Point(4, 0, 0),
ocl.Point(4, -9.79685e-016, 3)))
trilist.append(ocl.Triangle(ocl.Point(2, 0, 2.5), ocl.Point(0, 0, 0),
ocl.Point(4, 0, 0)))
trilist.append(ocl.Triangle(ocl.Point(0, 0.5, 2.5),
ocl.Point(-1.19986e-015, 1, 4.89898), ocl.Point(0, 1, 0)))
trilist.append(ocl.Triangle(ocl.Point(0, 0.5, 2.5), ocl.Point(0,
0.669179, 4.95502), ocl.Point(-1.19986e-015, 1, 4.89898)))
trilist.append(ocl.Triangle(ocl.Point(0, 0.5, 2.5), ocl.Point(0,
0.335345, 4.98874), ocl.Point(0, 0.669179, 4.95502)))
# Restored: the next two appends were mangled by stray line splits.
trilist.append(ocl.Triangle(ocl.Point(0, 0.5, 2.5),
ocl.Point(3.06152e-016, -7.4983e-032, 5), ocl.Point(0, 0.335345, 4.98874)))
trilist.append(ocl.Triangle(ocl.Point(0, 0.5, 2.5), ocl.Point(0, 0, 0),
ocl.Point(3.06152e-016, -7.4983e-032, 5)))
trilist.append(ocl.Triangle(ocl.Point(0, 0.5, 2.5), ocl.Point(0, 1, 0),
ocl.Point(0, 0, 0)))
trilist.append(ocl.Triangle(ocl.Point(4, 1, 0), ocl.Point(0, 0, 0),
ocl.Point(0, 1, 0)))
trilist.append(ocl.Triangle(ocl.Point(4, 1, 0), ocl.Point(4, 0, 0),
ocl.Point(0, 0, 0)))
|
tic-ull/portal-del-investigador | core/tests/test_user.py | Python | agpl-3.0 | 1,261 | 0 | # -*- encoding: UTF-8 -*-
#
# Copyright 2014-2015
#
# STIC-Investigación - Universidad de La Laguna (ULL) <gesinv@ull.edu.es>
#
# This file is part of Portal del Investigador.
#
# Portal del Investigador is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Portal del Investigador is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Portal del Investigador. If not, see
# <http://www.gnu.org/licenses/>.
#
from core.models import UserProfile
from django.test import TestCase
# Restored: this import line was mangled by a stray line split.
from factories import UserFactory
class UserTests(TestCase):
    def test_user_profile_created_on_user_creation(self):
        """Creating a User must auto-create exactly one matching UserProfile."""
        user = UserFactory.create()
        profile = UserProfile.objects.filter(user__username=user.username)
        self.assertEqual(len(profile), 1)
|
tylertian/Openstack | openstack F/glance/glance/common/animation.py | Python | apache-2.0 | 6,628 | 0.000754 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import threading
import time
from glance.common import exception
from glance.common import utils
class UploadProgressStatus(threading.Thread):
"""
A class for showing:
1. progress;
2. rate;
3. ETA;
4. and status, e.g. active or stalled.
In order to sample the rate as closely as possible, this
implementation uses two FIFO buffers (times and bytes)
to record fine-grain transfer rate over a period of time.
"""
NUM_OF_ELEMENTS = 20 # number of element in FIFO
TIME_TO_STALL = 5 # if no data transfer longer this
# time(secs), the network will be
# considered as stalled.
REFRESH_STATUS_INTERVAL = 0.1 # time interval to refresh screen
MIN_SAMPLING_INTERVAL = 0.15 # Minimum sampling time for data
CALC_ETA_WITH_AVE_RATE = True # calc eta with average rate
    def __init__(self, transfer_info):
        # transfer_info is a dict shared with the uploader: 'size' is the
        # total byte count, 'so_far' is updated as the transfer progresses.
        self.current_size = 0L
        self.size = transfer_info['size']
        self.last_start = 0.0
        self.last_bytes = 0L
        self.elapsed_time = 0.0
        # Running totals over the sampling window (kept in sync by sampling()).
        self.total_times = 0.0
        self.total_bytes = 0L
        # Circular FIFOs holding the most recent (time, bytes) rate samples.
        self.nelements = self.NUM_OF_ELEMENTS
        self.times = [0.0, ] * self.nelements
        self.bytes = [0L, ] * self.nelements
        self.index = 0
        self.stalled = False
        self.transfer_info = transfer_info
        threading.Thread.__init__(self)
    def run(self):
        """Thread body: poll transfer progress ~10x/sec and redraw until done."""
        self.start = self.last_start = time.time()
        while self.current_size != self.size:
            time.sleep(self.REFRESH_STATUS_INTERVAL)
            bytes = self.transfer_info['so_far']  # NOTE(review): shadows builtin 'bytes'
            self.sampling(bytes, time.time())
            self.render()
        sys.stdout.write("\n")
        sys.stdout.flush()
    def _reset_buffer(self):
        # Zero both sampling FIFOs (called after a stall so stale samples
        # don't skew the rate estimate).
        self.times = [0.0, ] * self.nelements
        self.bytes = [0L, ] * self.nelements
    def _update(self, bytes, time):
        """
        Fold one progress observation into the counters.

        Returns True when enough time and data have accumulated for a new
        rate sample; False when the caller should skip sampling (interval
        too short, no progress, or the transfer is considered stalled).
        """
        self.elapsed_time = time - self.last_start
        transferred_bytes = bytes - self.current_size
        self.last_bytes += transferred_bytes
        self.current_size += transferred_bytes
        if self.elapsed_time < self.MIN_SAMPLING_INTERVAL:
            return False
        if transferred_bytes == 0:
            # No data moved: declare a stall once TIME_TO_STALL has passed
            # and discard the now-stale sampling window.
            if self.elapsed_time > self.TIME_TO_STALL:
                self.stalled = True
                self._reset_buffer()
                self.last_bytes = 0
            return False
        if self.stalled:
            # Recovering from a stall: restart with a nominal 1s interval.
            self.stalled = False
            self.elapsed_time = 1.0
        return True
def render(self):
fraction = self.current_size / float(self.size)
percentage = fraction * 100.0
str_percent = "[%3d%%]" % percentage
try:
height, width = utils.get_terminal_size()
sys.stdout.write("\b" * width)
eta = self._calc_eta()
rate = self._get_speed()
width -= len(eta)
width -= len(rate)
bar = ('=' * int((width - len(str_percent)) * fraction)
+ str_percent)
padding = ' ' * (width - len(bar))
sys.stdout.write(bar + padding + rate + eta)
sys.stdout.flush()
except (exception.Invalid, NotImplementedError):
sys.stdout.write("\b" * 6) # use the len of [%3d%%]
percent = (str_percent + ' '
if self.current_size == self.size else str_percent)
| percent += ' ' + self._get_speed() + 'ETA ' + self._calc_eta()
sys.stdout.write(percent)
sys.stdout.write("\b" * len(percent))
sys.stdout.flush()
def _get_speed(self):
speed, unit = self._calc_speed()
if speed > 0.0:
if speed >= 99.95:
rate = "%4f" % speed
elif speed >= 9.995:
rate = "%4.1f" % speed
else:
| rate = "%4.2f" % speed
return " " + rate + unit + ", "
else:
return " ?B/s "
    def _calc_eta(self):
        """Return an 'ETA hh mm ss' string, or '??' placeholders when unknown."""
        # Don't guess until at least 1% has transferred (or while stalled).
        if self.stalled or self.current_size < self.size * 0.01:
            return "ETA ??h ??m ??s"
        if self.CALC_ETA_WITH_AVE_RATE:
            # Whole-transfer average rate.
            eta = ((self.size - self.current_size)
                   * (time.time() - self.start) / self.current_size)
        else:
            # Recent-window rate from the sampling FIFOs.
            eta = (((self.size - self.current_size)
                    * self.total_times) / (self.total_bytes))
        eta = int(eta)
        hrs = mins = secs = 0
        hrs = eta / 3600  # NOTE(review): Python 2 integer division
        secs = eta - hrs * 3600
        if secs >= 60:
            mins = secs / 60
            secs = secs % 60
        return "ETA %dh %2dm %2ds" % (hrs, mins, secs)
def _calc_speed(self):
idx = 0
units = ('B/s', 'K/s', 'M/s', 'G/s')
total_times = self.total_times + time.time() - self.last_start
total_bytes = self.total_bytes + self.last_bytes
if self.stalled or total_times == 0:
return None, None
speed = total_bytes / float(total_times)
if speed < 1024:
idx = 0
elif speed < 1048576.: # 1024*1024
idx = 1
speed /= 1024.
elif speed < 1073741824.: # 1024*1024*1024
idx = 2
speed /= 1048576.
else:
idx = 3
speed /= 1073741824.
return speed, units[idx]
    def sampling(self, bytes, time):
        """Record one (elapsed_time, bytes) sample into the circular FIFOs."""
        if not self._update(bytes, time):
            return
        # Evict the slot being overwritten from the running totals...
        self.total_times -= self.times[self.index]
        self.total_bytes -= self.bytes[self.index]
        # ...then store the fresh sample and add it back in.
        self.times[self.index] = self.elapsed_time
        self.bytes[self.index] = self.last_bytes
        self.total_times += self.elapsed_time
        self.total_bytes += self.last_bytes
        self.last_start = time
        self.last_bytes = 0L
        # Wrap the circular index.
        self.index += 1
        if self.index == self.nelements:
            self.index = 0
|
OPENi-ict/ntua_demo | openiPrototype/openiPrototype/APIS/Products_and_Services/Card/models.py | Python | apache-2.0 | 585 | 0 |
__author__ = 'mpetyx'
from django.db import models
from OPENiapp.APIS.Context.models import OpeniContextAwareModel
# Restored: the class header and the 'From' field below were mangled by
# stray line splits.
class OpeniCard(OpeniContextAwareModel):
    """Payment-card record stored as an OPENi context-aware model."""
    # id is missing because it is the default
    url = models.TextField()
    object_type = models.TextField()
    service = models.TextField()
    # Capitalised because 'from' is a reserved word in Python.
    From = models.TextField()
    billing_address = models.TextField()
    number = models.TextField()
    card_owner_date_of_birth = models.TextField()
    card_type = models.TextField()
    expiration_date = models.TextField()
    card_verification_number = models.TextField()
kikocorreoso/brython | www/src/Lib/test/test_fork1.py | Python | bsd-3-clause | 3,761 | 0.002659 | """This test checks for correct fork() behavior.
"""
import _imp as imp
import os
import signal
import sys
import threading
import time
import unittest
from test.fork_wait import ForkWait
from test.support import reap_children, get_attribute, verbose
# Skip test if fork does not exist.
get_attribute(os, 'fork')
class ForkTest(ForkWait):
    def wait_impl(self, cpid):
        """Poll-wait (up to 10s) for child *cpid* and assert it exited cleanly."""
        deadline = time.monotonic() + 10.0
        while time.monotonic() <= deadline:
            # waitpid() shouldn't hang, but some of the buildbots seem to hang
            # in the forking tests. This is an attempt to fix the problem.
            spid, status = os.waitpid(cpid, os.WNOHANG)
            if spid == cpid:
                break
            time.sleep(0.1)
        self.assertEqual(spid, cpid)
        # Low byte of the wait status is the terminating signal, high byte
        # is the exit code.
        self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
    def test_threaded_import_lock_fork(self):
        """Check fork() in main thread works while a subthread is doing an import"""
        import_started = threading.Event()
        fake_module_name = "fake test module"
        partial_module = "partial"
        complete_module = "complete"
        def importer():
            # Simulate an in-progress import: hold the import lock and leave
            # a partially-initialised module visible for a short window.
            imp.acquire_lock()
            sys.modules[fake_module_name] = partial_module
            import_started.set()
            time.sleep(0.01) # Give the other thread time to try and acquire.
            sys.modules[fake_module_name] = complete_module
            imp.release_lock()
        t = threading.Thread(target=importer)
        t.start()
        import_started.wait()
        pid = os.fork()
        try:
            # PyOS_BeforeFork should have waited for the import to complete
            # before forking, so the child can recreate the import lock
            # correctly, but also won't see a partially initialised module
            if not pid:
                m = __import__(fake_module_name)
                if m == complete_module:
                    os._exit(0)
                else:
                    if verbose > 1:
                        print("Child encountered partial module")
                    os._exit(1)
            else:
                t.join()
                # Exitcode 1 means the child got a partial module (bad.) No
                # exitcode (but a hang, which manifests as 'got pid 0')
                # means the child deadlocked (also bad.)
                self.wait_impl(pid)
        finally:
            # Best effort: make sure the child cannot outlive the test.
            try:
                os.kill(pid, signal.SIGKILL)
            except OSError:
                pass
def test_nested_import_lock_fork(self):
"""Check fork() in main thread works while the main thread is doing an import"""
# Issue 9573: this used to trigger RuntimeError in the child process
def fork_with_import_lock(level):
| release = 0
in_child = False
try:
try:
for i in range(level):
imp.acquire_ | lock()
release += 1
pid = os.fork()
in_child = not pid
finally:
for i in range(release):
imp.release_lock()
except RuntimeError:
if in_child:
if verbose > 1:
print("RuntimeError in child")
os._exit(1)
raise
if in_child:
os._exit(0)
self.wait_impl(pid)
# Check this works with various levels of nested
# import in the main thread
for level in range(5):
fork_with_import_lock(level)
def tearDownModule():
    # Ensure no forked children outlive the test run.
    reap_children()
if __name__ == "__main__":
unittest.main()
|
twstrike/tor_guardsim | lib/main.py | Python | cc0-1.0 | 1,345 | 0.005948 | #!/usr/bin/python
# Restored: this __future__ import was mangled by a stray line split.
from __future__ import print_function
from py3hax import *
import tornet
import simtime
import client
def trivialSimulation():
    """Run a toy guard-selection simulation (30 simulated hours) and print
    how many client circuits succeeded."""
    net = tornet.Network(100)
    # Decorate the network.
    # Uncomment one or two of these at a time, kthx!
    #net = tornet.FascistNetwork(net)
    #net = tornet.FlakyNetwork(net)
    #net = tornet.EvilFilteringNetwork(net)
    #net = tornet.SniperNetwork(net)
    c = client.Client(net, client.ClientParams())
    ok = 0
    bad = 0
    for period in xrange(30): # one hour each
        for subperiod in xrange(30): # two minutes each
            if (subperiod % 10) == 0:
                # nodes left and arrived
                net.do_churn()
                # nodes went up and down
                net.updateRunning()
            for attempts in xrange(6): # 20 sec each
                # actually have the client act.
                if c.buildCircuit():
                    ok += 1
                else:
                    bad += 1
                # time passed
                simtime.advanceTime(20)
        # new consensus
        c.updateGuardLists()
    print("Successful client circuits (total): %d (%d)" % (ok, (ok + bad)))
    print("Percentage of successful circuilts: %f%%"
          % ((ok / float(ok + bad)) * 100.0))
if __name__ == '__main__':
trivialSimulation()
|
tparks5/tor-stem | stem/manual.py | Python | lgpl-3.0 | 20,582 | 0.009523 | # Copyright 2015-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Information available about Tor from `its manual
<https://www.torproject.org/docs/tor-manual.html.en>`_. This provides three
methods of getting this information...
* :func:`~stem.manual.Manual.from_cache` provides manual content bundled with
Stem. This is the fastest and most reliable method but only as up-to-date as
Stem's release.
* :func:`~stem.manual.Manual.from_man` reads Tor's local man page for
information about it.
* :func:`~stem.manual.Manual.from_remote` fetches the latest manual information
remotely. This is the slowest and least reliable method but provides the most
recent information about Tor.
Manual information includes arguments, signals, and probably most usefully the
torrc configuration options. For example, say we want a little script that t | old
us what our torrc options do...
.. literalinclude:: /_static/example/manual_config_options.py
:language: python
|
.. image:: /_static/manual_output.png
|
**Module Overview:**
::
  is_important - Indicates if a configuration option is of particularly common importance.
download_man_page - Downloads tor's latest man page.
Manual - Information about Tor available from its manual.
| |- from_cache - Provides manual information cached with Stem.
| |- from_man - Retrieves manual information from its man page.
| +- from_remote - Retrieves manual information remotely from tor's latest manual.
|
+- save - writes the manual contents to a given location
.. versionadded:: 1.5.0
"""
import os
import shutil
import sys
import tempfile
import stem.prereq
import stem.util.conf
import stem.util.enum
import stem.util.log
import stem.util.system
from stem.util import _hash_attr
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
# Categories a torrc option can belong to (UNKNOWN for unrecognized sections).
Category = stem.util.enum.Enum('GENERAL', 'CLIENT', 'RELAY', 'DIRECTORY', 'AUTHORITY', 'HIDDEN_SERVICE', 'TESTING', 'UNKNOWN')

# Location of tor's asciidoc manual within the tor git repository.
GITWEB_MANUAL_URL = 'https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt'

# Pre-parsed manual information bundled with stem releases.
CACHE_PATH = os.path.join(os.path.dirname(__file__), 'cached_tor_manual.cfg')

# Maps section headers of the man page to the Category they represent.
CATEGORY_SECTIONS = {
  'GENERAL OPTIONS': Category.GENERAL,
  'CLIENT OPTIONS': Category.CLIENT,
  'SERVER OPTIONS': Category.RELAY,
  'DIRECTORY SERVER OPTIONS': Category.DIRECTORY,
  'DIRECTORY AUTHORITY SERVER OPTIONS': Category.AUTHORITY,
  'HIDDEN SERVICE OPTIONS': Category.HIDDEN_SERVICE,
  'TESTING NETWORK OPTIONS': Category.TESTING,
}
class ConfigOption(object):
  """
  Tor configuration attribute found in its torrc.

  :var str name: name of the configuration option
  :var stem.manual.Category category: category the config option was listed
    under, this is Category.UNKNOWN if we didn't recognize the category
  :var str usage: arguments accepted by the option
  :var str summary: brief description of what the option does
  :var str description: longer manual description with details
  """

  def __init__(self, name, category = Category.UNKNOWN, usage = '', summary = '', description = ''):
    self.name = name
    self.category = category
    self.usage = usage
    self.summary = summary
    self.description = description

  def __hash__(self):
    # hash over all our attributes so equal options hash alike
    return _hash_attr(self, 'name', 'category', 'usage', 'summary', 'description')

  def __eq__(self, other):
    if not isinstance(other, ConfigOption):
      return False

    return hash(self) == hash(other)

  def __ne__(self, other):
    return not self == other
@lru_cache()
def _config(lowercase = True):
  """
  Provides a dictionary for our settings.cfg. This has a couple categories...

    * manual.important (list) - configuration options considered to be important
    * manual.summary.* (str) - summary descriptions of config options

  :param bool lowercase: uses lowercase keys if **True** to allow for case
    insensitive lookups
  """

  config = stem.util.conf.Config()
  config_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')

  try:
    config.load(config_path)

    summaries = {}

    for key in config.keys():
      if key.startswith('manual.summary.'):
        summaries[key.lower() if lowercase else key] = config.get_value(key)

    important = config.get_value('manual.important', [], multiple = True)
    summaries['manual.important'] = [name.lower() if lowercase else name for name in important]

    return summaries
  except Exception as exc:
    stem.util.log.warn("BUG: stem failed to load its internal manual information from '%s': %s" % (config_path, exc))
    return {}
def _manual_differences(previous_manual, new_manual):
"""
Provides a description of how two manuals differ.
"""
lines = []
for attr in ('name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options'):
previous_attr = getattr(previous_manual, attr)
new_attr = getattr(new_manual, attr)
if previous_attr != new_attr:
lines.append("* Manual's %s attribute changed\n" % attr)
if attr in ('name', 'synopsis', 'description'):
lines.append(' Previously...\n\n%s\n' % previous_attr)
lines.append(' Updating to...\n\n%s' % new_attr)
elif attr == 'config_options':
for config_name, config_attr in new_attr.items():
previous = previous_attr.get(config_name)
if previous is None:
lines.append(' adding new config option => %s' % config_name)
elif config_attr != previous:
for attr in ('name', 'category', 'usage', 'summary', 'description'):
if getattr(config_attr, attr) != getattr(previous, attr):
lines.append(' modified %s (%s) => %s' % (config_name, attr, getattr(config_attr, attr)))
for config_name in set(previous_attr.keys()).difference(new_attr.keys()):
lines.append(' removing config option => %s' % config_name)
else:
added_items = set(new_attr.items()).difference(previous_attr.items())
removed_items = set(previous_attr.items()).difference(new_attr.items())
for added_item in added_items:
lines.append(' adding %s => %s' % added_item)
for removed_item in removed_items:
lines.append(' removing %s => %s' % removed_item)
lines.append('\n')
return '\n'.join(lines)
def is_important(option):
  """
  Indicates if a configuration option is of particularly common importance or not.

  :param str option: tor configuration option to check

  :returns: **bool** that's **True** if this is an important option and
    **False** otherwise
  """

  important_options = _config()['manual.important']
  return option.lower() in important_options
def download_man_page(path = None, file_handle = None, url = GITWEB_MANUAL_URL, timeout = 20):
"""
Downloads tor's latest man page from `gitweb.torproject.org
<https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt>`_. This method is
both slow and unreliable - please see the warnings on
:func:`~stem.manual.Manual.from_remote`.
:param str path: path to save tor's man page to
:param file file_handle: file handler to save tor's man page to
:param str url: url to download tor's asciidoc manual from
:param int timeout: seconds to wait before timing out the request
:raises: **IOError** if unable to retrieve the manual
"""
if not path and not file_handle:
raise ValueError("Either the path or file_handle we're saving to must be provided")
elif not stem.util.system.is_available('a2x'):
raise IOError('We require a2x from asciidoc to provide a man page')
dirpath = tempfile.mkdtemp()
asciidoc_path = os.path.join(dirpath, 'tor.1.txt')
manual_path = os.path.join(dirpath, 'tor.1')
try:
try:
with open(asciidoc_path, 'wb') as asciidoc_file:
request = urllib.urlopen(url, timeout = timeout)
shutil.copyfileobj(request, asciidoc_file)
except:
exc = sys.exc_info()[1]
raise IOError("Unab |
drakedevel/pass | passgen/__init__.py | Python | bsd-2-clause | 2,565 | 0.00039 | #!/usr/bin/env python
"""Dictionary-based password generator.
Usage: pass.py [options]
Options:
-h --help Show this help text
-d --dictionary=<path> Specify a non-default dictionary
-n --length=N Specify number of words to use [default: 4]
-v --verbose Print entropy estimate
--complex Bypass complexity requirements
--truncate=SIZE Truncate dictionary to specified size
--uncontrolled Generate a naively-random password from the list
The default mode ensures words are spread throughout the list, slightly
reducing absolute entropy but generally improving password memorability if the
dictionary is ordered by frequency.
"""
import math
import os
from docopt import docopt
from secrets import SystemRandom
def main():
    """Entry point: parse the docopt options, then print a generated password.

    Reads a word list, optionally truncates it, optionally reports a
    pessimistic entropy estimate, and prints ``--length`` words chosen with a
    cryptographically secure RNG.
    """
    # Normalize arguments
    args = docopt(__doc__)
    word_count = int(args['--length'])

    # Read and transform dictionary file
    if args['--dictionary']:
        dict_path = args['--dictionary']
    else:
        dict_path = os.path.join(os.path.dirname(__file__), 'words.txt')
    # Strip whitespace and drop blank lines.  Use a context manager so the
    # file handle is closed promptly instead of leaking until GC.
    with open(dict_path) as dict_file:
        dictionary = [w for w in (line.strip() for line in dict_file) if w]
    if args['--truncate']:
        dictionary = dictionary[:int(args['--truncate'])]
    elif not args['--dictionary']:
        # Default truncation for built-in dictionary
        dictionary = dictionary[:8192]

    # Basic entropy calculation.  The batched (default) scheme draws one word
    # from each of word_count equal batches, in a random batch order.
    if args['--uncontrolled']:
        entropy = math.log(math.pow(len(dictionary), word_count), 2)
    else:
        batch_size = len(dictionary) // word_count
        entropy = math.log(math.pow(batch_size, word_count) *
                           math.factorial(word_count), 2)
    if args['--verbose']:
        print("Pessimistic password entropy: %.1f bits" % entropy)
        print("Approximate time to crack at 20k/s: %.1f days" %
              (math.pow(2, entropy) / 20000 / 60 / 60 / 24))

    # Generate password
    rng = SystemRandom()
    if args['--uncontrolled']:
        # Select random words
        words = [rng.choice(dictionary) for _ in range(word_count)]
    else:
        # Generate batches in random order
        batches = [dictionary[i*batch_size:(i+1)*batch_size]
                   for i in range(word_count)]
        rng.shuffle(batches)
        # Select word from each batch
        words = [rng.choice(batches[i]) for i in range(word_count)]

    # Reveal to user
    print(" ".join(words))
    if args['--complex']:
        print("Complexified: %s1." % "".join(words).capitalize())
if __name__ == '__main__':
main()
|
AlexKuhnle/ShapeWorld | models/agreement/always_true.py | Python | mit | 371 | 0.005391 | from models.TFMacros.tf_macros import *
def model(model, inputs, dataset_parameters):
    """Trivial agreement model that ignores the world and always answers True.

    Serves as a baseline: the world input is fed through a constant-True node
    and compared against the 'agreement' target.
    """
    world = Input(name='world', shape=dataset_parameters['world_shape'], tensor=inputs.get('world'))
    constant_true = world >> Constant(value=True, dtype='float')
    agreement = constant_true >> Binary(name='agreement', binary_transform=False, tensor=inputs.get('agreement'))
    return agreement
|
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/Carbon/QuickTime.py | Python | mit | 40,695 | 0.000737 | # Generated from 'Movies.h'
def FOUR_CHAR_CODE(x):
    """Identity stand-in for the C FOUR_CHAR_CODE macro from Movies.h."""
    return x
MovieFileType = FOUR_CHAR_CODE('MooV')
MovieScrapType = FOUR_CHAR_CODE('moov')
MovieResourceType = FOUR_CHAR_CODE('moov')
MovieForwardPointerResourceType = FOUR_CHAR_CODE('fore')
MovieBackwardPointerResourceType = FOUR_CHAR_CODE('back')
MovieResourceAtomType = FOUR_CHAR_CODE('moov')
MovieDataAtomType = FOUR_CHAR_CODE('mdat')
FreeAtomType = FOUR_CHAR_CODE('free')
SkipAtomType = FOUR_CHAR_CODE('skip')
WideAtomPlaceholderType = FOUR_CHAR_CODE('wide')
MediaHandlerType = FOUR_CHAR_CODE('mhlr')
DataHandlerType = FOUR_CHAR_CODE('dhlr')
VideoMediaType = FOUR_CHAR_CODE('vide')
SoundMediaType = FOUR_CHAR_CODE('soun')
TextMediaType = FOUR_CHAR_CODE('text')
BaseMediaType = FOUR_CHAR_CODE('gnrc')
MPEGMediaType = FOUR_CHAR_CODE('MPEG')
MusicMediaType = FOUR_CHAR_CODE('musi')
TimeCodeMediaType = FOUR_CHAR_CODE('tmcd')
SpriteMediaType = FOUR_CHAR_CODE('sprt')
FlashMediaType = FOUR_CHAR_CODE('flsh')
MovieMediaType = FOUR_CHAR_CODE('moov')
TweenMediaType = FOUR_CHAR_CODE('twen')
ThreeDeeMediaType = FOUR_CHAR_CODE('qd3d')
HandleDataHandlerSubType = FOUR_CHAR_CODE('hndl')
PointerDataHandlerSubType = FOUR_CHAR_CODE('ptr ')
NullDataHandlerSubType = FOUR_CHAR_CODE('null')
ResourceDataHandlerSubType = FOUR_CHAR_CODE('rsrc')
URLDataHandlerSubType = FOUR_CHAR_CODE('url ')
WiredActionHandlerType = FOUR_CHAR_CODE('wire')
VisualMediaCharacteristic = FOUR_CHAR_CODE('eyes')
AudioMediaCharacteristic = FOUR_CHAR_CODE('ears')
kCharacteristicCanSendVideo = FOUR_CHAR_CODE('vsnd')
kCharacteristicProvidesActions = FOUR_CHAR_CODE('actn')
kCharacteristicNonLinear = FOUR_CHAR_CODE('nonl')
kCharacteristicCanStep = FOUR_CHAR_CODE('step')
kCharacteristicHasNoDuration = FOUR_CHAR_CODE('noti')
kUserDataMovieControllerType = FOUR_CHAR_CODE('ctyp')
kUserDataName = FOUR_CHAR_CODE('name')
kUserDataTextAlbum = FOUR_CHAR_CODE('\xa9alb')
kUserDataTextArtist = FOUR_CHAR_CODE('\xa9ART')
kUserDataTextAuthor = FOUR_CHAR_CODE('\xa9aut')
kUserDataTextChapter = FOUR_CHAR_CODE('\xa9chp')
kUserDataTextComment = FOUR_CHAR_CODE('\xa9cmt')
kUserDataTextComposer = FOUR_CHAR_CODE('\xa9com')
kUserDataTextCopyright = FOUR_CHAR_CODE('\xa9cpy')
kUserDataTextCreationDate = FOUR_CHAR_CODE('\xa9day')
kUserDataTextDescription = FOUR_CHAR_CODE('\xa9des')
kUserDataTextDirector = FOUR_CHAR_CODE('\xa9dir')
kUserDataTextDisclaimer = FOUR_CHAR_CODE('\xa9dis')
kUserDataTextEncodedBy = FOUR_CHAR_CODE('\xa9enc')
kUserDataTextFullName = FOUR_CHAR_CODE('\xa9nam')
kUserDataTextGenre = FOUR_CHAR_CODE('\xa9gen')
kUserDataTextHostComputer = FOUR_CHAR_CODE('\xa9hst')
kUserDataTextInformation = FOUR_CHAR_CODE('\xa9inf')
kUserDataTextKeywords = FOUR_CHAR_CODE('\xa9key')
kUserDataTextMake = FOUR_CHAR_CODE('\xa9mak')
kUserDataTextModel = FOUR_CHAR_CODE('\xa9mod')
kUserDataTextOriginalArtist = FOUR_CHAR_CODE('\xa9ope')
kUserDataTextOriginalFormat = FOUR_CHAR_CODE('\xa9fmt')
kUserDataTextOriginalSource = FOUR_CHAR_CODE('\xa9src')
kUserDataTextPerformers = FOUR_CHAR_CODE('\xa9prf')
kUserDataTextProducer = FOUR_CHAR_CODE('\xa9prd')
kUserDataTextProduct = FOUR_CHAR_CODE('\xa9PRD')
kUserDataTextSoftware = FOUR_CHAR_CODE('\xa9swr')
kUserDataTextSpecialPlaybackRequirements = FOUR_CHAR_CODE('\xa9req')
kUserDataTextTrack = FOUR_CHAR_CODE('\xa9trk')
kUserDataTextWarning = FOUR_CHAR_CODE('\xa9wrn')
kUserDataTextWriter = FOUR_CHAR_CODE('\xa9wrt')
kUserDataTextURLLink = FOUR_CHAR_CODE('\xa9url')
kUserDataTextEditDate1 = FOUR_CHAR_CODE('\xa9ed1')
kUserDataUnicodeBit = 1L << 7
DoTheRightThing = 0
kQTNetworkStatusNoNetwork = -2
kQTNetworkStatusUncertain = -1
kQTNetworkStatusNotConnected = 0
kQTNetworkStatusConnected = 1
kMusicFlagDontPlay2Soft = 1L << 0
kMusicFlagDontSlaveToMovie = 1L << 1
dfDontDisplay = 1 << 0
dfDontAutoScale = 1 << 1
dfClipToTextBox = 1 << 2
dfUseMovieBGColor = 1 << 3
dfShrinkTextBoxToFit = 1 << 4
dfScrollIn = 1 << 5
dfScrollOut = 1 << 6
dfHorizScroll = 1 << 7
dfReverseScroll = 1 << 8
dfContinuousScroll = 1 << 9
dfFlowHoriz = 1 << 10
dfContinuousKaraoke = 1 << 11
dfDropShadow = 1 << 12
dfAntiAlias = 1 << 13
dfKeyedText = 1 << 14
dfInverseHilite = 1 << 15
dfTextColorHilite = 1 << 16
searchTextDontGoToFoundTime = 1L << 16
searchTextDontHiliteFoundText = 1L << 17
searchTextOneTrackOnly = 1L << 18
searchTextEnabledTracksOnly = 1L << 19
kTextTextHandle = 1
kTextTextPtr = 2
kTextTEStyle = 3
kTextSelection = 4
kTextBackColor = 5
kTextForeColor = 6
kTextFace = 7
kTextFont = 8
kTextSize = 9
kTextAlignment = 10
kTextHilite = 11
kTextDropShadow = 12
kTextDisplayFlags = 13
kTextScroll = 14
kTextRelativeScroll = 15
kTextHyperTextFace = 16
kTextHyperTextColor = 17
kTextKeyEntry = 18
kTextMouseDown = 19
kTextTextBox = 20
kTextEditState = 21
kTextLength = 22
k3DMediaRendererEntry = FOUR_CHAR_CODE('rend')
k3DMediaRendererName = FOUR_CHAR_CODE('name')
k3DMediaRendererCode = FOUR_CHAR_CODE('rcod')
movieProgressOpen = 0
movieProgressUpdatePercent = 1
movieProgressClose = 2
progressOpFlatten = 1
progressOpInsertTrackSegment = 2
progressOpInsertMovieSegment = 3
progressOpPaste = 4
progressOpAddMovieSelection = 5
progressOpCopy = 6
progressOpCut = 7
progressOpLoadMovieIntoRam = 8
progressOpLoadTrackIntoRam = 9
progressOpLoadMediaIntoRam = 10
progressOpImportMovie = 11
progressOpExportMovie = 12
mediaQualityDraft = 0x0000
mediaQualityNormal = 0x0040
mediaQualityBetter = 0x0080
mediaQualityBest = 0x00C0
kQTEventPayloadIsQTList = 1L << 0
kActionMovieSetVolume = 1024
kActionMovieSetRate = 1025
kActionMovieSetLoopingFlags = 1026
kActionMovieGoToTime = 1027
kActionMovieGoToTimeByName = 1028
kActionMovieGoToBeginning = 1029
kActionMovieGoToEnd = 1030
kActionMovieStepForward = 1031
kActionMovieStepBackward = 1032
kActionMovieSetSelection = 1033
kActionMovieSetSelectionByName = 1034
kActionMoviePlaySelection = 1035
kActionMovieSetLanguage = 1036
kActionMovieChanged = 1037
kActionMovieRestartAtTime = 1038
kActionTrackSetVolume = 2048
kActionTrackSetBalance = 2049
kActionTrackSetEnabled = 2050
kActionTrackSetMatrix = 2051
kActionTrackSetLayer = 2052
kActionTrackSetClip = 2053
kActionTrackSetCursor = 2054
kActionTrackSetGraphicsMode = 2055
kActionTrackSetIdleFrequency = 2056
kActionTrackSetBassTreble = 2057
kActionSpriteSetMatrix = 3072
kActionSpriteSetImageIndex = 3073
kActionSpriteSetVisible = 3074
kActionSpriteSetLayer = 3075
kActionSpriteSetGraphicsMode = 3076
kActionSpritePassMouseToCodec = 3078
kActionSpriteClickOnCodec = 3079
kActionSpriteTranslate = 3080
kActionSpriteScale = 3081
kActionSpriteRotate = 3082
kActionSpriteStretch = 3083
kActionQTVRSetPanAngle = 4096
kActionQTVRSetTiltAngle = 4097
kActionQTVRSetFieldOfView = 4098
kActionQTVRShowDefaultView = 4099
kActionQTVRGoToNodeID = 4100
kActionQTVREnableHotSpot = 4101
kActionQTVRShowHotSpots = 4102
kActionQTVRTranslateObject = 4103
kActionMusicPlayNote = 5120
kActionMusicSetController = 5121
kActionCase = 6144
kActionWhile = 6145
kActionGoToURL = 6146
kActionSendQTEventToSprite = 6147
kActionDebugStr = 6148
kActionPushCurrentTime = 6149
kActionPushCurrentTimeWithLabel = 6150
kActionPopAndGotoTopTime = 6151
kActionPopAndGotoLabeledTime = 6152
| kActionStatusString = 6153
kActionSendQTEventToTrackObject = 6154
kActionAddChannelSubscription = 6155
kAction | RemoveChannelSubscription = 6156
kActionOpenCustomActionHandler = 6157
kActionDoScript = 6158
kActionDoCompressedActions = 6159
kActionSendAppMessage = 6160
kActionLoadComponent = 6161
kActionSetFocus = 6162
kActionDontPassKeyEvent = 6163
kActionSpriteTrackSetVariable = 7168
kActionSpriteTrackNewSprite = 7169
kActionSpriteTrackDisposeSprite = 7170
kActionSpriteTrackSetVariableToString = 7171
kActionSpriteTrackConcatVariables = 7172
kActionSpriteTrackSetVariableToMovieURL = 7173
kActionSpriteTrackSetVariableToMovieBaseURL = 7174
kActionApplicationNumberAndString = 8192
kActionQD3DNamedObjectTranslateTo = 9216
kActionQD3DNamedObjectScaleTo = 9217
kActionQD3DNamedObjectRotateTo = 9218
kActionFlashTrackSetPan = 10240
kActionFlashTrackSetZoom = 10241
kActionFlashTrackSetZoomRect = 10242
kActionFlashTrackGotoFrameNumber = 10243
kActionFlashTrackGotoFrameLabel = 10244
kActionFlashTrackSetFlashVariable = 10245
kActionFlashTrackDoButtonActions = 10246
kActionMovieTrackAddChildMovie = 11264 |
ezequielpereira/Time-Line | libs64/wx/lib/agw/flatmenu.py | Python | gpl-3.0 | 215,487 | 0.007267 | # --------------------------------------------------------------------------------- #
# FLATMENU wxPython IMPLEMENTATION
#
# Andrea Gavana, @ 03 Nov 2006
# Latest Revision: 21 Sep 2010, 23.00 GMT
#
# TODO List
#
# 1. Work is still in progress, so other functionalities may be added in the future;
# 2. No shadows under MAC, but it may be possible to create them using Carbon.
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# gavana@kpo.kz
# andrea.gavana@gmail.com
#
# Or, Obviously, To The wxPython Mailing List!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------------- #
"""
FlatMenu is a generic menu implementation.
Description
===========
FlatMenu, like the name implies, it is a generic menu implementation.
I tried to provide a full functionality for menus, menubar and toolbar.
FlatMenu supports the following features:
- Fires all the events (UI & Cmd);
- Check items;
- Separators;
- Enabled / Disabled menu items;
- Images on items;
- Toolbar support, with images and separators;
- Controls in toolbar (work in progress);
- Toolbar tools tooltips (done: thanks to Peter Kort);
- Accelerators for menus;
- Accelerators for menubar;
- Radio items in menus;
- Integration with AUI;
- Scrolling when menu is too big to fit the screen;
- Menu navigation with keyboard;
- Drop down arrow button to the right of the menu, it always contains the
"Customize" option, which will popup an options dialog. The dialog has the
following abilities:
(a) Ability to add/remove menus;
(b) Select different colour schemes for the menu bar / toolbar;
(c) Control various options, such as: colour for highlight menu item, draw
border around menus (classic look only);
(d) Toolbar floating appearance.
- Allows user to specify grey bitmap for disabled menus/toolbar tools;
- If no grey bitmap is provided, it generates one from the existing bitmap;
- Hidden toolbar items / menu bar items - will appear in a small popup menu
  to the right if they are hidden;
- 4 different colour schemes for the menu bar (more can easily be added);
- Scrolling is available if the menu height is greater than the screen height;
- Context menus for menu items;
- Show/hide the drop down arrow which allows the customization of FlatMenu;
- Multiple columns menu window;
- Tooltips for menus and toolbar items on a `wx.StatusBar` (if present);
- Transparency (alpha channel) for menu windows (for platforms supporting it);
- First attempt in adding controls to FlatToolbar;
- Added a MiniBar (thanks to Vladiuz);
- Added `wx.ToolBar` methods AddCheckTool/AddRadioTool (thanks to Vladiuz).
Supported Platforms
===================
FlatMenu v0.8 has been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (Dapper 6.06)
v0.9.* has been tested on
* Windows (Windows XP, Vista);
Window Styles
=============
This class supports the following window styles:
========================= =========== ==================================================
Window Styles Hex Value Description
========================= =========== ==================================================
``FM_OPT_IS_LCD`` 0x1 Use this style if your computer uses a LCD screen.
``FM_OPT_MINIBAR`` 0x2 Use this if you plan to use the toolbar only.
``FM_OPT_SHOW_CUSTOMIZE`` 0x4 Show "customize link" in the `More` menu, you will need to write your own handler. See demo.
``FM_OPT_SHOW_TOOLBAR`` 0x8 Set this option if you are planning to use the toolbar.
========================= =========== ==================================================
Events Processing
=================
This class | processes the following events:
================================= ==================================================
Event Name Description
================================= ==================================================
``EVT_FLAT_MENU_DISMISSED`` Used internally.
``EVT_FLAT_MENU_ITEM_MOUSE_OUT`` Fires an event when the mouse leaves a `FlatMenuItem`.
``EVT_FLAT_MENU_ITEM_MOUSE_OVER`` Fires an event whe | n the mouse enters a `FlatMenuItem`.
``EVT_FLAT_MENU_SELECTED`` Fires the `wx.EVT_MENU` event for `FlatMenu`.
================================= ==================================================
License And Version
===================
FlatMenu is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 21 Sep 2010, 23.00 GMT
Version 0.9.6
"""
__docformat__ = "epytext"
__version__ = "0.9.6"
import wx
import math
import cStringIO
import wx.lib.colourutils as colourutils
from fmcustomizedlg import FMCustomizeDlg
from artmanager import ArtManager, DCSaver
from fmresources import *
# FlatMenu styles
FM_OPT_IS_LCD = 1
""" Use this style if your computer uses a LCD screen. """
FM_OPT_MINIBAR = 2
""" Use this if you plan to use the toolbar only. """
FM_OPT_SHOW_CUSTOMIZE = 4
""" Show "customize link" in the `More` menu, you will need to write your own handler. See demo. """
FM_OPT_SHOW_TOOLBAR = 8
""" Set this option is you are planning to use the toolbar. """
# Some checking to see if we can draw shadows behind the popup menus
# at least on Windows. *REQUIRES* Mark Hammond's win32all extensions
# and ctypes, on Windows obviouly. Mac and GTK have no shadows under
# the menus, and it has been reported that shadows don't work well
# on Windows 2000 and previous.
_libimported = None
_DELAY = 5000
if wx.Platform == "__WXMSW__":
osVersion = wx.GetOsVersion()
# Shadows behind menus are supported only in XP
if osVersion[1] == 5 and osVersion[2] == 1:
try:
import win32api
import win32gui
_libimported = "MH"
except:
try:
import ctypes
_libimported = "ctypes"
except:
pass
else:
_libimported = None
# Simple hack, but I don't know how to make it work on Mac
# I don't have Mac ;-)
#if wx.Platform == "__WXMAC__":
# try:
# import ctypes
# _carbon_dll = ctypes.cdll.LoadLibrary(r'/System/Frameworks/Carbon.framework/Carbon')
# except:
# _carbon_dll = None
# FIXME: No way to get shadows on Windows with the original code...
# May anyone share some suggestion on how to make it work??
# Right now I am using win32api to create shadows behind wx.PopupWindow,
# but this will result in *all* the popup windows in an application
# to have shadows behind them, even the user defined wx.PopupWindow
# that do not derive from FlatMenu.
import wx.aui as AUI
AuiPaneInfo = AUI.AuiPaneInfo
try:
import aui as PyAUI
PyAuiPaneInfo = PyAUI.AuiPaneInfo
except ImportError:
pass
# Check for the new method in 2.7 (not present in 2.6.3.3)
if wx.VERSION_STRING < "2.7":
wx.Rect.Contains = lambda self, point: wx.Rect.Inside(self, point)
wxEVT_FLAT_MENU_DISMISSED = wx.NewEventType()
wxEVT_FLAT_MENU_SELECTED = wx.wxEVT_COMMAND_MENU_SELECTED
wxEVT_FLAT_MENU_ITEM_MOUSE_OVER = wx.NewEventType()
wxEVT_FLAT_MENU_ITEM_MOUSE_OUT = wx.NewEventType()
EVT_FLAT_MENU_DISMISSED = wx.PyEventBinder(wxEVT_FLAT_MENU_DISMISSED, 1)
""" Used internally. """
EVT_FLAT_MENU_SELECTED = wx.PyEventBinder(wxEVT_FLAT_MENU_SELECTED, 2)
""" Fires the wx.EVT_MENU event for `FlatMenu`. """
EVT_FLAT_MENU_ITEM_MOUSE_OUT = wx.PyEventBinder(wxEVT_FLAT_MENU_ITEM_MOUSE_OUT, 1)
""" Fires an event when the mouse leaves a `FlatMenuItem`. """
EVT_FLAT_MENU_ITEM_MOUSE_OVER = wx.PyEventBinder(wxEVT_FLAT_MENU_ITEM_MOUSE_OVER, 1)
""" Fires an event when the mouse enters a `FlatMenuItem`. """
def GetAccelIndex(label):
"""
Returns the mnemonic index of the label and the label stripped of the ampersand mnemonic
(e.g. 'lab&el' ==> will result in 3 and labelOnly = label).
:param `label`: a string containining an ampersand.
"""
indexAccel = 0
while True:
indexAccel = label.find("&", indexAccel)
if indexAccel == -1:
return indexAccel, label
if label[indexAccel:indexAccel+2] == "&&":
label = |
globality-corp/microcosm-pubsub | microcosm_pubsub/tests/sentry_fixture.py | Python | apache-2.0 | 4,744 | 0.005902 | sample_event = {
'level': 'error',
'exception': {
'values': [
{
'module': None, 'type': 'TypeError', 'value': 'Testing stuff', 'mechanism': None,
'stacktrace': {
'frames': [
{
'filename': 'microcosm_pubsub/result.py',
'abs_path': '/Users/rob/dev/microcosm-pubsub/microcosm_pubsub/result.py',
'function': 'invoke',
'module': 'microcosm_pubsub.result', 'lineno': 85,
'pre_context': [
' retry_timeout_seconds: Optional[int] = None', '', ' @classmethod',
' def invoke(cls, handler, message: SQSMe | ssage):', ' try:'
],
'context_line': ' succ | ess = handler(message.content)',
'post_context': [
' return cls.from_result(message, bool(success))',
' except Exception as error:',
' return cls.from_error(message, error)',
'',
' @classmethod'
],
'vars': {
'cls': "<class 'microcosm_pubsub.result.MessageHandlingResult'>",
'handler': '<function context_logger.<locals>.wrapped at 0x109422ef0>',
'message': '<microcosm_pubsub.message.SQSMessage object at 0x10952bcd0>'
},
'in_app': True},
{
'filename': 'test/daemon/test_daemon/handlers/test_handler.py',
'abs_path': '/Users/rob/dev/test/daemon/test_daemon/handlers/test_handler.py',
'function': 'do_something',
'module': 'test.daemon.test_daemon.handlers.test_handler', 'lineno': 50,
'pre_context': [
' def resource_type(self):',
' return self.test.get_model("TestHandler")',
'', ' @extracts("something")',
],
'context_line': ' raise TypeError("Testing stuff")',
'post_context': [' @extracts("something")'],
'vars': {
'self':
'<test.daemon.test_daemon.handlers.test_handler.TestHandler object at 0x10953cbd0>',
'something_id': "'1f40066c-f457-41b3-aa4c-72cdac5146e4'",
'project_description': "'this is some secret info'",
'other_id': "'70375dff-2d46-40c4-a1d1-f5d49a25698d'"
}, 'in_app': True
}
]
}
}
]
},
'event_id': 'c25a874a6d964c8b832e00c10009d9bc', 'timestamp': '2020-05-16T11:01:35.987342Z',
'breadcrumbs': [
{'ty': 'log', 'level': 'info', 'category': 'something', 'message': 'Starting daemon test',
'timestamp': '2020-05-16T11:01:35.935538Z', 'data': {}, 'type': 'default'},
{'ty': 'log', 'level': 'warning', 'category': 'TestHandler',
'message': 'Result for media type: application/vnd.globality.pubsub._.created.do_something was : FAILED ',
'timestamp': '2020-05-16T11:01:35.983545Z',
'data': {'media_type': 'application/vnd.globality.pubsub._.created.do_something',
'message_id': 'message-id-b7fa5993-a966-4390-a6b1-ed9eb5026134', 'X-Request-Ttl': '31',
'uri': 'http://localhost:5452/api/v2/message/6dee4da6-8af1-4636-93b6-7770bc6990bc',
'handler': 'Handler Test', 'elapsed_time': 47.17707633972168}, 'type': 'default'}
],
'tags': {'x-request-id': None, 'message-id': 'message-id-b7fa5993-a966-4390-a6b1-ed9eb5026134', 'media-type': None},
'contexts': {'runtime': {'name': 'CPython', 'version': '3.7.4',
'build': ''}},
'modules': {'microcosm-pubsub': '2.17.0'},
'extra': {'sys.argv': []}, 'environment': 'localhost',
'server_name': 'some_daemon',
'sdk': {'name': 'sentry.python', 'version': '0.14.4',
'packages': [{'name': 'pypi:sentry-sdk', 'version': '0.14.4'}],
'integrations': []},
'platform': 'python'
}
|
MADindustries/WhatManager2 | books/utils.py | Python | mit | 566 | 0.003534 | from subprocess import call
# ISBN pattern: optional 978/979 prefix (with optional hyphen), nine digits,
# then a final digit or 'X' check character.
isbn_regex = '^(97(8|9)-?)?\d{9}(\d|X)$'
def fix_author(author):
    """Convert a 'Last, First' author name to 'First Last'.

    Names without exactly one ', ' separator are returned unchanged.
    """
    last, sep, first = author.partition(u', ')
    if sep and u', ' not in first:
        return first + u' ' + last
    return author
def call_mktorrent(target, torrent_filename, announce, torrent_name=None):
    """Create a private .torrent file for `target` by shelling out to mktorrent.

    Raises an Exception if mktorrent exits with a non-zero status.
    """
    cmd = ['mktorrent', '-a', announce, '-p', '-o', torrent_filename]
    if torrent_name:
        cmd += ['-n', torrent_name]
    cmd.append(target)
    if call(cmd) != 0:
        raise Exception('mktorrent returned non-zero')
|
ProjectSWGCore/NGECore2 | scripts/loot/lootPools/tatooine/re_junk_escaped_criminal.py | Python | lgpl-3.0 | 107 | 0.093458 |
def itemNames():
    """Loot item names for this junk pool."""
    names = ('id_chip', 'ledger', 'magseal_detector')
    return list(names)
def itemCha | nces():
return [33,33,34] |
florianfesti/CocktailOverlord | cocktailoverlord/db.py | Python | gpl-3.0 | 14,288 | 0.006789 | """
Copyright (C) 2017 - The CocktailOverlord Authors
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import sqlite3
class RecipeEntry:
    """One line of a cocktail recipe: an ingredient and the amount to pour."""

    def __init__(self, ingredient, amount):
        """
        :param ingredient: Ingredient instance for this recipe line
        :param amount: volume to pour (ml, judging by pretty_print's output)
        """
        self.ingredient = ingredient
        self.amount = amount
class Ingredient:
    """A pourable liquid identified by name; abv is the alcohol fraction (0..1)."""

    def __init__(self, name, abv):
        self.name = name
        self.abv = abv

    def __repr__(self):
        # Non-alcoholic ingredients show just the name, others append a percentage.
        if self.abv != 0:
            return "{} ({:.1f}%)".format(self.name, 100 * self.abv)
        return self.name
class Storage:
    """Contents of one physical storage slot on the machine."""

    def __init__(self, location, ingredient, amount):
        """
        :param location: slot identifier (the storage table's primary key)
        :param ingredient: Ingredient stored there (may be None -- the schema
            uses ON DELETE SET NULL for this column)
        :param amount: remaining volume in the slot
        """
        self.location = location
        self.ingredient = ingredient
        self.amount = amount
class Cocktail:
    """A named drink: picture path plus a recipe of ingredient/amount entries.

    Recipe rows may be RecipeEntry objects (as used by pretty_print) or bare
    ``(ingredient, amount)`` tuples; both forms are accepted everywhere.  The
    original abv() indexed rows with ``row[0]``/``row[1]`` while pretty_print()
    used ``.ingredient``/``.amount``, so one of the two always failed.
    """

    def __init__(self, name, picture, recipe, cid=None):
        self.name = name
        self.picture = picture
        self.recipe = recipe
        self.id = cid

    @staticmethod
    def _entry_parts(row):
        """Return (ingredient, amount) for a RecipeEntry or a plain tuple."""
        try:
            return row.ingredient, row.amount
        except AttributeError:
            return row[0], row[1]

    def abv(self):
        """Alcohol-by-volume of the mixed drink as a 0..1 fraction.

        Returns 0.0 for an empty recipe instead of raising ZeroDivisionError.
        """
        total_volume = 0.0
        total_alcohol = 0.0
        for row in self.recipe:
            ingredient, amount = self._entry_parts(row)
            total_volume += amount
            total_alcohol += amount * ingredient.abv
        if total_volume == 0:
            return 0.0
        return total_alcohol / total_volume

    def pretty_print(self):
        """Multi-line description: name first, then ingredients by descending abv."""
        parts = [self._entry_parts(row) for row in self.recipe]
        parts.sort(key=lambda pair: -pair[0].abv)
        lines = ["- {amount} ml of {name}".format(name=ingredient.name, amount=amount)
                 for ingredient, amount in parts]
        return self.name + "\n" + "\n".join(lines)

    def __repr__(self):
        return self.name
class CocktailDB:
def __init__(self, dbfile):
self.conn = sqlite3.connect(dbfile, check_same_thread=False)
self.cur = self.conn.cursor()
    def create_db(self):
        """Create the schema (tables, views, indexes) if it doesn't exist yet.

        Idempotent: every statement uses IF NOT EXISTS.  Schema overview:
          - cocktail:   id, unique name, optional picture
          - ingredient: id, unique name, abv
          - recipe:     (cocktail, ingredient, amount) rows, cascade on delete
          - storage:    physical slot -> stored ingredient + remaining amount
          - available_ingredients: view totalling stock per ingredient
          - recipe_plus_storage:   view joining recipes with stock, with an
            'enough' flag per ingredient
        No explicit commit is performed here.
        """
        self.cur.execute("""CREATE TABLE IF NOT EXISTS cocktail (id INTEGER PRIMARY KEY ASC AUTOINCREMENT,
                            name TEXT UNIQUE NOT NULL,
                            picture TEXT)""")
        self.cur.execute("""CREATE TABLE IF NOT EXISTS ingredient (id INTEGER PRIMARY KEY ASC AUTOINCREMENT,
                            name TEXT UNIQUE NOT NULL,
                            abv REAL NOT NULL)""")
        self.cur.execute("""CREATE TABLE IF NOT EXISTS recipe (cocktail INTEGER NOT NULL REFERENCES cocktail(id) ON DELETE CASCADE,
                            ingredient INTEGER NOT NULL REFERENCES ingredient(id) ON DELETE CASCADE,
                            amount NUMERIC NOT NULL)""")
        self.cur.execute("""CREATE TABLE IF NOT EXISTS storage (location INTEGER PRIMARY KEY, ingredient INTEGER REFERENCES ingredient(id) ON DELETE SET NULL, amount REAL NOT NULL)""")
        self.cur.execute("CREATE VIEW IF NOT EXISTS available_ingredients (ingredient, amount) AS SELECT ingredient, SUM(amount) FROM storage GROUP BY ingredient")
        self.cur.execute("""CREATE VIEW IF NOT EXISTS recipe_plus_storage (cocktail, ingredient, amount, stored, enough) AS
                            SELECT r.cocktail, r.ingredient, r.amount, ifnull(a.amount,0), ifnull(a.amount,0)>=r.amount AS enough FROM recipe AS r LEFT JOIN available_ingredients AS a ON a.ingredient=r.ingredient""")
        # Lookup and uniqueness indexes; recipe_combination also guarantees at
        # most one row per (cocktail, ingredient) pair.
        self.cur.execute("CREATE INDEX IF NOT EXISTS cocktail_name ON cocktail(name)")
        self.cur.execute("CREATE INDEX IF NOT EXISTS ingredient_name ON ingredient(name)")
        self.cur.execute("CREATE UNIQUE INDEX IF NOT EXISTS recipe_combination on recipe(cocktail, ingredient)")
        self.cur.execute("CREATE INDEX IF NOT EXISTS recipe_cocktail on recipe(cocktail)")
        self.cur.execute("CREATE INDEX IF NOT EXISTS recipe_ingredient on recipe(ingredient)")
        self.cur.execute("CREATE INDEX IF NOT EXISTS storage_ingredient on storage(ingredient)")
def delete_db(self):
self.cur.execute("DROP TABLE IF EXISTS cocktail")
self.cur.execute("DROP TABLE IF EXISTS ingredient")
self.cur.execute("DROP TABLE IF EXISTS recipe")
self.cur.execute("DROP INDEX IF EXISTS cocktail_name")
self.cur.execute("DROP INDEX IF EXISTS ingredient_name")
self.cur.execute("DROP INDEX IF EXISTS recipe_combination")
self.cur.execute("DROP INDEX IF EXISTS recipe_cocktail")
self.cur.execute("DROP INDEX IF EXISTS recipe_ingredient")
self.cur.execute("DROP INDEX IF EXISTS storage_ingredient")
self.cur.execute("DROP VIEW IF EXISTS available_ingredients")
# Adds a new cocktail and returns its id
# Setting force to True will overwrite the existing cocktail instead
# returns id or None
def add_cocktail(self, name, picture, ingredients, force=False):
self.cur.execute("SELECT id FROM cocktail WHERE name=?", (name,))
existing_id = self.cur.fetchone()
if existing_id is not None :
existing_id = existing_id[0]
if not force:
return None
self.cur.execute("UPDATE cocktail SET picture=?", (picture,))
self.cur.execute("DELETE FROM recipe WHERE cocktail=?", existing_id)
else:
self.cur.execute("INSERT INTO cocktail (name, picture) VALUES (?, ?)", (name, picture))
existing_id = self.cur.lastrowid
self.cur.executemany("INSERT INTO recipe (cocktail, ingredient, amount) SELECT :cocktail, ingredient.id, :amount FROM ingredient WHERE name=:ingredient",
[ {'cocktail': existing_id, 'ingredient': ingredient, 'amount': ingredients[ingredient]} for ingredient in ingredients])
self.conn.commit()
    # Deletes a cocktail by name
    def delete_cocktail(self, name):
        # NOTE(review): recipe rows only cascade if PRAGMA foreign_keys=ON
        # was issued on this connection (off by default in SQLite) — verify.
        self.cur.execute("DELETE FROM cocktail WHERE name=?", (name,))
        self.conn.commit()
    # Deletes a cocktail by id
    def delete_cocktail_id(self, cid):
        # NOTE(review): cascade to recipe rows depends on PRAGMA foreign_keys=ON.
        self.cur.execute("DELETE FROM cocktail WHERE id=?", (cid,))
        self.conn.commit()
# Adds a new ingredients and returns its id (or false if it already existed)
# Setting force to True will overwrite the existing ingredient instead
def add_ingredient(self, name, abv, force=False):
self.cur.execute("SELECT id FROM ingredient WHERE name=?", (name,))
existing_id = self.cur.fetchone()
if existing_id is not None:
existing_id = existing_id[0]
if not force:
return None
self.cur.execute("UPDATE ingredient SET abv=? WHERE id=?", (abv, existing_id))
self.conn.commit()
return existing_id
else:
self.cur.execute("INSERT INTO INGREDIENT (name, abv) VALUES (?, ?)", (name, abv))
self.conn.commit()
return self.cur.lastrowid
    # Deletes an ingredient by name, and cocktails that use it
    def delete_ingredient(self, name):
        # NOTE(review): the "and cocktails that use it" part relies on
        # ON DELETE CASCADE, which needs PRAGMA foreign_keys=ON — verify.
        self.cur.execute("DELETE FROM ingredient WHERE name=?", (name,))
        self.conn.commit()
    # Deletes an ingredient by id, and cocktails that use it
    def delete_ingredient_id(self, iid):
        # NOTE(review): cascade behaviour needs PRAGMA foreign_keys=ON — verify.
        self.cur.execute("DELETE FROM ingredient WHERE id=?", (iid,))
        self.conn.commit()
# Sets a storage location's ingredient and amount
def set_storage_contents(self, location, ingredient, amount = 0):
if ingredient is None:
self.cur.execute("INSERT OR REPLACE INTO storage (location, ingredient, amount) VALUES (:location, NULL, :amount)", {'location':location, 'amount':amount})
else:
self.cur.execute("INSERT OR REPLACE INTO storage (location, ingredient, amount) SELECT :location, id, :amount |
giubil/trackit | api/files/api/migrations/versions/b6533e0a4e58_make_the_aws_bucket_optional.py | Python | apache-2.0 | 605 | 0.016529 | """Make the AWS bucket optional.
Revision ID: b6533e0a4e58
Revises: eca04db5c363
Create Date: 2017-02-06 02:16:03.926797
"""
# revision identifiers, used by Alembic.
revision = 'b6533e0a4e58'
down_revision = 'eca04db5c363'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Schema upgrade: allow NULL values in aws_key.billing_bucket_name."""
    op.alter_column(
        'aws_key',
        column_name='billing_bucket_name',
        existing_type=sa.String(length=63),
        nullable=True,
    )
def downgrade():
    """Schema downgrade: make aws_key.billing_bucket_name mandatory again."""
    op.alter_column(
        'aws_key',
        column_name='billing_bucket_name',
        existing_type=sa.String(length=63),
        nullable=False,
    )
|
martinrusev/amonone | amon/apps/servers/tests/views_tests.py | Python | mit | 2,693 | 0.012254 | from django.test.client import Client
from django.test import TestCase
from django.urls import reverse
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.servers.models import server_model
class TestServerViews(TestCase):
    """Integration tests for the server list/add/edit/delete views."""

    def setUp(self):
        # Fresh user table plus an authenticated test client.
        User.objects.all().delete()
        self.c = Client()
        self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
        self.c.login(username='foo@test.com', password='qwerty')

    def tearDown(self):
        self.c.logout()
        self.user.delete()
        server_model.collection.remove()

    def all_servers_test(self):
        url = reverse('servers')
        response = self.c.get(url)
        assert response.status_code == 200

    def add_server_test(self):
        server_model.collection.remove()
        url = reverse('add_server')
        response = self.c.get(url)
        assert response.status_code == 200
        response = self.c.post(url, {'name': 'test', 'check_every': 60,'keep_data': 30})
        created_server = server_model.collection.find_one()
        eq_(created_server['name'], 'test')
        response_url = "{0}#{1}".format(reverse('servers'), created_server['_id'])
        self.assertRedirects(response, response_url)
        server_model.collection.remove()

    def edit_server_test(self):
        # FIX: restored the garbled first line of this test.
        server_model.collection.remove()
        server_model.collection.insert({'name': 'test' , 'check_every': 60,'keep_data': 30, "key": "test"})
        server = server_model.collection.find_one()
        url = reverse('edit_server', kwargs={'server_id': server['_id']})
        response = self.c.get(url)
        assert response.status_code == 200
        response = self.c.post(url, {'name': 'changetest', 'check_every': 300,'keep_data': 30})
        updated_server = server_model.collection.find_one()
        self.assertRedirects(response, reverse('servers'))
        eq_(updated_server['name'], 'changetest')
        eq_(updated_server['check_every'], 300)
        server_model.collection.remove()

    def delete_server_test(self):
        server_model.collection.remove()
        server_model.collection.insert({'name': 'test'})
        server = server_model.collection.find_one()
        url = reverse('delete_server', kwargs={'server_id': server['_id']})
        response = self.c.get(url)
        self.assertRedirects(response, reverse('servers'))
        deleted_server = server_model.collection.find().count()
        eq_(deleted_server, 0)
        server_model.collection.remove()
|
Acehaidrey/incubator-airflow | airflow/providers/google/marketing_platform/hooks/campaign_manager.py | Python | apache-2.0 | 11,618 | 0.001635 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Campaign Manager hook."""
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient import http
from googleapiclient.discovery import Resource, build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GoogleCampaignManagerHook(GoogleBaseHook):
"""Hook for Google Campaign Manager."""
_conn = None # type: Optional[Resource]
    def __init__(
        self,
        api_version: str = "v3.3",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        """
        :param api_version: dfareporting API version to build the client for.
        :param gcp_conn_id: Airflow connection id holding the GCP credentials.
        :param delegate_to: Account to impersonate via domain-wide delegation.
        :param impersonation_chain: Service account (or chain) to impersonate
            with short-term credentials.
        """
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        self.api_version = api_version
    def get_conn(self) -> Resource:
        """Retrieves connection to Campaign Manager (built lazily, cached on the hook)."""
        if not self._conn:
            http_authorized = self._authorize()
            # cache_discovery=False: avoid writing discovery documents to disk.
            self._conn = build(
                "dfareporting",
                self.api_version,
                http=http_authorized,
                cache_discovery=False,
            )
        return self._conn
def delete_report(self, profile_id: str, report_id: str) -> Any:
"""
Deletes a report by its ID.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
"""
response = (
self.get_conn()
.reports()
.delete(profileId=profile_id, reportId=report_id)
.execute(num_retries=self.num_retries)
)
return response
def insert_report(self, profile_id: str, report: Dict[str, Any]) -> Any:
"""
Creates a report.
:param profile_id: The DFA user profile ID.
:param report: The report resource to be inserted.
"""
response = (
self.get_conn()
.reports()
.insert(profileId=profile_id, body=report)
.execute(num_retries=self.num_retries)
)
return response
def list_reports(
self,
profile_id: str,
max_results: Optional[int] = None,
scope: Optional[str] = None,
sort_field: Optional[str] = None,
sort_order: Optional[str] = None,
) -> List[dict]:
"""
Retrieves list of reports.
:param profile_id: The DFA user profile ID.
:param max_results: Maximum number of results to return.
:param scope: The scope that defines which results are returned.
:param sort_field: The field by which to sort the list.
:param sort_order: Order of sorted results.
"""
reports: List[dict] = []
conn = self.get_conn()
request = conn.reports().list(
profileId=profile_id,
maxResults=max_results,
scope=scope,
sortField=sort_field,
sortOrder=sort_order,
)
while request is not None:
response = request.execute(num_retries=self.num_retries)
reports.extend(response.get("items", []))
request = conn.reports().list_next(previous_request=request, previous_response=response)
return reports
def patch_report(self, profile_id: str, report_id: str, update_mask: dict) -> Any:
"""
Updates a report. This method supports patch semantics.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
:param update_mask: The relevant portions of a report resource,
according to the rules of patch semantics.
"""
response = (
self.get_conn()
.reports()
.patch(profileId=profile_id, reportId=report_id, body=update_mask)
.execute(num_retries=self.num_retries)
)
return response
def run_report(self, profile_id: str, report_id: str, synchronous: Optional[bool] = None) -> Any:
"""
Runs a report.
:param profile_id: The DFA profile ID.
:param report_id: The ID of the report.
:param synchronous: If set and true, tries to run the report synchronously.
"""
response = (
self.get_conn()
| .reports()
.run(profileId=profile_id, reportId=report_id, synchronous=synchronous)
.execute(num_retries=self.num_retries)
)
return response
    def update_report(self, profile_id: str, report_id: str) -> Any:
        """
        Updates a report.

        :param profile_id: The DFA user profile ID.
        :param report_id: The ID of the report.
        """
        response = (
            self.get_conn()
            .reports()
            .update(profileId=profile_id, reportId=report_id)
            .execute(num_retries=self.num_retries)
        )
        return response
def get_report(self, file_id: str, profile_id: str, report_id: str) -> Any:
"""
Retrieves a report file.
:param profile_id: The DFA user profile ID.
:param report_id: The ID of the report.
:param file_id: The ID of the report file.
"""
response = (
self.get_conn()
.reports()
.files()
.get(fileId=file_id, profileId=profile_id, reportId=report_id)
.execute(num_retries=self.num_retries)
)
return response
    def get_report_file(self, file_id: str, profile_id: str, report_id: str) -> http.HttpRequest:
        """
        Retrieves a media part of report file.

        :param profile_id: The DFA user profile ID.
        :param report_id: The ID of the report.
        :param file_id: The ID of the report file.
        :return: googleapiclient.http.HttpRequest
        """
        # The request is returned UNEXECUTED so the caller controls how the
        # media content is downloaded.
        request = (
            self.get_conn()
            .reports()
            .files()
            .get_media(fileId=file_id, profileId=profile_id, reportId=report_id)
        )
        return request
@staticmethod
def _conversions_batch_request(
conversions: List[Dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
kind: str,
) -> Dict[str, Any]:
return {
"kind": kind,
"conversions": conversions,
"encryptionInfo": {
"kind": "dfareporting#encryptionInfo",
"encryptionEntityType": encryption_entity_type,
"encryptionEntityId": encryption_entity_id,
"encryptionSource": encryption_source,
},
}
def conversions_batch_insert(
self,
profile_id: str,
conversions: List[Dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
max_failed_inserts: int = 0,
) -> Any:
"""
Inserts conversions.
:param profile_id: User profile ID associated with this request.
:param conversions: Conversations to insert, should by type of Conversation:
https://developers.google.com/doubleclick-advertisers/v3.3/conversions#resource
:param encryption_entity_type: The encryption |
hasadna/OpenCommunity | src/communities/forms.py | Python | bsd-3-clause | 4,808 | 0.002912 | from communities.models import Community, SendToOption
from datetime import datetime, date, time
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from ocd.formfields import HTMLArea, OCSplitDateTime, OCCheckboxSelectMultiple
from users.models import OCUser, Membership
import floppyforms as forms
from haystack.forms import SearchForm, ModelSearchForm
class EditUpcomingMeetingForm(forms.ModelForm):
    """Edit the community's upcoming meeting (title, time, location, background)."""
    class Meta:
        model = Community
        fields = (
            'upcoming_meeting_title',
            'upcoming_meeting_location',
            'upcoming_meeting_scheduled_at',
            # 'voting_ends_at',
            'upcoming_meeting_comments',
        )

        widgets = {
            'upcoming_meeting_title': forms.TextInput,
            'upcoming_meeting_scheduled_at': OCSplitDateTime,
            'upcoming_meeting_location': forms.TextInput,
            # 'voting_ends_at': OCSplitDateTime,
            'upcoming_meeting_comments': HTMLArea,
        }

    def __init__(self, *args, **kwargs):
        super(EditUpcomingMeetingForm, self).__init__(*args, **kwargs)
        # Localized labels for the model-derived fields.
        self.fields['upcoming_meeting_title'].label = _('Title')
        self.fields['upcoming_meeting_scheduled_at'].label = _('Scheduled at')
        self.fields['upcoming_meeting_location'].label = _('Location')
        self.fields['upcoming_meeting_comments'].label = _('Background')

    """
    removed this function as we don't include voting_end_time in the form any more.
    # ----------------------------------------------------------------------------
    def clean(self):
        #prevent voting end time from illegal values (past time,
        #time after meeting schedule)
        try:
            voting_ends_at = self.cleaned_data['voting_ends_at']
        except KeyError:
            voting_ends_at = None
        try:
            meeting_time = self.cleaned_data['upcoming_meeting_scheduled_at']
        except KeyError:
            meeting_time = None
        if voting_ends_at:
            if voting_ends_at <= timezone.now():
                raise forms.ValidationError(_("End voting time cannot be set to the past"))
            if meeting_time and voting_ends_at > meeting_time:
                raise forms.ValidationError(_("End voting time cannot be set to after the meeting time"))
        return self.cleaned_data
    """
    def save(self):
        c = super(EditUpcomingMeetingForm, self).save()
        # NOTE(review): voting_ends_at is pinned to a fixed far-future moment
        # (2025-01-01 12:00) instead of a user-chosen deadline — confirm this
        # placeholder is intentional.
        c.voting_ends_at = datetime.combine(date(2025, 1, 1), time(12, 0, 0))
        c.save()
        return c
class PublishUpcomingMeetingForm(forms.ModelForm):
    """Pick which audience (SendToOption) the upcoming meeting is sent to."""
    send_to = forms.TypedChoiceField(label=_("Send to"), coerce=int,
                                     choices=SendToOption.choices,
                                     widget=forms.RadioSelect)

    class Meta:
        model = Community
        fields = ()
class EditUpcomingMeetingSummaryForm(forms.ModelForm):
    """Edit only the rich-text summary of the upcoming meeting."""
    class Meta:
        model = Community
        fields = (
            'upcoming_meeting_summary',
        )

        widgets = {
            'upcoming_meeting_summary': HTMLArea,
        }
class UpcomingMeetingParticipantsForm(forms.ModelForm):
    """Select meeting participants: board members (checkboxes), other members, free-text guests."""

    board = forms.MultipleChoiceField(widget=OCCheckboxSelectMultiple, required=False)

    class Meta:
        model = Community
        fields = (
            'upcoming_meeting_participants',
            'upcoming_meeting_guests',
        )

        widgets = {
            'upcoming_meeting_participants': OCCheckboxSelectMultiple,
            'upcoming_meeting_guests': forms.Textarea,
        }

    def __init__(self, *args, **kwargs):
        super(UpcomingMeetingParticipantsForm, self).__init__(*args, **kwargs)
        participants = self.instance.upcoming_meeting_participants.values_list(
            'id', flat=True)
        board_in = []
        board_choices = []
        for b in self.instance.get_board_members():
            board_choices.append((b.id, b.display_name,))
            # FIX: the membership test and append below were garbled in the
            # source; restored (pre-check currently participating board members).
            if b.id in participants:
                board_in.append(b.id)
        self.fields['board'].choices = board_choices
        self.initial['board'] = board_in
        self.fields['upcoming_meeting_participants'].queryset = self.instance.get_members()
        self.fields['upcoming_meeting_participants'].label = ""
class CommunitySearchForm(ModelSearchForm):
    # Placeholder subclass: inherits haystack's ModelSearchForm unchanged.
    pass

    # def search(self):
    #     # First, store the SearchQuerySet received from other processing.
    #     sqs = super(DateRangeSearchForm, self).search()
    #
    #     if not self.is_valid():
    #         return self.no_query_found()
    #
    #     return sqs
|
grammarware/slps | shared/python/MBGF.py | Python | bsd-3-clause | 5,360 | 0.031951 | #!/Library/Frameworks/Python.framework/Versions/3.1/bin/python3
import os, sys
sys.path.append(os.getcwd().split('slps')[0]+'slps/shared/python')
import slpsns, BGF3
import xml.etree.ElementTree as ET
cx = {}
class TopModel:
    """Common base for model elements: identity, dependency links, per-source data."""

    def getData(self, id):
        # Per-source payload for this id, or None when absent.
        return self.data.get(id)

    def who(self):
        # Short class name; used for reporting and for generated ids.
        return type(self).__name__

    def parsebasic(self, xml):
        """Read the shared attributes (id, depends, blocks) off an XML element."""
        global cx
        attrs = xml.attrib
        if 'id' in attrs:
            self.id = attrs['id']
        else:
            # No explicit id: number instances of this class via the global
            # per-class counter.
            kind = self.who()
            cx[kind] = cx.get(kind, 0) + 1
            self.id = kind + str(cx[kind])
        self.depends = attrs.get('depends', '')
        self.blocks = attrs.get('blocks', '')
        self.data = {}
        self.ids = {}
class SrcSimpleModel (TopModel):
    """Model whose per-source data is simply the text of each <state> element."""

    def parse(self, xml):
        self.parsebasic(xml)
        for state in xml.findall('state'):
            sources = state.attrib['src'].split(',')
            state_id = state.attrib.get('id')
            for src in sources:
                self.data[src] = state.text
                if state_id is not None:
                    self.ids[src] = state_id
class SrcProdModel (TopModel):
    """Base for models whose per-source data is [list of BGF productions, scope nodes]."""
    def getNTs(self,id):
        # Unique nonterminals defined by this source's productions, in order.
        nts = []
        for p in self.getProds(id):
            if p.nt not in nts:
                nts.append(p.nt)
        return nts
    def getProds(self,id):
        if id in self.data.keys():
            return self.data[id][0]
        else:
            return []
    def getScope(self,id):
        if id in self.data.keys():
            return self.data[id][1]
        else:
            return []
    def getData(self, id):
        # One-line rendering of all productions ("nt ← rhs | rhs; ...").
        if id in self.data.keys():
            return '; '.join(map(str,self.data[id][0])).replace(':\n ',' ← ').replace('\n ',' | ')
        else:
            return '∅'
    def parse(self, xml):
        self.parsebasic(xml)
        for ss in xml.findall('state'):
            for s in ss.attrib['src'].split(','):
                # FIX: the next two lines were garbled in the source; restored
                # the empty [productions, scope] pair and the 'production' tag.
                self.data[s] = [[], []]
                for p in ss.findall(slpsns.bgf_('production')):
                    xp = BGF3.Production()
                    xp.parse(p)
                    self.data[s][0].append(xp)
                self.data[s][1] = ss.findall('in/*')
#
# <sources>
# <src name="dcg">snapshot/dcg.bgf</src>
# <src name="sdf">snapshot/sdf.bgf</src>
# <src name="rsc">snapshot/rascal.bgf</src>
# </sources>
class Sources (SrcSimpleModel):
    # Maps each source name to the path of its grammar (BGF) file.
    def __init__(self, xml):
        self.parsebasic(xml)
        for s in xml.findall('src'):
            self.data[s.attrib['name']] = s.text
# <naming-convention>
# <default>l!</default>
# <src name="dcg">l!</src>
# <src name="sdf,rsc">C!</src>
# </naming-convention>
class NamingConvention (SrcSimpleModel):
    # Per-source naming convention, with a fall-back default.
    def __init__(self, xml):
        self.default = xml.findtext('default')
        self.parse(xml)
    def getSpecifics(self):
        return self.default
# <name-bind>
# <name>function</name>
# <src name="dcg">function</src>
# <src name="sdf,rsc">Function</src>
# </name-bind>
class NameBind (SrcSimpleModel):
    # Binds one abstract nonterminal name to its per-source spellings.
    def __init__(self, xml):
        self.nt = xml.findtext('name')
        self.parse(xml)
    def getSpecifics(self):
        return self.nt
# <width>
# <bgf:expression>
# <nonterminal>newline</nonterminal>
# </bgf:expression>
# <src name="dcg,sdf">+</src>
# <src name="rsc">!</src>
# <in>
# <nonterminal>function</nonterminal>
# </in>
# </width>
class Width (SrcSimpleModel):
    # Records how wide ("+", "!", ...) a given expression is in each source.
    def __init__(self, xml):
        self.expr = BGF3.Expression([])
        self.expr.parse(xml.findall(slpsns.bgf_('expression'))[0])
        # apply namemap!!!
        self.parse(xml)
        self.scope = xml.findall('in')
    def getSpecifics(self):
        return str(self.expr)
# <unification>
# <name>expr</name>
# <src name="dcg" labels="apply,binary">
# <bgf:production>
# ...
# </bgf:production>
# </src>
# </unification>
class Unification (SrcProdModel):
    # Productions of one nonterminal that differ per source and get unified.
    def __init__(self, xml):
        self.nt = xml.findtext('name')
        self.parse(xml)
    def getSpecifics(self):
        return 'n('+self.nt+')'
# <iteration>
# <label>binary</label>
# <name>expr</name>
# <separator>ops</separator>
# <src name="dcg">iterate</src>
# <src name="sdf,rsc">lassoc</src>
# </iteration>
class Iteration (SrcSimpleModel):
    """Iterated-nonterminal construct: optional label, nonterminal, optional separator."""

    def __init__(self, xml):
        self.label = xml.findtext('label') or ''
        self.nt = xml.findtext('name')
        self.sep = xml.findtext('separator')
        self.parse(xml)

    def getSpecifics(self):
        # "[label], n(nt), n(sep)" with the optional pieces dropped.
        pieces = []
        if self.label:
            pieces.append('[' + self.label + '], ')
        pieces.append('n(' + self.nt + ')')
        if self.sep:
            pieces.append(', n(' + self.sep + ')')
        return ''.join(pieces)
# <selectables>
# <src name="...">
# <bgf:production>
# ...
# <marked>
# ...
# </marked>
# ...
# </bgf:production>
# </src>
# </selectables>
class Selectables (SrcProdModel):
    # Productions containing <marked> selectable fragments; no extra state.
    def __init__(self, xml):
        self.parse(xml)
    def getSpecifics(self):
        return '—'
# <production-label>
# <src name="...">
# <bgf:production>
# <label>...</label>
# ...
# </bgf:production>
# </src>
# </production-label>
class ProdLabel (SrcProdModel):
    # Labelled productions per source; no extra state beyond the base class.
    def __init__(self, xml):
        self.parse(xml)
    def getSpecifics(self):
        return '—'
# <top-choice>
# <name>ops</name>
# <src name="ant">horizontal</src>
# <src name="dcg,sdf,rsc">vertical</src>
# </top-choice>
class TopChoice (SrcSimpleModel):
    # Whether a nonterminal's top-level choice is horizontal or vertical per source.
    def __init__(self, xml):
        self.nt = xml.findtext('name')
        self.parse(xml)
    def getSpecifics(self):
        return 'n('+self.nt+')'
# <folding>
# <name>apply</name>
# <src name="ant">
# <bgf:production>
# ...
# </bgf:production>
# </src>
# </folding>
class Folding (SrcProdModel):
    # Folding transformation; nonterminal taken from the first production's LHS.
    def __init__(self, xml):
        self.nt = xml.findtext('state/'+slpsns.bgf_('production')+'/nonterminal')
        self.parse(xml)
    def getSpecifics(self):
        return 'n('+self.nt+')'
|
manuelep/openshift_v3_test | wsgi/web2py/gluon/xmlrpc.py | Python | mit | 748 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
def handler(request, response, methods):
    """Dispatch one XML-RPC call from the request body and return the marshalled reply."""
    response.session_id = None  # no sessions for xmlrpc
    dispatcher = SimpleXMLRPCDispatcher(allow_none=True, encoding=None)
    for method in methods:
        dispatcher.register_function(method)
    dispatcher.register_introspection_functions()
    response.headers['Content-Type'] = 'text/xml'
    # Honour a custom _dispatch hook when the dispatcher class provides one.
    fallback_dispatch = getattr(dispatcher, '_dispatch', None)
    return dispatcher._marshaled_dispatch(request.body.read(), fallback_dispatch)
|
manassolanki/erpnext | erpnext/setup/doctype/email_digest/email_digest.py | Python | gpl-3.0 | 18,380 | 0.029489 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import fmt_money, formatdate, format_time, now_datetime, \
get_url_to_form, get_url_to_list, flt
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from frappe.core.doctype.user.user import STANDARD_USERS
import frappe.desk.notifications
from erpnext.accounts.utils import get_balance_on, get_count_on
user_specific_content = ["calendar_events", "todo_list"]
from frappe.model.document import Document
class EmailDigest(Document):
    def __init__(self, *args, **kwargs):
        super(EmailDigest, self).__init__(*args, **kwargs)
        # Resolve the reporting window for this digest's frequency up front.
        self.from_date, self.to_date = self.get_from_to_date()
        self.set_dates()
        # Per-instance cache — presumably filled by the accounting helpers;
        # NOTE(review): confirm where _accounts is populated.
        self._accounts = {}
        self.currency = frappe.db.get_value("Company", self.company, "default_currency")
    def get_users(self):
        """get list of users"""
        # All non-standard, non-website users, enabled ones first.
        user_list = frappe.db.sql("""
            select name, enabled from tabUser
            where name not in ({})
            and user_type != "Website User"
            order by enabled desc, name asc""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)

        if self.recipient_list:
            recipient_list = self.recipient_list.split("\n")
        else:
            recipient_list = []
        for p in user_list:
            # Mark users already subscribed to this digest.
            p["checked"] = p["name"] in recipient_list and 1 or 0

        # Result is handed back through the response object, not returned.
        frappe.response['user_list'] = user_list
    def send(self):
        # send email only to enabled users
        valid_users = [p[0] for p in frappe.db.sql("""select name from `tabUser`
            where enabled=1""")]
        recipients = filter(lambda r: r in valid_users,
            self.recipient_list.split("\n"))

        original_user = frappe.session.user

        if recipients:
            for user_id in recipients:
                # Render the digest as each recipient so their language and
                # permissions apply to the content.
                frappe.set_user(user_id)
                frappe.set_user_lang(user_id)
                msg_for_this_recipient = self.get_msg_html()
                if msg_for_this_recipient:
                    frappe.sendmail(
                        recipients=user_id,
                        subject=_("{0} Digest").format(self.frequency),
                        message=msg_for_this_recipient,
                        reference_doctype = self.doctype,
                        reference_name = self.name,
                        unsubscribe_message = _("Unsubscribe from this Email Digest"))

        # Restore the session user/language switched inside the loop.
        frappe.set_user(original_user)
        frappe.set_user_lang(original_user)
    def get_msg_html(self):
        """Build email digest content"""
        frappe.flags.ignore_account_permission = True
        from erpnext.setup.doctype.email_digest.quotes import get_random_quote

        context = frappe._dict()
        context.update(self.__dict__)

        self.set_title(context)
        self.set_style(context)
        self.set_accounting_cards(context)

        # Each section is rendered only when enabled on this digest.
        if self.get("calendar_events"):
            context.events, context.event_count = self.get_calendar_events()

        if self.get("todo_list"):
            context.todo_list = self.get_todo_list()
            context.todo_count = self.get_todo_count()

        if self.get("notifications"):
            context.notifications = self.get_notifications()

        if self.get("issue"):
            context.issue_list = self.get_issue_list()
            context.issue_count = self.get_issue_count()

        if self.get("project"):
            context.project_list = self.get_project_list()
            context.project_count = self.get_project_count()

        quote = get_random_quote()
        context.quote = {"text": quote[0], "author": quote[1]}

        # Nothing to report: return None so send() skips this recipient.
        # NOTE(review): ignore_account_permission is NOT reset on this early
        # return — confirm whether that is intentional.
        if not (context.events or context.todo_list or context.notifications or context.cards):
            return None

        frappe.flags.ignore_account_permission = False

        # style
        return frappe.render_template("erpnext/setup/doctype/email_digest/templates/default.html",
            context, is_path=True)
    def set_title(self, context):
        """Set digest title"""
        # Unknown frequency values leave title/subtitle unset on the context.
        if self.frequency=="Daily":
            context.title = _("Daily Reminders")
            context.subtitle = _("Pending activities for today")
        elif self.frequency=="Weekly":
            context.title = _("This Week's Summary")
            context.subtitle = _("Summary for this week and pending activities")
        elif self.frequency=="Monthly":
            context.title = _("This Month's Summary")
            context.subtitle = _("Summary for this month and pending activities")
def set_style(self, context):
"""Set standard digest style"""
context.text_muted = '#8D99A6'
context.text_color = '#36414C'
context.h1 = 'margin-bottom: 30px; margin-top: 40px; font-weight: 400; font-size: 30px;'
context.h2 = 'margin-bottom: 30px; margin-top: -20px; font-weight: 400; font-size: 20px;'
context.label_css = '''display: inline-block; color: {text_muted};
padding: 3px 7px; margin-right: 7px;'''.format(text_muted = context.text_muted)
context.section_head = 'margin-top: 60 | px; font-size: 16px;'
context.line_item = 'padding: 5px 0px; margin: 0; border-bottom: 1px solid #d1d8dd;'
context.link_css = 'color: {text_color}; text-decoration: none;'.format(text_color = context.text_color)
def get_notifications(self):
"""Get notifications for user"""
notifications = frappe.desk.notifications.get_notifica | tions()
notifications = sorted(notifications.get("open_count_doctype", {}).items(),
lambda a, b: 1 if a[1] < b[1] else -1)
notifications = [{"key": n[0], "value": n[1],
"link": get_url_to_list(n[0])} for n in notifications if n[1]]
return notifications
    def get_calendar_events(self):
        """Get calendar events for given user"""
        from frappe.desk.doctype.event.event import get_events
        events = get_events(self.future_from_date.strftime("%Y-%m-%d"),
            self.future_to_date.strftime("%Y-%m-%d")) or []

        event_count = 0
        for i, e in enumerate(events):
            e.starts_on_label = format_time(e.starts_on)
            e.ends_on_label = format_time(e.ends_on) if e.ends_on else None
            # NOTE(review): `e.starts` looks suspicious — the lines above use
            # `e.starts_on`; confirm the attribute actually exists upstream.
            e.date = formatdate(e.starts)
            e.link = get_url_to_form("Event", e.name)
            event_count += 1

        return events, event_count
    def get_todo_list(self, user_id=None):
        """Get to-do list"""
        if not user_id:
            user_id = frappe.session.user

        # Open ToDos owned by or assigned by the user, highest priority first.
        todo_list = frappe.db.sql("""select *
            from `tabToDo` where (owner=%s or assigned_by=%s) and status="Open"
            order by field(priority, 'High', 'Medium', 'Low') asc, date asc limit 20""",
            (user_id, user_id), as_dict=True)

        for t in todo_list:
            t.link = get_url_to_form("ToDo", t.name)

        return todo_list
    def get_todo_count(self, user_id=None):
        """Get count of Todo"""
        if not user_id:
            user_id = frappe.session.user

        return frappe.db.sql("""select count(*) from `tabToDo`
            where status='Open' and (owner=%s or assigned_by=%s)""",
            (user_id, user_id))[0][0]
    def get_issue_list(self, user_id=None):
        """Get issue list"""
        if not user_id:
            user_id = frappe.session.user

        meta = frappe.get_meta("Issue")
        # Respect role permissions: users without read access get None.
        role_permissions = frappe.permissions.get_role_permissions(meta, user_id)
        if not role_permissions.get("read"):
            return None

        issue_list = frappe.db.sql("""select *
            from `tabIssue` where status in ("Replied","Open")
            order by modified asc limit 10""", as_dict=True)

        for t in issue_list:
            t.link = get_url_to_form("Issue", t.name)

        return issue_list
    def get_issue_count(self):
        """Get count of Issue"""
        return frappe.db.sql("""select count(*) from `tabIssue`
            where status in ('Open','Replied') """)[0][0]
def get_project_list(self, user_id=None):
"""Get project list"""
if not user_id:
user_id = frappe.session.user
project_list = frappe.db.sql("""select *
from `tabProject` where status='Open' and project_type='External'
order by modified asc limit 10""", as_dict=True)
for t in project_list:
t.link = get_url_to_form("Issue", t.name)
return project_list
    def get_project_count(self):
        """Get count of Project"""
        return frappe.db.sql("""select count(*) from `tabProject`
            where status='Open' and project_type='External'""")[0][0]
def set_accounting_cards(self, context):
"""Create accounting cards if checked"""
cache = frappe.cache()
context.cards = []
for key in ("income", "expenses_booked", "income_year_to_date","expense_year_to_date",
"new_quotations","pending_quotations","sales_order","purchase_order","pending_sales_orders","pending_purchase_orders",
"invoiced_amount", "payables", "bank_balance", "credit_balance"):
if self.get(key):
cache_key = "email_digest:card:{0}:{1}:{2}:{3}".format(self.company, self.frequency, key, self.f |
vincentschen/tab_clustering | server/server.py | Python | mit | 645 | 0.012403 | from flask import Flask, request
import cluster
import json
app = Flask(__name__)
@app.route('/cluster/', methods=['GET', 'POST'])
def similarities():
if request.method == 'POST':
data = json.loads(request.data)
docs = data['docs']
# docs = request.form.getlist('docs')
print "docs: ", len(docs)
print type(docs)
# input_ = request.form['input']
inp | ut_ = data['input']
# print "input:", input_
sims = cluster.compute_similarity(docs, input_)
response = json.dumps(sims)
print response
return response
|
biosustain/memote | src/memote/support/consistency_helpers.py | Python | apache-2.0 | 11,239 | 0.000356 | # -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for stoichiometric consistency checks."""
from __future__ import absolute_import, division
import logging
from builtins import dict, zip
from collections import defaultdict
import numpy as np
import sympy
from numpy.linalg import svd
from optlang.symbolics import add
from pylru import lrudecorator
from six import iteritems, itervalues
from memote.support.helpers impor | t find_biomass_reaction
__all__ = ("stoichiometry_matrix", "nullspace")
LOGGER = logging.getLogger(__name__)
def add_reaction_constraints(model, reactions, Constraint):
    """
    Add the stoichiometric coefficients as constraints.

    For every reaction an equality constraint (lb = ub = 0) is added whose
    expression is the reaction's column of the stoichiometry matrix dotted
    with the per-metabolite variables already present in ``model``.

    Parameters
    ----------
    model : optlang.Model
        The transposed stoichiometric matrix representation.
    reactions : iterable
        Container of `cobra.Reaction` instances.
    Constraint : optlang.Constraint
        The constraint class for the specific interface.

    """
    constraints = []
    for rxn in reactions:
        # Variables are keyed by metabolite id; c is the stoichiometric
        # coefficient of that metabolite in this reaction.
        expression = add(
            [c * model.variables[m.id] for m, c in rxn.metabolites.items()]
        )
        constraints.append(Constraint(expression, lb=0, ub=0, name=rxn.id))
    # Add all constraints in one call rather than one by one.
    model.add(constraints)
def stoichiometry_matrix(metabolites, reactions):
    """
    Build the 2D stoichiometry matrix for a collection of reactions.

    The ordering of ``metabolites`` (rows) and ``reactions`` (columns) is
    preserved. Every metabolite referenced by the reactions must appear in
    ``metabolites``.

    Parameters
    ----------
    reactions : iterable
        A somehow ordered list of unique reactions.
    metabolites : iterable
        A somehow ordered list of unique metabolites.

    Returns
    -------
    numpy.array
        The 2D array that represents the stoichiometry matrix.
    dict
        A dictionary mapping metabolites to row indexes.
    dict
        A dictionary mapping reactions to column indexes.

    """
    met_index = {met: row for row, met in enumerate(metabolites)}
    rxn_index = dict()
    matrix = np.zeros((len(metabolites), len(reactions)))
    for col, rxn in enumerate(reactions):
        rxn_index[rxn] = col
        for met, coef in iteritems(rxn.metabolites):
            matrix[met_index[met], col] = coef
    return matrix, met_index, rxn_index
def rank(matrix, atol=1e-13, rtol=0):
    """
    Estimate the rank (dimension of the column space) of ``matrix``.

    The estimate is derived from the singular value decomposition: singular
    values at or above the combined tolerance count towards the rank.

    Parameters
    ----------
    matrix : ndarray
        The matrix should be at most 2-D. A 1-D array with length k
        will be treated as a 2-D with shape (1, k)
    atol : float
        Absolute tolerance; singular values below ``atol`` count as zero.
    rtol : float
        Relative tolerance; singular values below ``rtol`` times the largest
        singular value count as zero.

    Notes
    -----
    With both tolerances positive the effective cutoff is
    ``max(atol, rtol * smax)``.

    Returns
    -------
    int
        The estimated rank of the matrix.

    See Also
    --------
    numpy.linalg.matrix_rank
        Essentially the same computation, but without an absolute tolerance
        option.

    """
    two_d = np.atleast_2d(matrix)
    singular_values = svd(two_d, compute_uv=False)
    cutoff = max(atol, rtol * singular_values[0])
    return int(np.count_nonzero(singular_values >= cutoff))
def nullspace(matrix, atol=1e-13, rtol=0.0):  # noqa: D402
    """
    Compute an approximate basis for the null space (kernel) of a matrix.

    Based on the singular value decomposition: the right singular vectors
    belonging to singular values below the combined tolerance span the
    kernel.

    Parameters
    ----------
    matrix : ndarray
        The matrix should be at most 2-D. A 1-D array with length k
        will be treated as a 2-D with shape (1, k)
    atol : float
        Absolute tolerance; singular values below ``atol`` count as zero.
    rtol : float
        Relative tolerance; singular values below ``rtol`` times the largest
        singular value count as zero.

    Notes
    -----
    With both tolerances positive the effective cutoff is
    ``max(atol, rtol * smax)``.

    Returns
    -------
    ndarray
        For an input of shape (m, k), an array of shape ``(k, n)`` whose
        columns form a basis of the kernel, n being its estimated dimension.

    References
    ----------
    Adapted from:
    https://scipy.github.io/old-wiki/pages/Cookbook/RankNullspace.html

    """
    two_d = np.atleast_2d(matrix)
    _, singular_values, right_vectors = svd(two_d)
    cutoff = max(atol, rtol * singular_values[0])
    significant = (singular_values >= cutoff).sum()
    return right_vectors[significant:].conj().T
@lrudecorator(size=2)
def get_interface(model):
    """
    Return the interface specific classes.

    The result is memoized (LRU, two entries) so repeated calls with the
    same model do not re-resolve the solver interface.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    tuple
        The solver interface's ``(Model, Constraint, Variable, Objective)``
        classes, in that order.

    """
    return (
        model.solver.interface.Model,
        model.solver.interface.Constraint,
        model.solver.interface.Variable,
        model.solver.interface.Objective,
    )
@lrudecorator(size=2)
def get_internals(model):
    """
    Return non-boundary reactions and their metabolites.

    Boundary reactions are unbalanced by their nature. They are excluded
    here, together with any detected biomass reaction, and only the
    remaining reactions are returned. Results are memoized (LRU, size 2).

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    biomass = set(find_biomass_reaction(model))
    if not biomass:
        LOGGER.warning(
            "No biomass reaction detected. Consistency test results "
            "are unreliable if one exists."
        )
    excluded = set(model.boundary) | biomass
    return set(model.reactions) - excluded
def create_milp_problem(kernel, metabolites, Model, Variable, Constraint, Objective):
"""
Create the MILP as defined by equation (13) in [1]_.
Parameters
----------
kernel : numpy.array
A 2-dimensional array that represents the left nullspace of the
stoichiometric matrix which is the nullspace of the transpose of the
stoichiometric matrix.
metabolites : iterable
The metabolites in the nullspace. The length of this vector must equal
the first dimension of the nullspace.
Model : optlang.Model
Model class for a specific optlang interface.
Variable : optlang.Variable
Variable class for a specific optlang interface.
Constraint : optlang.Constraint
Constraint class for a specific optlang interface.
Objective : optlang.Objective
Objective class for a specific optlang interface.
References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245.
"""
assert (
len(metabolites) == kernel.shape[0]
), "metabolite vect |
mas178/Fragments | bot/twitter2.py | Python | mit | 934 | 0.004283 | from __future__ import print_function
import random
import boto3
from requests_oauthlib import OAuth1Session
def lambda_handler(event, context):
    """AWS Lambda entry point: tweet a random quote stored in S3.

    Reads ``quotes.txt`` (tab-separated: quote, author, hashtag) and
    ``token.tsv`` (key=value OAuth credentials) from the ``gengo-hacker``
    bucket, filters quotes to tweetable length, and posts one at random.
    """
    bucket = boto3.resource('s3').Bucket('gengo-hacker')
    # Quotes
    quotes = bucket.Object('quotes.txt').get()['Body'].read().decode('utf-8')
    quotes = [q.split("\t") for q in quotes.split("\n")]
    quotes = ["{} by {} #{}".format(q[0], q[1], q[2]) for q in quotes if len(q) == 3 and all([item != '' for item in q])]
    # BUG FIX: the entries are already unicode (decoded above), so
    # unicode(q, 'utf-8') raised "TypeError: decoding Unicode is not
    # supported" under Python 2. len() on the unicode string is what was
    # intended (the old 140-character tweet limit).
    quotes = [q for q in quotes if len(q) <= 140]
    # AUTH
    token = bucket.Object('token.tsv').get()['Body'].read().decode('utf-8')
    token = dict([t.split('=') for t in token.split("\n")])
    tw = OAuth1Session(token['consumer_key'], token['consumer_secret'], token['access_token_key'], token['access_token_secret'])
    return tw.post('https://api.twitter.com/1.1/statuses/update.json', params={"status": random.choice(quotes)})
|
zmsp/AIY-data-center-assistant | src/aiy/cloudspeech.py | Python | apache-2.0 | 4,262 | 0.001408 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An API to access Google Speech recognition service."""
import os.path
import aiy._apis._speech
import aiy.audio
import aiy.voicehat
# Global variables. They are lazily initialized.
_cloudspeech_recognizer = None
# Expected location of the CloudSpeech credentials file:
CLOUDSPEECH_CREDENTIALS_FILE = os.path.expanduser('~/cloud_speech.json')
class _CloudSpeechRecognizer(object):
    """A speech recognizer backed by the Google CloudSpeech APIs.

    Wraps a CloudSpeechRequest and the VoiceHat recorder; optionally gates
    recognition on a list of hotwords.
    """

    def __init__(self, credentials_file):
        # Request object that streams audio to the CloudSpeech service.
        self._request = aiy._apis._speech.CloudSpeechRequest(credentials_file)
        # Shared VoiceHat audio recorder.
        self._recorder = aiy.audio.get_recorder()
        # Lower-cased hotwords; empty list means "no hotword gating".
        self._hotwords = []

    def recognize(self):
        """Recognizes the user's speech and transcript it into text.

        This function listens to the user's speech via the VoiceHat speaker. Then it
        contacts Google CloudSpeech APIs and returns a textual transcript if possible.
        If hotword list is populated this method will only respond if hotword is said.
        """
        self._request.reset()
        self._request.set_endpointer_cb(self._endpointer_callback)
        self._recorder.add_processor(self._request)
        text = self._request.do_request().transcript
        if self._hotwords and text:
            text = text.lower()
            # Find the hotword that occurs earliest in the transcript.
            loc_min = len(text)
            hotword_found = ''
            for hotword in self._hotwords:
                loc_temp = text.find(hotword)
                if loc_temp > -1 and loc_min > loc_temp:
                    loc_min = loc_temp
                    hotword_found = hotword
            if hotword_found:
                # Return only the text spoken after the hotword.
                parse_text = text.split(hotword_found)[1]
                return parse_text.strip()
            else:
                return ''
        else:
            # No hotwords configured: return the raw transcript.
            # Hotwords configured but nothing heard: return ''.
            return '' if self._hotwords else text

    def expect_hotword(self, hotword_list):
        """Enables hotword detection for a selected list

        This method is optional and populates the list of hotwords
        to be used for hotword activation.

        For example, to create a recognizer for Google:

            recognizer.expect_hotword('Google')
            recognizer.expect_hotword(['Google','Raspberry Pi'])
        """
        # Accepts either a single string or a list of strings.
        if isinstance(hotword_list, list):
            for hotword in hotword_list:
                self._hotwords.append(hotword.lower())
        else:
            self._hotwords.append(hotword_list.lower())

    def expect_phrase(self, phrase):
        """Explicitly tells the engine that the phrase is more likely to appear.

        This method is optional and makes speech recognition more accurate
        especially when certain commands are expected.

        For example, a light control system may want to add the following commands:

            recognizer.expect_phrase('light on')
            recognizer.expect_phrase('light off')
        """
        self._request.add_phrase(phrase)

    def _endpointer_callback(self):
        # Called when end-of-utterance is detected; stop feeding audio.
        self._recorder.remove_processor(self._request)
def get_recognizer():
    """Return the process-wide recognizer backed by Google CloudSpeech APIs.

    The recognizer is created lazily on first use with the default
    credentials file and reused afterwards.

    Sample usage:

        button = aiy.voicehat.get_button()
        recognizer = aiy.cloudspeech.get_recognizer()
        while True:
            print('Press the button and speak')
            button.wait_for_press()
            text = recognizer.recognize()
            if 'light on' in text:
                turn_on_light()
            elif 'light off' in text:
                turn_off_light()
    """
    global _cloudspeech_recognizer
    if _cloudspeech_recognizer is None:
        _cloudspeech_recognizer = _CloudSpeechRecognizer(CLOUDSPEECH_CREDENTIALS_FILE)
    return _cloudspeech_recognizer
|
salopensource/sal | sal/tests/test_decorators.py | Python | apache-2.0 | 6,760 | 0.001183 | """General functional tests for the server app."""
from django.http.response import Http404, HttpResponseServerError
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.test import TestCase, RequestFactory
from django.urls import reverse
from sal.decorators import (
access_required, has_access, is_global_admin, staff_required, required_level, ProfileLevel,
key_auth_required)
from sal.decorators import get_business_unit_by as func_get_business_unit
from server.models import BusinessUnit, MachineGroup, Machine
SUCCESS = 'Nice work, amigo.'
class AccessFunctionTest(TestCase):
    """Unit tests for the access-check helper functions in sal.decorators."""

    fixtures = ['user_fixture.json', 'business_unit_fixtures.json', 'machine_group_fixtures.json',
                'machine_fixtures.json']

    def setUp(self):
        # pk=2 is a regular user; pk=1 is promoted to Global Admin below.
        self.user = User.objects.get(pk=2)
        self.ga_user = User.objects.get(pk=1)
        user_profile = self.ga_user.userprofile
        user_profile.level = 'GA'
        user_profile.save()
        self.business_unit = BusinessUnit.objects.get(pk=1)
        self.business_unit2 = BusinessUnit.objects.get(pk=2)

    def test_no_membership_has_access(self):
        # A user who is not a member of the BU must be denied.
        self.assertFalse(has_access(self.user, self.business_unit))

    def test_member_has_access(self):
        self.business_unit.users.add(self.user)
        self.assertTrue(has_access(self.user, self.business_unit))

    def test_ga_has_access(self):
        # Global admins have access regardless of membership.
        self.assertTrue(has_access(self.ga_user, self.business_unit))

    def test_all_bu_has_access(self):
        """Test has_access with user who has all BU membership.

        If user is member of all bus, ensure access to 'all' dashboards
        """
        # Currently, "None" business units are only used by the
        # inventory app.
        self.assertFalse(has_access(self.user, None))
        self.business_unit.users.add(self.user)
        self.business_unit2.users.add(self.user)
        self.assertTrue(has_access(self.user, None))

    def test_get_business_unit(self):
        # Looking up a BusinessUnit returns (instance, owning BU) — both the BU.
        self.assertEqual(
            (self.business_unit, self.business_unit), func_get_business_unit(BusinessUnit, bu_id=1))

    def test_get_business_unit_from_mg(self):
        machine_group = MachineGroup.objects.get(pk=1)
        self.assertEqual(
            (machine_group, self.business_unit), func_get_business_unit(MachineGroup, mg_id=1))

    def test_get_business_unit_from_machine(self):
        machine = Machine.objects.get(pk=1)
        # Machines may be looked up by pk or by serial number.
        self.assertEqual(
            (machine, self.business_unit), func_get_business_unit(Machine, machine_id=1))
        self.assertEqual(
            (machine, self.business_unit), func_get_business_unit(Machine, machine_id='C0DEADBEEF'))

    def test_get_business_unit_errors(self):
        # Unknown instances raise Http404; unknown kwargs raise ValueError.
        self.assertRaises(
            Http404, func_get_business_unit, MachineGroup, mg_id=500)
        self.assertRaises(
            Http404, func_get_business_unit, Machine, machine_id='DOES_NOT_EXIST')
        self.assertRaises(
            ValueError, func_get_business_unit, Machine, not_the_kwarg_you_are_looking_for=0)

    def test_is_global_admin(self):
        self.assertTrue(is_global_admin(self.ga_user))
        self.assertFalse(is_global_admin(self.user))
class FunctionDecoratorsTest(TestCase):
    """Test the view function access decorators.

    NOTE(review): stray '|' characters had corrupted two lines in
    test_required_level; they are removed here so the module parses again.
    """

    fixtures = ['user_fixture.json', 'business_unit_fixtures.json']

    def setUp(self):
        self.factory = RequestFactory()
        self.normal_user = User.objects.get(pk=2)
        self.staff_user = User.objects.get(pk=1)

        # A minimal view protected by the decorator under test.
        @access_required(BusinessUnit)
        def test_view(request, **kwargs):
            return SUCCESS
        self.test_view = test_view

    def test_access_required_for_nonmembers(self):
        request = self.factory.get('/test/')
        # functools.wraps has a conniption because this is missing.
        request.__name__ = 'Test'
        request.user = self.normal_user
        self.assertRaises(PermissionDenied, self.test_view, request, bu_id=2)

    def test_access_required_with_member(self):
        request = self.factory.get('/test/')
        # functools.wraps has a conniption because this is missing.
        request.__name__ = 'Test'
        BusinessUnit.objects.get(pk=1).users.add(self.normal_user)
        request.user = self.normal_user
        response = self.test_view(request, bu_id=1)
        self.assertEqual(response, SUCCESS)

    def test_access_required_with_ga(self):
        request = self.factory.get('/test/')
        # functools.wraps has a conniption because this is missing.
        request.__name__ = 'Test'
        user_profile = self.staff_user.userprofile
        user_profile.level = 'GA'
        user_profile.save()
        request.user = self.staff_user
        response = self.test_view(request, bu_id=1)
        self.assertEqual(response, SUCCESS)

    def test_key_auth_required(self):
        @key_auth_required
        def test_view(request, *args, **kwargs):
            return SUCCESS
        # TODO: Needs to be written to test for:
        # - BASIC_AUTH not set (use_auth defaults to True)
        # - BASIC_AUTH = false
        # - BASIC_AUTH = True
        pass

    def test_required_level(self):
        @required_level(ProfileLevel.global_admin, ProfileLevel.read_write)
        def test_view(request, *args, **kwargs):
            return SUCCESS

        # An ordinary user gets redirected to '/'.
        request = self.factory.get('/test/')
        request.user = self.normal_user
        response = test_view(request)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/')

        # Elevate user to RW status.
        request = self.factory.get('/test/')
        user_profile = self.normal_user.userprofile
        user_profile.level = 'RW'
        user_profile.save()
        request.user = self.normal_user
        response = test_view(request)
        self.assertEqual(response, SUCCESS)

        # Elevate staff user to GA status.
        request = self.factory.get('/test/')
        user_profile = self.staff_user.userprofile
        user_profile.level = 'GA'
        user_profile.save()
        request.user = self.staff_user
        response = test_view(request)
        self.assertEqual(response, SUCCESS)

    def test_staff_required(self):
        @staff_required
        def test_view(request, *args, **kwargs):
            return SUCCESS

        # Non-staff users get redirected to '/'.
        request = self.factory.get('/test/')
        request.user = self.normal_user
        response = test_view(request)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/')

        request = self.factory.get('/test/')
        request.user = self.staff_user
        response = test_view(request)
        self.assertEqual(response, SUCCESS)
relrod/pib | bot.py | Python | bsd-3-clause | 1,205 | 0.033195 | #!/usr/bin/env python
from django.core.management import setup_environ
import settings
setup_environ(settings)
import socket
from trivia.models import *
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((settings.IRC_SERVER, settings.IRC_PORT))
def send(msg):
irc.send(msg + "\r\n")
print "{SEN | T} " + msg
return
def msg(user, msg):
    """Send a PRIVMSG to a user or channel."""
    send("PRIVMSG {0} :{1}".format(user, msg))
    return
def processline(line):
    """Handle a single raw IRC line.

    Answers PING with PONG, and replies to the '!questions' command with the
    Question list from the database. A stray '|' that corrupted the
    '!questions' check has been removed.
    """
    # "<prefix params> :<trailing>" -> args = prefix words + trailing text.
    parts = line.split(' :', 1)
    args = parts[0].split(' ')
    if (len(parts) > 1):
        args.append(parts[1])
    if args[0] == "PING":
        send("PONG :" + args[1])
        return
    try:
        # For a PRIVMSG, args[2] is the target channel and args[3] the text.
        if args[3] == "!questions":
            questions = str(Question.objects.all())
            msg(args[2], questions)
            return
    except IndexError:
        # Line had fewer fields than a PRIVMSG; nothing to do.
        return
    # When we're done, remember to return.
    return
send("USER " + (settings.IRC_NICKNAME + " ")*4)
send("NICK " + settings.IRC_NICKNAME)
for channel in settings.IRC_CHANNELS:
send("JOIN " + channel)
while True:
# EXIST
line = irc.recv(1024).rstrip()
if "\r\n" in line:
linesep = line.split()
for l in linesep:
processline(l)
continue
processline(line)
|
regular/pyglet-avbin-optimizations | pyglet/input/darwin_hid.py | Python | bsd-3-clause | 17,379 | 0.005179 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.libs.darwin import carbon, _oscheck, create_cfstring
from pyglet.libs.darwin.constants import *
from base import Device, Control, AbsoluteAxis, RelativeAxis, Button
from base import Joystick, AppleRemote
from base import DeviceExclusiveException
# non-broken c_void_p
void_p = ctypes.POINTER(ctypes.c_int)
class CFUUIDBytes(ctypes.Structure):
    # Mirrors CoreFoundation's CFUUIDBytes: sixteen raw uint8 fields
    # (byte0 .. byte15).
    _fields_ = [('byte%d' % i, ctypes.c_uint8) for i in range(16)]
mach_port_t = void_p
io_iterator_t = void_p
kern_return_t = ctypes.c_int
IOReturn = ctypes.c_uint
CFDictionaryRef = void_p
CFMutableDictionaryRef = void_p
CFArrayRef = void_p
CFStringRef = void_p
CFUUIDRef = ctypes.POINTER(CFUUIDBytes)
AbsoluteTime = ctypes.c_double
HRESULT = ctypes.c_int
REFIID = CFUUIDBytes
IOHIDElementType = ctypes.c_int
kIOHIDElementTypeInput_Misc = 1
kIOHIDElementTypeInput_Button = 2
kIOHIDElementTypeInput_Axis = 3
kIOHIDElementTypeInput_ScanCodes = 4
kIOHIDElementTypeOutput = 129
kIOHIDElementTypeFeature = 257
kIOHIDElementTypeCollection = 513
IOHIDElementCookie = ctypes.c_void_p
# Full list in IOHIDUsageTables.h
kHIDPage_GenericDesktop = 0x01
kHIDUsage_GD_Joystick = 0x04
kHIDUsage_GD_GamePad = 0x05
kHIDUsage_GD_Keyboard = 0x06
kHIDUsage_GD_Keypad = 0x07
kHIDUsage_GD_MultiAxisController = 0x08
kHIDUsage_GD_SystemAppMenu = 0x86
kHIDUsage_GD_SystemMenu = 0x89
kHIDUsage_GD_SystemMenuRight = 0x8A
kHIDUsage_GD_SystemMenuLeft = 0x8B
kHIDUsage_GD_SystemMenuUp = 0x8C
kHIDUsage_GD_SystemMenuDown = 0x8D
MACH_PORT_NULL = 0
kIOHIDDeviceKey = "IOHIDDevice"
kIOServicePlane = "IOService"
kIOHIDProductIDKey = "ProductID"
kCFNumberIntType = 9
kIOHIDOptionsTypeSeizeDevice = 1
kIOReturnExclusiveAccess = 0xe00002c5
carbon.CFUUIDGetConstantUUIDWithBytes.restype = CFUUIDRef
kIOHIDDeviceUserClientTypeID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xFA, 0x12, 0xFA, 0x38, 0x6F, 0x1A, 0x11, 0xD4,
0xBA, 0x0C, 0x00, 0x05, 0x02, 0x8F, 0x18, 0xD5)
kIOCFPlugInInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xC2, 0x44, 0xE8, 0x58, 0x10, 0x9C, 0x11, 0xD4,
0x91, 0xD4, 0x00, 0x50, 0xE4, 0xC6, 0x42, 0x6F)
kIOHIDDeviceInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0x78, 0xBD, 0x42, 0x0C, 0x6F, 0x14, 0x11, 0xD4,
0x94, 0x74, 0x00, 0x05, 0x02, 0x8F, 0x18, 0xD5)
IOHIDCallbackFunction = ctypes.CFUNCTYPE(None,
void_p, IOReturn, ctypes.c_void_p, ctypes.c_void_p)
CFRunLoopSourceRef = ctypes.c_void_p
class IOHIDEventStruct(ctypes.Structure):
    # Mirrors the IOKit IOHIDEventStruct: one queued HID element event.
    _fields_ = (
        ('type', IOHIDElementType),
        ('elementCookie', IOHIDElementCookie),
        ('value', ctypes.c_int32),
        ('timestamp', AbsoluteTime),
        ('longValueSize', ctypes.c_uint32),
        ('longValue', ctypes.c_void_p)
    )
Self = ctypes.c_void_p
class IUnknown(ctypes.Structure):
    # COM-style base vtable shared by the IOKit plug-in interfaces below.
    _fields_ = (
        ('_reserved', ctypes.c_void_p),
        ('QueryInterface',
         ctypes.CFUNCTYPE(HRESULT, Self, REFIID, ctypes.c_void_p)),
        ('AddRef',
         ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
        ('Release',
         ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
    )
# Most of these function prototypes are not filled in yet because I haven't
# bothered.
class IOHIDQueueInterface(ctypes.Structure):
    # Vtable layout of IOKit's IOHIDQueueInterface. Slots we do not call are
    # declared as bare c_void_p placeholders to keep the offsets correct.
    # A stray '|' that corrupted the 'getAsyncEventSource' entry has been
    # removed.
    _fields_ = IUnknown._fields_ + (
        ('createAsyncEventSource', ctypes.CFUNCTYPE(IOReturn,
            Self, ctypes.POINTER(CFRunLoopSourceRef))),
        ('getAsyncEventSource', ctypes.c_void_p),
        ('createAsyncPort', ctypes.c_void_p),
        ('getAsyncPort', ctypes.c_void_p),
        ('create', ctypes.CFUNCTYPE(IOReturn,
            Self, ctypes.c_uint32, ctypes.c_uint32)),
        ('dispose', ctypes.CFUNCTYPE(IOReturn,
            Self)),
        ('addElement', ctypes.CFUNCTYPE(IOReturn,
            Self, IOHIDElementCookie)),
        ('removeElement', ctypes.c_void_p),
        ('hasElement', ctypes.c_void_p),
        ('start', ctypes.CFUNCTYPE(IOReturn,
            Self)),
        ('stop', ctypes.CFUNCTYPE(IOReturn,
            Self)),
        ('getNextEvent', ctypes.CFUNCTYPE(IOReturn,
            Self, ctypes.POINTER(IOHIDEventStruct), AbsoluteTime,
            ctypes.c_uint32)),
        ('setEventCallout', ctypes.CFUNCTYPE(IOReturn,
            Self, IOHIDCallbackFunction, ctypes.c_void_p, ctypes.c_void_p)),
        ('getEventCallout', ctypes.c_void_p),
    )
class IOHIDDeviceInterface(ctypes.Structure):
    # Vtable layout of IOKit's IOHIDDeviceInterface (version 1.2.2).
    # Unused slots are bare c_void_p placeholders that only preserve offsets.
    _fields_ = IUnknown._fields_ + (
        ('createAsyncEventSource', ctypes.c_void_p),
        ('getAsyncEventSource', ctypes.c_void_p),
        ('createAsyncPort', ctypes.c_void_p),
        ('getAsyncPort', ctypes.c_void_p),
        ('open', ctypes.CFUNCTYPE(IOReturn,
            Self, ctypes.c_uint32)),
        ('close', ctypes.CFUNCTYPE(IOReturn,
            Self)),
        ('setRemovalCallback', ctypes.c_void_p),
        ('getElementValue', ctypes.CFUNCTYPE(IOReturn,
            Self, IOHIDElementCookie, ctypes.POINTER(IOHIDEventStruct))),
        ('setElementValue', ctypes.c_void_p),
        ('queryElementValue', ctypes.c_void_p),
        ('startAllQueues', ctypes.c_void_p),
        ('stopAllQueues', ctypes.c_void_p),
        ('allocQueue', ctypes.CFUNCTYPE(
            ctypes.POINTER(ctypes.POINTER(IOHIDQueueInterface)),
            Self)),
        ('allocOutputTransaction', ctypes.c_void_p),
        # 1.2.1 (10.2.3)
        ('setReport', ctypes.c_void_p),
        ('getReport', ctypes.c_void_p),
        # 1.2.2 (10.3)
        ('copyMatchingElements', ctypes.CFUNCTYPE(IOReturn,
            Self, CFDictionaryRef, ctypes.POINTER(CFArrayRef))),
        ('setInterruptReportHandlerCallback', ctypes.c_void_p),
    )
def get_master_port():
    """Return the IOKit master port; _oscheck raises on failure."""
    port = mach_port_t()
    status = carbon.IOMasterPort(MACH_PORT_NULL, ctypes.byref(port))
    _oscheck(status)
    return port
def get_matching_dictionary():
    """Return an IOKit matching dictionary that selects HID devices."""
    carbon.IOServiceMatching.restype = CFMutableDictionaryRef
    matching_dictionary = carbon.IOServiceMatching(kIOHIDDeviceKey)
    return matching_dictionary
def get_matching_services(master_port, matching_dictionary):
    """Return the list of IOKit services matching the given dictionary.

    Note: IOServiceGetMatchingServices consumes the reference to
    ``matching_dictionary``.
    """
    iterator = io_iterator_t()
    status = carbon.IOServiceGetMatchingServices(master_port,
                                                 matching_dictionary,
                                                 ctypes.byref(iterator))
    _oscheck(status)

    collected = []
    while True:
        if not carbon.IOIteratorIsValid(iterator):
            break
        service = carbon.IOIteratorNext(iterator)
        if not service:
            break
        collected.append(service)
    carbon.IOObjectRelease(iterator)
    return collected
def cfstring_to_string(value_string):
    """Convert a CFStringRef to a Python byte string (UTF-8).

    Returns None if CoreFoundation fails to extract the C string.
    """
    value_length = carbon.CFStringGetLength(value_string)
    # Worst-case UTF-8 byte length for the character count.
    buffer_length = carbon.CFStringGetMaximumSizeForEncoding(
        value_length, kCFStringEncodingUTF8)
    buffer = ctypes.c_buffer(buffer_length + 1)
    result = carbon.CFStringGetCString(value_string,
                                       buffer,
                                       len(buffer),
                                       kCFStringEncodingUTF8)
    if not result:
        # Conversion failed; implicit None.
        return
    return buffer.value
def cfnumber_to_int(value):
    """Convert a CFNumberRef to a Python int."""
    result = ctypes.c_int()
    carbon.CFNumberGetValue(value, kCFNumberIntType, ctypes.byref(result))
    return result.value
def cfboolean_to_bool(value):
    """Convert a CFBooleanRef to a Python bool."""
    return bool(carbon.CFBooleanGetValue(value))
def cfvalue_to_value(value):
    """Convert a CFTypeRef to a Python value; None if NULL or unsupported."""
    if not value:
        return None
    type_id = carbon.CFGetTypeID(value)
    if type_id == carbon.CFStringGetTypeID():
        return cfstring_to_string(value)
    if type_id == carbon.CFNumberGetTypeID():
        return cfnumber_to_int(value)
    if type_id == carbon.CFBooleanGetTypeID():
        return cfboolean_to_bool(value)
    return None
def get_property_value(properties, key):
key_string = create_cfstring(key)
value = ctypes.c_void_p()
present = carbon.CFDictionaryGetValueIfPresent(properties,
key_string,
ctypes.byre |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/express_route_circuits_routes_table_list_result.py | Python | mit | 1,282 | 0.00078 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableListResult(Model):
    """Response for ListRoutesTable associated with the Express Route Circuits
    API.

    :param value: The list of routes table.
    :type value:
     list[~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitRoutesTable]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        # A stray '|' had corrupted the serialization type name, which would
        # break msrest (de)serialization of 'value'.
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTable]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitsRoutesTableListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)
|
CroissanceCommune/autonomie | autonomie/tests/utils/test_files.py | Python | gpl-3.0 | 850 | 0.002364 | # -*- coding: utf-8 -*-
# * Authors:
# * TJEBBES Gaston <g.t@majerti.fr>
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
from autonomie.utils.files import (
encode_path,
decode_path,
issubdir,
filesizeformat,
)
def test_encode_decode():
    """encode_path/decode_path must round-trip an awkward unicode path."""
    original = u"$deù % ù$ùdeù % - /// // \ \dekodok %spkoij idje ' kopk \""
    assert decode_path(encode_path(original)) == original
def test_issubdir():
    """issubdir() accepts true descendants and resists '..' traversal.

    A stray '|' had corrupted the function name; restored.
    """
    assert(issubdir("/root/foo", "/root/foo/bar"))
    assert(not issubdir("/root/foo", "/root/bar"))
    assert(not issubdir("/root/foo", "/root/../../foo/bar"))
def test_filesizeformat():
    """filesizeformat() renders ko/Mo with the requested precision.

    A stray '|' had corrupted the first assert; restored.
    """
    assert(filesizeformat(1024, 0) == "1ko")
    assert(filesizeformat(1024, 1) == "1.0ko")
    assert(filesizeformat(1024 * 1024, 0) == "1Mo")
    assert(filesizeformat(1024 * 1024, 1) == "1.0Mo")
|
ramcn/demo3 | venv/lib/python3.4/site-packages/rest_framework/authtoken/south_migrations/0001_initial.py | Python | mit | 3,298 | 0.006064 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
class Migration(SchemaMigration):
    """South migration creating the DRF authtoken 'Token' table.

    Stray '|' characters had corrupted two lines of the frozen ORM dict
    (contenttypes 'Meta' and 'app_label'); they are restored here.
    """

    def forwards(self, orm):
        # Adding model 'Token'
        db.create_table('authtoken_token', (
            ('key', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='auth_token', unique=True, to=orm['%s.%s' % (User._meta.app_label, User._meta.object_name)])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('authtoken', ['Token'])

    def backwards(self, orm):
        # Deleting model 'Token'
        db.delete_table('authtoken_token')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        "%s.%s" % (User._meta.app_label, User._meta.module_name): {
            'Meta': {'object_name': User._meta.module_name, 'db_table': repr(User._meta.db_table)},
        },
        'authtoken.token': {
            'Meta': {'object_name': 'Token'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'auth_token'", 'unique': 'True', 'to': "orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['authtoken']
|
LeonNie52/dk-plus | formation.py | Python | gpl-3.0 | 20,767 | 0.002841 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/3/5
# @Author : Leon.Nie
# @Site :
# @File : formation.py
"""
Drones proceed in formation base on the algorithm Artificial Potential Field
"""
from geo_tools import *
import numpy as np
import logging
import logging.config
logging.config.fileConfig("../logging.conf")
logger = logging.getLogger()
class Formation:
    def __init__(self, network):
        """Initialize formation state and force-field gains for one drone.

        network -- object exposing ``vehicle_params`` (position, velocity,
        SYSID) for this drone; teammates arrive via method arguments.
        """
        self.network = network
        # Force magnitude caps.
        self.MaxLeadForce = 10
        self.MaxForce = 10
        # Artificial-potential-field gains.
        self.safeDistance_K = 0.6
        self.leadForce_K = 1.5
        self.dampForce_K = -1.0
        # self.FormationForce_K = 0.5 * 10e3
        self.FormationForce_K = 0.1
        self.AvoidanceForce_K = 2.0
        # Geometry, set later via setFormation()/set_target_Loc().
        self.TeamHomeLocation = None
        self.targetLocation = None
        self.FormationPosition = None
        self.target_reached = False
        self.home_returned = False  # For team
        self.ownhome_returned = False  # For self unit
        self.home_returning = False  # For team when flying home location
        # Arrival thresholds.
        self.distancePrecise = 10  # in meters
        self.distance_ownPrecise = 5
    def setFormation(self, lat, lon, formation_set):
        """Record the formation geometry and the team home location.

        The home location's altitude is referenced to ZERO.

        :param lat: latitude of the team home location
        :param lon: longitude of the team home location
        :param formation_set: 3xN matrix-like of per-drone body-frame
            offsets (one column per drone, indexed by SYSID - 1)
        """
        self.FormationPosition = np.matrix(formation_set)
        self.TeamHomeLocation = get_location_metres(lat,
                                                    lon,
                                                    0, 0, 0)
        logger.info("Team Home Location set: %s", self.TeamHomeLocation)
    def set_target_Loc(self, alt, dNorth, dEast):
        """Set the team target as an offset from the team home location.

        Requires setFormation() to have been called first (uses
        ``self.TeamHomeLocation``). Resets ``target_reached``.

        :param alt: target altitude
        :param dNorth: northward offset from home, in meters
        :param dEast: eastward offset from home, in meters
        """
        self.targetLocation = get_location_metres(self.TeamHomeLocation.lat,
                                                  self.TeamHomeLocation.lon,
                                                  alt, dNorth, dEast)
        self.target_reached = False
        logger.info("Target Location set: %s", self.targetLocation)
    def set_owntarget_Loc(self, target):
        """Set the target location directly from a location object.

        Unlike set_target_Loc(), no offset computation is performed; the
        given location is used as-is. Resets ``target_reached``.

        :param target: location object with ``lat``/``lon`` attributes
        """
        self.targetLocation = target
        self.target_reached = False
        logger.info("Target Location set: %s", self.targetLocation)
def get_target_Loc(self):
return self.targetLocation
def get_distance2target(self):
return get_distance_metres(self.network.vehicle_params.global_lat,
self.network.vehicle_params.global_lon,
self.targetLocation.lat,
self.targetLocation.lon)
def getPosition(self, teammate):
"""
Return self target position in Team
:param teammate:
:return:
"""
x, y, z = self.get_cenPos(teammate)
Vx, Vy, Vz = self.get_cenVel(teammate)
if Vx > 0:
theta = math.pi / 2 - math.atan(Vy / Vx)
elif Vx < 0:
theta = - math.pi / 2 - math.atan(Vy / Vx)
else:
if Vy > 0:
theta = 0
else:
theta = math.pi
# phi = math.atan(Vz / math.sqrt(Vx ** 2 + Vy ** 2))
phi = 0
Rotaz = np.matrix(
[[math.cos(theta), math.sin(theta), 0],
[-math.sin(theta), math.cos(theta), 0],
[0, 0, 1]])
Rotax = np.matrix(
[[1, 0, 0],
[0, math.cos(phi), -math.sin(phi)],
[0, math.sin(phi), math.cos(phi)]])
c = self.FormationPosition[:, int(self.network.vehicle_params.SYSID_THISMAV - 1)].reshape(3, 1)
abPos = np.array(Rotaz * Rotax * c).ravel()
# logger.debug("Local Formation Position : %s", abPos)
Pos = np.array(get_location_formation(x,
y,
z, abPos[0], abPos[1], abPos[2]))
# logger.debug("Global Formation Position : %s", Pos)
return Pos
def get_cenPos(self, teammate):
"""
Nearly proximate because we are running in a quite small range
:param teammate:
:return:
"""
lat = self.network.vehicle_params.global_lat
lon = self.network.vehicle_params.global_lon
alt = self.network.vehicle_params.global_alt
for drone in teammate:
lat += drone.global_lat
lon += drone.global_lon
alt += drone.global_alt
c_lat = lat / float(len(teammate) + 1)
c_lon = lon / float(len(teammate) + 1)
c_alt = alt / float(len(teammate) + 1)
return c_lat, c_lon, c_alt
def get_cenVel(self, teammate):
velocity = self.network.vehicle_params.velocity
for drone in teammate:
velocity = [drone.vel | ocity[i] + velocity[i] for i in range(len(velocity))]
cenVel = [x | / float(len(teammate) + 1) for x in velocity]
return cenVel
def DampForce(self):
dampForce = self.dampForce_K * np.array(self.network.vehicle_params.velocity)
return dampForce
def LeadForce(self, teammate, single):
if len(teammate) == 0 or single:
cenPos_NED = get_location_NED(self.TeamHomeLocation,
self.network.vehicle_params.global_lat,
self.network.vehicle_params.global_lon,
self.network.vehicle_params.global_alt)
# cenPos = np.array([self.network.vehicle_params.global_lat,
# self.network.vehicle_params.global_lon])
# cenAlt = np.array([self.network.vehicle_params.global_alt])
else:
cenPos_NED = get_location_NED(self.TeamHomeLocation,
self.get_cenPos(teammate)[0],
self.get_cenPos(teammate)[1],
self.get_cenPos(teammate)[2])
# cenPos = self.get_cenPos(teammate)[0:2]
# cenAlt = self.get_cenPos(teammate)[-1]
cenPos = np.array([cenPos_NED.north, cenPos_NED.east])
tarPos_NED = get_location_NED(self.TeamHomeLocation,
self.targetLocation.lat,
self.targetLocation.lon,
self.targetLocation.alt)
tarPos = np.array([tarPos_NED.north, tarPos_NED.east])
# tarPos = np.array([self.targetLocation.lat,
# self.targetLocation.lon])
#
# tarAlt = np.array([self.targetLocation.lat])
logger.debug("Center_Position: %s ; Target_Position: %s ;", cenPos, tarPos)
leadforce = self.leadForce_K * (tarPos - cenPos) / np.linalg.norm(tarPos - cenPos)
if np.linalg.norm(leadforce) > self.MaxLeadForce:
leadforce = leadforce * self.MaxLeadForce / np.linalg.norm(leadforce)
# For now ,no force on altitude
leadforce = np.append(leadforce, np.zeros(1, ))
return leadforce
def ReturnForce(self):
cenPos_NED = get_location_NED(self.TeamHomeLocation,
self.network.vehicle_params.global_lat,
self.network.vehicle_params.global_lon,
self.network.vehicle_params.global_alt)
# cenPos = np.array([self.network.vehicle_params.global_lat,
# self.network.vehicle_params.global_lon])
# cenAlt = np.array([self.network.vehicle_params.global_alt])
cenPos = np.array([cenPos_NED.north, cenPos_NED.east])
tarPos_NED |
sparkslabs/guild | examples/blog/accounts-stm-3.py | Python | apache-2.0 | 3,204 | 0.000624 | #!/usr/bin/python
import random
import time
from guild.actor import *
from guild.stm import Store, ConcurrentUpdate, BusyRetry, retry
import logging
logger = logging.getLogger('__main__.' + "MischiefMaker")
ch = logging.StreamHandler()
ch.setLevel(logging.D | EBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class InsufficientFunds(ActorEx | ception):
pass
class Account(object):
def __init__(self, balance=10):
self.account_info = Store()
curr_balance = self.account_info.checkout("balance")
curr_balance.set(balance)
curr_balance.commit()
@retry # Retries until transaction succeeds
def deposit(self, amount):
curr_balance = self.account_info.checkout("balance")
new_balance = curr_balance.value + amount
curr_balance.set(new_balance)
curr_balance.commit()
return new_balance
@retry(max_tries=50) # Retries up to 50 times
def withdraw(self, amount):
curr_balance = self.account_info.checkout("balance")
if curr_balance.value < amount:
raise InsufficientFunds("Insufficient Funds in your account",
requested=amount,
balance=curr_balance.value)
new_balance = curr_balance.value - amount
curr_balance.set(new_balance)
curr_balance.commit()
return amount
@property
def balance(self):
curr_balance = self.account_info.checkout("balance")
return curr_balance.value
# Transfer is the same function from the non-STM code
def transfer(amount, payer, payee):
funds = payer.withdraw(amount)
payee.deposit(funds)
# Transfer is the same class from the non-STM code
class MischiefMaker(Actor):
def __init__(self, myaccount, friendsaccount, name):
super(MischiefMaker, self).__init__()
self.myaccount = myaccount
self.friendsaccount = friendsaccount
self.grabbed = 0
self.name = name
@process_method
def process(self):
try:
grab = random.randint(1, 10) * 10
transfer(grab, self.friendsaccount, self.myaccount)
except InsufficientFunds as e:
print "Awww, Tapped out", e.balance, "<", e.requested
self.stop()
return
except MaxRetriesExceeded as e:
print "Gotta wait a moment"
time.sleep(0.001)
self.grabbed = self.grabbed + grab
# These two are now just plain objects rather than actors, so they
# don't need to be started - or stopped. Otherwise this code is
# un-modified
account1 = Account(1000)
account2 = Account(1000)
barney = MischiefMaker(account2, account1, "barney").go()
fred = MischiefMaker(account1, account2, "fred").go()
wait_for(fred, barney)
print "GAME OVER"
print "Fred grabbed", fred.grabbed
print "Barney grabbed", barney.grabbed
print "Total grabbed", fred.grabbed + barney.grabbed
print "Since they stopped grabbing..."
print "Money left", account1.balance, account2.balance
print "Ending money", account1.balance + account2.balance
|
DamCB/tyssue | tyssue/core/history.py | Python | gpl-3.0 | 16,124 | 0.001985 | import os
import warnings
import traceback
import logging
import pandas as pd
import numpy as np
from pathlib import Path
from collections import defaultdict
from .sheet import Sheet
from .objects import Epithelium
logger = logging.getLogger(name=__name__)
def _filter_columns(cols_hist, cols_in, element):
if not set(cols_hist).issubset(cols_in):
warnings.warn(
f"""
Columns {set(cols_hist).difference(cols_in)} are in the history
{element} dataframe but not in the sheet {element} dataframe.
These non existent columns will not be saved."""
)
cols_hist = set(cols_hist).intersection(cols_in)
return list(cols_hist)
class History:
"""This class handles recording and retrieving time series
of sheet objects.
"""
def __init__(
self,
sheet,
save_every=None,
dt=None,
save_only=None,
extra_cols=None,
save_all=True,
):
"""Creates a `SheetHistory` instance.
Parameters
----------
sheet : a :class:`Sheet` object which we want to record
save_every : float, set every time interval to save the sheet
dt : float, time step
save_only: dict : dictionnary with sheet.datasets as keys and list of
columns as values. Default None
extra_cols : dictionnary with sheet.datasets as keys and list of
columns as values. Default None
save_all : bool
if True, saves all the data at each time point
"""
if extra_cols is not None:
warnings.warn(
"extra_cols and save_all parameters are deprecated. Use save_only instead. "
)
extra_cols = {k: list(sheet.datasets[k].columns) for k in sheet.datasets}
if save_only is not None:
extra_cols = defaultdict(list, **extra_cols)
self.sheet = sheet
self.time = 0.0
self.index = 0
if save_every is not None:
self.save_every = save_every
self.dt = dt
else:
self.save_ever | y = None
self.datasets = {}
self.columns = {}
vcols = sheet.coords + extra_cols["vert"]
vcols = list(set(vcols))
self.vcols = _filter_columns(vcols, sheet.vert_df.columns, "vertex")
_vert_h = sheet.vert_df[self.vcols].reset_index(drop=False)
if not "time" in self.vcols:
_vert_h["time"] = 0
self.datasets["vert"] = | _vert_h
self.columns["vert"] = self.vcols
fcols = extra_cols["face"]
self.fcols = _filter_columns(fcols, sheet.face_df.columns, "face")
_face_h = sheet.face_df[self.fcols].reset_index(drop=False)
if not "time" in self.fcols:
_face_h["time"] = 0
self.datasets["face"] = _face_h
self.columns["face"] = self.fcols
if sheet.cell_df is not None:
ccols = extra_cols["cell"]
self.ccols = _filter_columns(ccols, sheet.cell_df.columns, "cell")
_cell_h = sheet.cell_df[self.ccols].reset_index(drop=False)
if not "time" in self.ccols:
_cell_h["time"] = 0
self.datasets["cell"] = _cell_h
self.columns["cell"] = self.ccols
extra_cols["edge"].append("cell")
ecols = ["srce", "trgt", "face"] + extra_cols["edge"]
ecols = list(set(ecols))
self.ecols = _filter_columns(ecols, sheet.edge_df.columns, "edge")
_edge_h = sheet.edge_df[self.ecols].reset_index(drop=False)
if not "time" in self.ecols:
_edge_h["time"] = 0
self.datasets["edge"] = _edge_h
self.columns["edge"] = self.ecols
def __len__(self):
return self.time_stamps.__len__()
def to_archive(self, hf5file):
"""Saves the history to a HDF file
This file can later be accessed again with the `HistoryHdf5.from_archive`
class method
"""
with pd.HDFStore(hf5file, "a") as store:
for key, df in self.datasets.items():
kwargs = {"data_columns": ["time"]}
if "segment" in df.columns:
kwargs["min_itemsize"] = {"segment": 7}
store.append(key=key, value=df, **kwargs)
@property
def time_stamps(self):
return self.datasets["vert"]["time"].unique()
@property
def vert_h(self):
return self.datasets["vert"]
@property
def edge_h(self):
return self.datasets["edge"]
@property
def face_h(self):
return self.datasets["face"]
@property
def cell_h(self):
return self.datasets.get("cell", None)
def record(self, time_stamp=None):
"""Appends a copy of the sheet datasets to the history instance.
Parameters
----------
time_stamp : float, save specific timestamp
"""
if time_stamp is not None:
self.time = time_stamp
else:
self.time += 1
if (self.save_every is None) or (
self.index % (int(self.save_every / self.dt)) == 0
):
for element in self.datasets:
hist = self.datasets[element]
cols = self.columns[element]
df = self.sheet.datasets[element][cols].reset_index(drop=False)
if not "time" in cols:
times = pd.Series(np.ones((df.shape[0],)) * self.time, name="time")
df = pd.concat([df, times], ignore_index=False, axis=1, sort=False)
if self.time in hist["time"]:
# erase previously recorded time point
hist = hist[hist["time"] != self.time]
hist = pd.concat([hist, df], ignore_index=True, axis=0, sort=False)
self.datasets[element] = hist
self.index += 1
def retrieve(self, time):
"""Return datasets at time `time`.
If a specific dataset was not recorded at time time, the closest record before that
time is used.
"""
if time > self.datasets["vert"]["time"].values[-1]:
warnings.warn(
"""
The time argument you requested is bigger than the maximum recorded time,
are you sure you passed the time stamp as parameter, and not an index ?
"""
)
sheet_datasets = {}
for element in self.datasets:
hist = self.datasets[element]
cols = self.columns[element]
df = _retrieve(hist, time)
df = df.set_index(element)[cols]
sheet_datasets[element] = df
return type(self.sheet)(
f"{self.sheet.identifier}_{time:04.3f}", sheet_datasets, self.sheet.specs
)
def __iter__(self):
"""Iterates over all the time points of the history"""
for t in self.time_stamps:
sheet = self.retrieve(t)
yield t, sheet
def slice(self, start=0, stop=None, size=None, endpoint=True):
"""Returns a slice of the history's time_stamps array
The slice is over or under sampled to have exactly size point
between start and stop
"""
if size is not None:
if stop is not None:
time_stamps = self.time_stamps[start : stop + int(endpoint)]
else:
time_stamps = self.time_stamps
indices = np.round(
np.linspace(0, time_stamps.size + 1, size, endpoint=True)
).astype(int)
times = time_stamps.take(indices.clip(max=time_stamps.size - 1))
elif stop is not None:
times = self.time_stamps[start : stop + int(endpoint)]
else:
times = self.time_stamps
return times
def browse(self, start=0, stop=None, size=None, endpoint=True):
"""Returns an iterator over part of the history
Parameters
----------
start: int, index of the first time point
stop: int, index of the last time point
size: int, the number of time points to return
endpoint: bool, wether to include the stop time point (default True)
Returns
|
mcgettin/ditOldProgramming | yr2/sem2/Labs/2_animal/dogObj.py | Python | mit | 66 | 0.060606 |
class | Doge(object):
def __init__( | self,)
|
rubydhash/webradius | webradius-project/webradius/util.py | Python | lgpl-2.1 | 783 | 0.007663 | # -*- coding: utf-8 -*-
from django.core import except | ions
from django.conf import settings
from django.db.models import fields
class BigAutoField(fields.AutoField):
def db_type(self, connection=None):
for item in settings.DATABASES:
if settings.DATABASES[item]['ENGINE'] == 'django.db.backends.postgresql_psycopg2':
return "bigserial"
| else:
raise NotImplemented
def get_internal_type(self):
return "BigAutoField"
def to_python(self, value):
if value is None:
return value
try:
return long(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
_("This value must be a long integer.")) |
kalgynirae/subjunctive | setup.py | Python | mit | 290 | 0 | from setuptools | import find_packages, setup
setup(
name='subjunctive',
version='0.1',
install_requires=[
'PySDL2 ==0.8.0',
],
packages=find_packages(),
scripts=[
'games/think-green/think-green.py',
'games/floorpaint/floorpaint.p | y',
],
)
|
ktan2020/legacy-automation | win/Lib/ctypes/test/test_callbacks.py | Python | mit | 8,245 | 0.001577 | import unittest
from ctypes import *
import _ctypes_test
class Callbacks(unittest.TestCase):
functype = CFUNCTYPE
## def tearDown(self):
## import gc
## gc.collect()
def callback(self, *args):
self.got_args = args
return args[-1]
def check_type(self, typ, arg):
PROTO = self.functype.im_func(typ, typ)
result = PROTO(self.callback)(arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (arg,))
self.assertEqual(result, arg)
PROTO = self.functype.im_func(typ, c_byte, typ)
result = PROTO(self.callback)(-3, arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (-3, arg))
self.assertEqual(result, arg)
################
def test_byte(self):
self.check_type(c_byte, 42)
self.check_type(c_byte, -42)
def test_ubyte(self):
self.check_type(c_ubyte, 42)
def test_short(self):
self.check_type(c_short, 42)
self.check_type(c_short, -42)
def test_ushort(self):
self.check_type(c_ushort, 42)
def test_int(self):
self.check_type(c_int, 42)
self.check_type(c_int, -42)
def test_uint(self):
self.check_type(c_uint, 42)
def test_long(self):
self.check_type(c_long, 42)
self.check_type(c_long, -42)
def test_ulong(self):
self.check_type(c_ulong, 42)
def test_longlong(self):
# test some 64-bit values, positive and negative
self.check_type(c_longlong, 5948291757245277467)
self.check_type(c_longlong, -5229388909784190580)
self.check_type(c_longlong, 42)
self.check_type(c_longlong, -42)
def test_ulonglong(self):
# test some 64-bit values, with and without msb set.
self.check_type(c_ulonglong, 10955412242170339782)
self.check_type(c_ulonglong, 3665885499841167458)
self.check_type(c_ulonglong, 42)
def test_float(self):
# only almost equal: double -> float -> double
import math
self.check_type(c_float, math.e)
self.check_type(c_float, -math.e)
def test_double(self):
self.check_type(c_double, 3.14)
self.check_type(c_double, -3.14)
def test_longdouble(self):
self.check_type(c_longdouble, 3.14)
self.check_type(c_longdouble, -3.14)
def test_char(self):
self.check_type(c_char, "x")
self.check_type(c_char, "a")
# disabled: would now (correctly) raise a RuntimeWarning about
# a memory leak. A callback function cannot return a non-integral
# C type without causing a memory leak.
## def test_char_p(self):
## self.check_type(c_char_p, "abc")
## self.check_type(c_char_p, "def")
def test_pyobject(self):
o = ()
from sys import getrefcount as grc
for o in (), [], object():
initial = grc(o)
# This call leaks a reference to 'o'...
self.check_type(py_object, o)
before = grc(o)
# ...but this call doesn't leak any more. Where is the refcount?
self.check_type(py_object, o)
after = grc(o)
self.assertEqual((after, o), (before, o))
def test_unsupported_restype_1(self):
# Only "fundamental" result types are supported for callback
# functions, the type must have a non-NULL stgdict->setfunc.
# POINTER(c_double), for example, is not supported.
prototype = self.functype.im_func(POINTER(c_double))
# The type is checked when the prototype is called
self.assertRaises(TypeError, prototype, lambda: None)
def test_unsupported_restype_2(self):
prototype = self.functype.im_func(object)
self.assertRaises(TypeError, prototype, lambda: None)
def test_issue_7959(self):
proto = self.functype.im_func(None)
class X(object):
def func(self): pass
def __init__(self):
self.v = proto(self.func)
import gc
for i in range(32):
X()
gc.collect()
live = [x for x in gc.get_objects()
if isinstance(x, X)]
self.assertEqual(len(live), 0)
try:
WINFUNCTYPE
except NameError:
pass
else:
class StdcallCallbacks(Callbacks):
functype = WINFUNCTYPE
################################################################
class S | ampleCallbacksTestCase(unittest.TestCase):
def test_integrate(self):
# Derived from some then non-working code, posted by David Foster
dll = CDLL(_ctypes_test.__file__)
# The function prototype called by 'integrate': double func(double);
CALLBACK = CFUNCTYPE(c_double, c_double)
# The integrate function itself, exposed from the _ctypes_test dll
integrate = dll.int | egrate
integrate.argtypes = (c_double, c_double, CALLBACK, c_long)
integrate.restype = c_double
def func(x):
return x**2
result = integrate(0.0, 1.0, CALLBACK(func), 10)
diff = abs(result - 1./3.)
self.assertLess(diff, 0.01, "%s not less than 0.01" % diff)
def test_issue_8959_a(self):
from ctypes.util import find_library
libc_path = find_library("c")
if not libc_path:
return # cannot test
libc = CDLL(libc_path)
@CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
def cmp_func(a, b):
return a[0] - b[0]
array = (c_int * 5)(5, 1, 99, 7, 33)
libc.qsort(array, len(array), sizeof(c_int), cmp_func)
self.assertEqual(array[:], [1, 5, 7, 33, 99])
try:
WINFUNCTYPE
except NameError:
pass
else:
def test_issue_8959_b(self):
from ctypes.wintypes import BOOL, HWND, LPARAM
global windowCount
windowCount = 0
@WINFUNCTYPE(BOOL, HWND, LPARAM)
def EnumWindowsCallbackFunc(hwnd, lParam):
global windowCount
windowCount += 1
return True #Allow windows to keep enumerating
windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0)
def test_callback_register_int(self):
# Issue #8275: buggy handling of callback args under Win64
# NOTE: should be run on release builds as well
dll = CDLL(_ctypes_test.__file__)
CALLBACK = CFUNCTYPE(c_int, c_int, c_int, c_int, c_int, c_int)
# All this function does is call the callback with its args squared
func = dll._testfunc_cbk_reg_int
func.argtypes = (c_int, c_int, c_int, c_int, c_int, CALLBACK)
func.restype = c_int
def callback(a, b, c, d, e):
return a + b + c + d + e
result = func(2, 3, 4, 5, 6, CALLBACK(callback))
self.assertEqual(result, callback(2*2, 3*3, 4*4, 5*5, 6*6))
def test_callback_register_double(self):
# Issue #8275: buggy handling of callback args under Win64
# NOTE: should be run on release builds as well
dll = CDLL(_ctypes_test.__file__)
CALLBACK = CFUNCTYPE(c_double, c_double, c_double, c_double,
c_double, c_double)
# All this function does is call the callback with its args squared
func = dll._testfunc_cbk_reg_double
func.argtypes = (c_double, c_double, c_double,
c_double, c_double, CALLBACK)
func.restype = c_double
def callback(a, b, c, d, e):
return a + b + c + d + e
result = func(1.1, 2.2, 3.3, 4.4, 5.5, CALLBACK(callback))
self.assertEqual(result,
callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5))
#################################################### |
sckasturi/saltlake | commands/stopwatch.py | Python | gpl-2.0 | 3,644 | 0.001372 | # Copyright (C) 2013-2014 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek and James Forcier
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from time import time
from datetime import timedelta
from helpers.orm import Stopwatches
from helpers.command import Command
def create_sw(session):
row = Stopwatches(time=time())
session.add(row)
session.flush()
return "Created new stopwatch with ID %d" % row.id
def get_status(session, sw):
active = session.query(Stopwatches.active).get(sw).scalar()
if active is None:
return "Invalid ID!"
return "Active" if active == 1 else "Paused"
def get_elapsed(session, sw):
stopwatch = session.query(Stopwatches).get(sw)
if stopwatch is None:
return "No stopwatch exists with that ID!"
etime = stopwatch.elapsed
if stopwatch.active == 1:
etime = time() - stopwatch.time
return str(timedelta(seconds=etime))
def stop_stopwatch(session, sw):
stopwatch = session.query(Stopwatches).get(sw)
if stopwatch is None:
return "No stopwatch exists with that ID!"
if stopwatch.active == 0:
return "That stopwatch is already disabled!"
etime = stopwatch.elapsed
etime = time.time() - stopwatch.time
stopwatch.elapsed = etime
stopwatch.active = 0
return "Stopwatch stopped!"
def stopwatch_resume(session, sw):
stopwatch = session.query(Stopwatches).get(sw)
if stopwatch is None:
return "No stopwatch exists with that ID!"
if stopwatch.active == 1:
return "That stopwatch is not paused!"
stopwatch.active = 1
stopwatch.time = time()
return "Stopwatch resumed!"
def stopwatch_list(session, send, nick):
active = session.query(Stopwatches).filter(Stopwatches.active == 1).all()
pause | d = session.query(Stopwatches).filter(Stopwatches.active == 0).all()
send("%d active and %d paused stopwatches." % (len(active), len(paused)))
for x in active:
send('Active stopwatch #%d started at %d' % (x.id, x.time), target=nick)
for x in paused:
send('Paused stopwatch #%d started at %d time elapsed %d' % (x.id, x.time, x.elapsed), target=nick)
@Command(['stopwatch', 'sw'], ['db', 'nick'])
def cmd(send, msg, args):
| """Start/stops/resume/get stopwatch
Syntax: {command} <start|stop|resume|get|list>
"""
if not msg:
send("Invalid Syntax.")
return
msg = msg.split()
command = msg[0]
msg = " ".join(msg[1:])
session = args['db']
if command == "start":
send(create_sw(session))
elif command == "get":
send("%s %s" % (get_status(session, msg), get_elapsed(session, msg)))
elif command == "stop":
send("%s Stopped at %s" % (stop_stopwatch(session, msg), get_elapsed(session, msg)))
elif command == "resume":
send(stopwatch_resume(session, msg))
elif command == "list":
stopwatch_list(session, send, args['nick'])
else:
send("Invalid Syntax.")
|
xuender/test | testAdmin/itest/migrations/0002_auto__del_field_test_modify_at__del_field_test_create_by__del_field_te.py | Python | apache-2.0 | 2,475 | 0.006061 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Test.modify_at'
db.delete_column(u'itest_test', 'modify_at')
# Deleting field 'Test.create_by'
db.delete_column(u'itest_test', 'create_by_i | d')
# Deleting field 'Test.modify_by'
db.delete_column(u'itest_test', 'modify_by_id')
def backwards(self, orm):
# Adding field 'Test.modify_at'
db.add_column(u'itest_test', 'modify_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
| # Adding field 'Test.create_by'
db.add_column(u'itest_test', 'create_by',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, related_name='+', to=orm['auth.User']),
keep_default=False)
# Adding field 'Test.modify_by'
db.add_column(u'itest_test', 'modify_by',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, related_name='+', to=orm['auth.User']),
keep_default=False)
models = {
'itest.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'word': ('django.db.models.fields.CharField', [], {'max_length': '35'})
},
'itest.test': {
'Meta': {'object_name': 'Test'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tests'", 'symmetrical': 'False', 'to': "orm['itest.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
}
}
complete_apps = ['itest'] |
openstack/aodh | aodh/tests/functional/storage/test_storage_scenarios.py | Python | apache-2.0 | 22,582 | 0 | #
# Copyright 2013 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Base classes for DB backend implementation test
"""
import datetime
from unittest import mock
from oslo_utils import timeutils
from aodh import storage
from aodh.storage import models as alarm_models
from aodh.tests import constants
from aodh.tests.functional import db as tests_db
ALARM_TYPE = 'gnocchi_aggregation_by_metrics_threshold'
METRIC_IDS = ['41869681-5776-46d6-91ed-cccc43b6e4e3',
'a1fb80f4-c242-4f57-87c6-68f47521059e']
class DBTestBase(tests_db.TestBase):
@ | staticmethod
def create_side_effect(method, exception_type, test_except | ion):
def side_effect(*args, **kwargs):
if test_exception.pop():
raise exception_type
else:
return method(*args, **kwargs)
return side_effect
def setUp(self):
super(DBTestBase, self).setUp()
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39)
class AlarmTestBase(DBTestBase):
def add_some_alarms(self):
alarms = [alarm_models.Alarm(alarm_id='r3d',
enabled=True,
type=ALARM_TYPE,
name='red-alert',
description='my red-alert',
timestamp=datetime.datetime(2015, 7,
2, 10, 25),
user_id='me',
project_id='and-da-boys',
state="insufficient data",
state_reason="insufficient data",
state_timestamp=constants.MIN_DATETIME,
ok_actions=[],
alarm_actions=['http://nowhere/alarms'],
insufficient_data_actions=[],
repeat_actions=False,
time_constraints=[dict(name='testcons',
start='0 11 * * *',
duration=300)],
rule=dict(comparison_operator='eq',
threshold=36,
aggregation_method='count',
evaluation_periods=1,
granularity=60,
metrics=METRIC_IDS),
severity='low'
),
alarm_models.Alarm(alarm_id='0r4ng3',
enabled=True,
type=ALARM_TYPE,
name='orange-alert',
description='a orange',
timestamp=datetime.datetime(2015, 7,
2, 10, 40),
user_id='me',
project_id='and-da-boys',
state="insufficient data",
state_reason="insufficient data",
state_timestamp=constants.MIN_DATETIME,
ok_actions=[],
alarm_actions=['http://nowhere/alarms'],
insufficient_data_actions=[],
repeat_actions=False,
time_constraints=[],
rule=dict(comparison_operator='gt',
threshold=75,
aggregation_method='avg',
evaluation_periods=1,
granularity=60,
metrics=METRIC_IDS),
severity='low'
),
alarm_models.Alarm(alarm_id='y3ll0w',
enabled=False,
type=ALARM_TYPE,
name='yellow-alert',
description='yellow',
timestamp=datetime.datetime(2015, 7,
2, 10, 10),
user_id='me',
project_id='and-da-boys',
state="insufficient data",
state_reason="insufficient data",
state_timestamp=constants.MIN_DATETIME,
ok_actions=[],
alarm_actions=['http://nowhere/alarms'],
insufficient_data_actions=[],
repeat_actions=False,
time_constraints=[],
rule=dict(comparison_operator='lt',
threshold=10,
aggregation_method='min',
evaluation_periods=1,
granularity=60,
metrics=METRIC_IDS),
severity='low'
)]
for a in alarms:
self.alarm_conn.create_alarm(a)
class AlarmTest(AlarmTestBase):
def test_empty(self):
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual([], alarms)
def test_list(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(3, len(alarms))
def test_list_ordered_by_timestamp(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(len(alarms), 3)
alarm_l = [a.timestamp for a in alarms]
alarm_l_ordered = [datetime.datetime(2015, 7, 2, 10, 40),
datetime.datetime(2015, 7, 2, 10, 25),
datetime.datetime(2015, 7, 2, 10, 10)]
self.assertEqual(alarm_l_ordered, alarm_l)
def test_list_enabled(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms(enabled=True))
self.assertEqual(2, len(alarms))
def test_list_disabled(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms(enabled=False))
self.assertEqual(1, len(alarms))
def test_list_by_type(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms(type=ALARM_TYPE))
self.assertEqual(3, len(alarms))
def test_list_excluded_by_name(self):
self.add_some_alarms()
alarms = list(self.alarm_conn.get_alarms(name={'ne': 'yellow-alert'}))
self.assertEqual(2, len(alarms))
alarm_names = sorted([a.name fo |
aikramer2/spaCy | spacy/lang/fr/tokenizer_exceptions.py | Python | mit | 5,710 | 0.003355 | # coding: utf8
from __future__ import unicode_literals
import regex as re
from ._tokenizer_exceptions_list import FR_BASE_EXCEPTIONS
from .punctuation import ELISION, HYPHENS
from ..tokenizer_exceptions import URL_PATTERN
from ..char_classes import ALPHA_LOWER
from ...symbols import ORTH, LEMMA, TAG, NORM, PRON_LEMMA
def upper_first_letter(text):
if len(text) == 0:
return text
if len(text) == 1:
return text.upper()
return text[0].upper() + text[1:]
def lower_first_letter(text):
    """Return *text* with only its first character lower-cased.

    The empty string is returned unchanged.
    """
    if len(text) <= 1:
        return text.lower()
    return text[0].lower() + text[1:]
# Hand-written tokenizer exceptions: each ORTH string maps to the list of
# sub-tokens it must be split into, with their lemmas.
_exc = {
    "J.-C.": [
        {LEMMA: "Jésus", ORTH: "J."},
        {LEMMA: "Christ", ORTH: "-C."}]
}
# Common French abbreviations: each is one token whose lemma is the full word.
for exc_data in [
    {LEMMA: "avant", ORTH: "av."},
    {LEMMA: "janvier", ORTH: "janv."},
    {LEMMA: "février", ORTH: "févr."},
    {LEMMA: "avril", ORTH: "avr."},
    {LEMMA: "juillet", ORTH: "juill."},
    {LEMMA: "septembre", ORTH: "sept."},
    {LEMMA: "octobre", ORTH: "oct."},
    {LEMMA: "novembre", ORTH: "nov."},
    {LEMMA: "décembre", ORTH: "déc."},
    {LEMMA: "après", ORTH: "apr."},
    {LEMMA: "docteur", ORTH: "Dr."},
    {LEMMA: "monsieur", ORTH: "M."},
    {LEMMA: "monsieur", ORTH: "Mr."},
    {LEMMA: "madame", ORTH: "Mme."},
    {LEMMA: "mademoiselle", ORTH: "Mlle."},
    {LEMMA: "numéro", ORTH: "n°"},
    {LEMMA: "degrés", ORTH: "d°"},
    {LEMMA: "saint", ORTH: "St."},
    {LEMMA: "sainte", ORTH: "Ste."}]:
    _exc[exc_data[ORTH]] = [exc_data]
# Verbatim exceptions from the generated list: kept as single tokens.
for orth in FR_BASE_EXCEPTIONS + ["etc."]:
    _exc[orth] = [{ORTH: orth}]
# Inverted-verb forms with euphonic -t-: "a-t-il", "Semble-t-elle", ...
# Each splits into verb + "-t" + "-pronoun".
# (Two stray " | " extraction artifacts were repaired below: the
# `"-" + pronoun` concatenation and the `TAG: "ADV"` value.)
for verb, verb_lemma in [
    ("a", "avoir"),
    ("est", "être"),
    ("semble", "sembler"),
    ("indique", "indiquer"),
    ("moque", "moquer"),
    ("passe", "passer")]:
    for orth in [verb, verb.title()]:
        for pronoun in ["elle", "il", "on"]:
            token = "{}-t-{}".format(orth, pronoun)
            _exc[token] = [
                {LEMMA: verb_lemma, ORTH: orth, TAG: "VERB"},
                {LEMMA: "t", ORTH: "-t"},
                {LEMMA: pronoun, ORTH: "-" + pronoun}]
# Interrogative "est-ce" / "Est-ce": verb + "-ce".
for verb, verb_lemma in [
    ("est","être")]:
    for orth in [verb, verb.title()]:
        token = "{}-ce".format(orth)
        _exc[token] = [
            {LEMMA: verb_lemma, ORTH: orth, TAG: "VERB"},
            {LEMMA: 'ce', ORTH: '-ce'}]
# Elided prefixes before "est-ce": "qu'est-ce", "n'est-ce", and title-cased.
for pre, pre_lemma in [
    ("qu'", "que"),
    ("n'", "ne")]:
    for orth in [pre,pre.title()]:
        _exc['%sest-ce' % orth] = [
            {LEMMA: pre_lemma, ORTH: orth, TAG: "ADV"},
            {LEMMA: 'être', ORTH: "est", TAG: "VERB"},
            {LEMMA: 'ce', ORTH: '-ce'}]
# Expand the base exceptions to every combination of elision character and
# hyphen variant (ASCII '-' and U+2010), then add capitalised forms.
_infixes_exc = []
for elision_char in ELISION:
    for hyphen_char in ['-', '‐']:
        _infixes_exc += [infix.replace("'", elision_char).replace('-', hyphen_char)
                         for infix in FR_BASE_EXCEPTIONS]
_infixes_exc += [upper_first_letter(word) for word in _infixes_exc]
# Deduplicate; ordering is irrelevant because each entry becomes a dict key.
_infixes_exc = list(set(_infixes_exc))
for orth in _infixes_exc:
    _exc[orth] = [{ORTH: orth}]
# Regex fragments for productive prefixes that attach with a hyphen
# ("anti-", "sous-", "général-de-..."); used to keep such compounds as one token.
_hyphen_prefix = [
    'a[ée]ro', 'abat', 'a[fg]ro', 'after', 'am[ée]ricano', 'anglo', 'anti',
    'apr[èe]s', 'arabo', 'arcs?', 'archi', 'arrières?', 'avant', 'auto',
    'banc', 'bas(?:ses?)?', 'bec?', 'best', 'bio?', 'bien', 'blanc', 'bo[îi]te',
    'bois', 'bou(?:c|rg)', 'b[êe]ta', 'cache', 'cap(?:ello)?', 'champ',
    'chapelle', 'ch[âa]teau', 'cha(?:ud|t)e?s?', 'chou', 'chromo', 'claire?s?',
    'co(?:de|ca)?', 'compte', 'contre', 'cordon', 'coupe?', 'court', 'crash',
    'crise', 'croche', 'cross', 'cyber', 'côte', 'demi', 'di(?:sney)?',
    'd[ée]s?', 'double', 'dys', 'entre', 'est', 'ethno', 'extra', 'extrême',
    '[ée]co', 'fil', 'fort', 'franco?s?', 'gallo', 'gardes?', 'gastro',
    'grande?', 'gratte', 'gr[ée]co', 'gros', 'g[ée]o', 'haute?s?', 'hyper',
    'indo', 'infra', 'inter', 'intra', 'islamo', 'italo', 'jean', 'labio',
    'latino', 'live', 'lot', 'louis', 'm[ai]cro', 'mesnil', 'mi(?:ni)?', 'mono',
    'mont?s?', 'moyen', 'multi', 'm[ée]cano', 'm[ée]dico', 'm[ée]do', 'm[ée]ta',
    'mots?', 'noix', 'non', 'nord', 'notre', 'n[ée]o', 'ouest', 'outre', 'ouvre',
    'passe', 'perce', 'pharmaco', 'ph[oy]to', 'pique', 'poissons?', 'ponce',
    'pont', 'po[rs]t', 'primo', 'pro(?:cès|to)?', 'pare', 'petite?', 'porte',
    'pré', 'prêchi', 'pseudo', 'pêle', 'péri', 'puy', 'quasi', 'recourt',
    'rythmo', 'r[ée]', 'r[ée]tro', 'sans', 'sainte?s?', 'semi', 'social',
    'sous', 'su[bdr]', 'super', 'tire', 'thermo', 'tiers', 'trans',
    'tr(?:i|ou)', 't[ée]l[ée]', 'vi[cd]e', 'vid[ée]o', 'vie(?:ux|illes?)',
    'vill(?:e|eneuve|ers|ette|iers|y)', 'ultra', 'à', '[ée]lectro', '[ée]qui']
# Prefixes that attach with an elision character instead: "entr'acte", "grand'chose".
_elision_prefix = ['entr', 'grande?s?']
# Every hyphen variant except the ASCII one (that one is added literally via \-).
_other_hyphens = ''.join([h for h in HYPHENS if h != '-'])
# Full-token patterns: compounds that must never be split by the infix rules.
_regular_exp = [
    '^droits?[{hyphen}]de[{hyphen}]l\'homm[{alpha}]+$'.format(hyphen=HYPHENS, alpha=ALPHA_LOWER),
    '^zig[{hyphen}]zag[{alpha}]*$'.format(hyphen=HYPHENS, alpha=ALPHA_LOWER),
    '^prud[{elision}]homm[{alpha}]*$'.format(elision=ELISION, alpha=ALPHA_LOWER)]
# Hyphenated prefixes: "<prefix>-word...", allowing further elision/hyphen chars.
_regular_exp += ["^{prefix}[{hyphen}][{alpha}][{alpha}{elision}{other_hyphen}\-]*$".format(
    prefix=p, hyphen=HYPHENS, other_hyphen=_other_hyphens,
    elision=ELISION, alpha=ALPHA_LOWER)
    for p in _hyphen_prefix]
# Elided prefixes: "<prefix>'word..." — the character right after the prefix
# must be an elision character, so pass ELISION for the {elision} placeholder
# (the original passed HYPHENS here, which made these patterns match
# "entr-acte"-style strings instead of "entr'acte").
_regular_exp += ["^{prefix}[{elision}][{alpha}][{alpha}{elision}{hyphen}\-]*$".format(
    prefix=p, elision=ELISION, hyphen=_other_hyphens, alpha=ALPHA_LOWER)
    for p in _elision_prefix]
_regular_exp.append(URL_PATTERN)
# Public exports consumed by the French Language class.
TOKENIZER_EXCEPTIONS = _exc
# TOKEN_MATCH keeps whole tokens that match any of the patterns above.
TOKEN_MATCH = re.compile('|'.join('(?:{})'.format(m) for m in _regular_exp), re.IGNORECASE).match
LernaProject/Lerna | achievements/models.py | Python | gpl-2.0 | 3,400 | 0.009412 | import collections
from django.db import models as md
from django.db.models import Q
from users.models import User
from core.models import Contest
from core.models import Problem
from core.models import Attempt
AchievementStatus = collections.namedtuple('AchievementStatus', 'achievement unlocked earned_at progress_percent')
class Achievement(md.Model):
    """An unlockable achievement.

    It is earned when a user accrues ``amount`` accepted attempts matching
    the filter fields below; ``status()`` computes (and persists) the state
    for a given user.
    """
    name = md.CharField(max_length=255)
    description = md.TextField(null=True, blank=True, default=None)
    points = md.PositiveIntegerField()
    # (Repaired an extraction artifact: this line previously read "m | d.CharField".)
    icon_path = md.CharField(max_length=255, null=True, blank=True, default=None)
    created_at = md.DateTimeField(auto_now_add=True)
    updated_at = md.DateTimeField(auto_now=True)
    # filters:
    amount = md.IntegerField(null=True, blank=True, default=None)
    problem = md.ForeignKey(Problem, null=True, blank=True, default=None)
    contest = md.ForeignKey(Contest, null=True, blank=True, default=None)
    author = md.CharField(max_length=255, null=True, blank=True, default=None)
    origin = md.CharField(max_length=255, null=True, blank=True, default=None)
    language = md.CharField(max_length=255, null=True, blank=True, default=None)
    class Meta:
        db_table = 'achievements'
        get_latest_by = 'created_at'
    def status(self, user):
        """Return an AchievementStatus for *user*.

        If the achievement was already earned, the stored record is used.
        Otherwise qualifying attempts are counted; on reaching ``amount`` a
        UserAchievement row is created as a side effect.
        """
        try:
            info = UserAchievement.objects.get(user=user, achievement=self)
        except UserAchievement.DoesNotExist:
            info = None
        if info is not None:
            return AchievementStatus(self, True, info.earned_at, 100)
        # A qualifying attempt is Accepted (or fully scored) in a non-admin contest.
        query = Q(user=user) & Q(problem_in_contest__contest__is_admin=False) & (
            Q(result='Accepted') | (Q(result='Tested') & Q(score__gt=99.99)))
        if self.problem:
            query = query & Q(problem_in_contest__problem=self.problem)
        if self.contest:
            query = query & Q(problem_in_contest__contest=self.contest)
        if self.author:
            query = query & (Q(problem_in_contest__problem__author__contains=self.author) | Q(problem_in_contest__problem__developer__contains=self.author))
        if self.origin:
            # NOTE(review): every other filter goes through problem_in_contest__problem;
            # 'problem__origin' looks inconsistent — confirm Attempt has a direct
            # 'problem' relation, otherwise this should be
            # problem_in_contest__problem__origin.
            query = query & Q(problem__origin=self.origin)
        if self.language:
            query = query & Q(compiler__highlighter=self.language)
        # One attempt per problem (the earliest), to count distinct solved problems.
        attempts = Attempt.objects.filter(query).order_by('problem_in_contest__problem', 'time').distinct('problem_in_contest__problem')
        attempts_amount = len(attempts)
        # NOTE(review): 'amount' is nullable; the comparison below assumes it is set.
        if attempts_amount >= self.amount:
            attempts = sorted(attempts, key=lambda x: x.time)
            # Earned at the moment the amount-th qualifying attempt was made.
            earned_at = attempts[self.amount - 1].time
            UserAchievement.objects.create(
                user=user,
                achievement=self,
                earned_at=earned_at
            )
            return AchievementStatus(self, True, earned_at, 100)
        return AchievementStatus(self, False, None, 100 * attempts_amount / self.amount)
    def __str__(self):
        return self.name
class UserAchievement(md.Model):
    """Join table recording that a user earned an achievement, and when."""
    user = md.ForeignKey(User, md.CASCADE)
    achievement = md.ForeignKey(Achievement, md.CASCADE)
    earned_at = md.DateTimeField(blank=True, null=True)
    created_at = md.DateTimeField(auto_now_add=True)
    updated_at = md.DateTimeField(auto_now=True)
    class Meta:
        db_table = 'user_achievements'
        get_latest_by = 'created_at'
joakim-hove/ert | tests/ert_tests/shared/test_log_abort.py | Python | gpl-3.0 | 626 | 0.001597 | import logging
from unittest.mock import MagicMock, call
from ert_logging._log_util_abort import _log_util_abort
def test_log_util_abort(caplog, monkeypatch):
    """_log_util_abort logs the C trace at ERROR level and shuts logging down.

    (The expected-message literal had two ' | ' extraction artifacts —
    'my_bac | ktrace' and 'er | r_message' — which are repaired here.)
    """
    shutdown_mock = MagicMock()
    monkeypatch.setattr(logging, "shutdown", shutdown_mock)
    with caplog.at_level(logging.ERROR):
        _log_util_abort("fname", 1, "some_func", "err_message", "my_backtrace")
    assert (
        "C trace:\nmy_backtrace \nwith message: err_message \nfrom file: "
        "fname in some_func at line: 1\n\nPython backtrace:"
    ) in caplog.text
    shutdown_mock.assert_called_once_with()  # must shutdown to propagate message
HKV-products-services/hkvfewspy | hkvfewspy/io/rest_fewspi.py | Python | bsd-3-clause | 21,087 | 0.001518 | import json
import requests
import types
import pandas as pd
from datetime import datetime, date
import gzip
import io
from ..utils.simplenamespace import *
from ..utils.query_helper import query
from ..utils.pi_helper import *
import collections
class PiRest(object):
"""create Pi object that can interact with REST fewspi service"""
    def __init__(self):
        """Initialise defaults for the FEWS PI REST service (PI JSON format)."""
        self.documentVersion = "1.25"
        self.documentFormat = "PI_JSON"
        self.showAttributes = True
    class utils(object):
        """Small helpers used by PiRest (XML filter handling, tz conversion, gzip)."""
        @staticmethod
        def addFilter(self, child):
            # NOTE(review): declared @staticmethod but takes the PiRest instance
            # as its first argument; callers must pass it explicitly.
            # 'child' is an XML element (cdata attributes), unlike the dict-based
            # _addFilter on PiRest itself.
            setattr(
                self.Filters,
                child["id"].replace(".", "_"),
                {
                    "id": child["id"],
                    "name": child.name.cdata,
                    "description": child.description.cdata,
                },
            )
        @staticmethod
        def event_client_datetime(event, tz_server, tz_client="Europe/Amsterdam"):
            """
            Get datetime object in client time of an XML Element named event with attributes date and time
            input:
                event : XML Element named event [eg: obj.TimeSeries.series.event[0]]
                tz_server : timezone name of the server timezone [eg: 'Etc/GMT']
                tz_client : timezone name of the client timezone [eg: 'Europe/Amsterdam']
            return
                event_client_time : a datetime object of the event in the client timezone
            """
            # convert XML element date string to integer list
            event_server_date = list(
                map(int, event["date"].split("-"))
            ) # -> [yyyy, MM, dd]
            event_server_time = list(
                map(int, event["time"].split(":"))
            ) # -> [HH, mm, ss]
            # define server time
            # NOTE(review): 'pytz' is not imported explicitly in this module —
            # presumably it arrives via the star import from pi_helper; confirm.
            server_time = datetime(
                event_server_date[0],
                event_server_date[1],
                event_server_date[2],
                event_server_time[0],
                event_server_time[1],
                event_server_time[2],
                tzinfo=pytz.timezone(tz_server),
            )
            client_timezone = pytz.timezone(tz_client)
            # returns datetime in the new timezone
            event_client_time = server_time.astimezone(client_timezone)
            return event_client_time
        @staticmethod
        def gzip_str(string_):
            """
            write string to gzip compressed bytes object
            """
            out = io.BytesIO()
            with gzip.GzipFile(fileobj=out, mode="w") as fo:
                fo.write(string_.encode())
            bytes_obj = out.getvalue()
            return bytes_obj
        @staticmethod
        def gunzip_bytes_obj(bytes_obj):
            """
            read string from gzip compressed bytes object
            """
            in_ = io.BytesIO()
            in_.write(bytes_obj)
            in_.seek(0)
            with gzip.GzipFile(fileobj=in_, mode="rb") as fo:
                gunzipped_bytes_obj = fo.read()
            return gunzipped_bytes_obj.decode()
    def setUrl(self, url):
        """Set the base URL of the PI REST service.

        Path segments are appended directly ("{}filters".format(url)), so the
        URL should end with '/'.
        """
        self.url = url
    def setQueryParameters(self, prefill_defaults=True, protocol="soap"):
        """Return a new query-parameter builder (utils.query_helper.query)."""
        return query(prefill_defaults, protocol)
    def getTimeZoneId(self):
        """
        get the servers TimeZoneId
        all the results of get*** functions are also written back in the class object without 'get'
        (eg result of Pi.getTimeZoneId() is stored in Pi.TimeZoneId)
        """
        url = "{}timezoneid".format(self.url)
        # Plain GET; the endpoint returns the timezone id as the raw body text.
        response = requests.get(url)
        setattr(self, "TimeZoneId", response.text)
        return response.text
    def _addFilter(self, filter):
        """
        Add a filter to the collection
        """
        # NOTE(review): parameter name shadows the builtin 'filter'.
        # Dots in the id are not valid attribute characters, hence the replace.
        setattr(
            self.Filters,
            filter["id"].replace(".", "_"),
            {"id": filter["id"], "name": filter["name"]},
        )
def getFilters(self):
"""
get the filters known at the Pi service, nested filters will be 'unnested'
example : https://db.dmhoutribdijk.nl/FewsWebServices/rest/fewspiservice/v1/filters?documentFormat=PI_XML&documentVersion=1.25
https://db.dmhoutribdijk.nl/FewsWebServices/rest/fewspiservice/v1/filters?documentFormat=PI_XML&documentVersion=1.25
"""
self.Filters = types.SimpleNamespace()
url = "{}filters".format(self.url)
params = dict(
documentVersion=self.documentVersion, documentFormat=self.documentFormat
)
response = requests.get(url, params=params)
json_data = json.loads(response.text)
for piFilter in json_data.get("filters"):
keys = list(piFilter.keys())
if "child" in keys:
for child in piFilter["child"]:
keys = list(child.keys())
if "child" in keys:
keys = list(child.keys())
for child in child["child"]:
keys = list(child.keys())
if "child" in keys:
for child in child["child"]:
self._addFilter(child)
self._addFilter(child)
self._addFilter(child)
self._addFilter(piFilter)
return pd.DataFrame(self.Filters.__dict__)
def runTask(
self,
startTime,
endTime,
workflowId,
userId=None,
coldStateId=None,
scenarioId=None,
piParametersXml=None,
timeZero=None,
clientId=None,
piVersion="1.22",
description=None,
):
"""
run a workflow known at the Pi service
Parameters
----------
clientId: str
workflowId: str
startTime: datetime
timeZero: str,
endTime: datetime,
coldStateId: str,
scenarioId: str,
coldstateId: str,
piParametersXml: xml object
userId: str
description: str
useColdState: boolean
piVersion: str
described the version of XML that is returned from the Pi service
(defaults to 1.22 as current version only can read version 1.22)
piXmlContent: xml object
"""
# set new empty attribute in object for task
self.Task = types.SimpleNamespace()
url = "{}runtask".format(self.url)
if type(startTime) == date:
startTime = datetime.combine(startTime, datetime.min.time())
if startTime is not None:
try:
startTime = startTime.isoformat(sep="T", timespec="auto") + "Z"
except TypeError as e:
print(f"stateTime is not date or datetime type: {e}")
if type(endTime) == date:
endTime = datetime.combine(endTime, datetime.min.time())
if endTime is not None:
try:
endTime = endTime.isoformat(sep="T", timespec="auto") + "Z"
except TypeError as e:
print(f"endTime is not date or datetime type: {e}")
if type(timeZero) == date:
timeZero = datetime.combine(timeZero, datetime.min.time())
if timeZero is not None:
try:
timeZero = timeZero.isoformat(sep="T", timespec="auto") + "Z"
except TypeError as e:
print(f"timeZero is not date or datetime type: {e}")
params = dict(
workflowId=workflowId,
startTime=startTime,
timeZero=timeZero,
endTime=endTime,
coldStateId=coldStateId,
scenarioId=scenarioId,
userId=userId,
description=description,
)
headers = {"Content-type": "application/x-www-form-urlencoded"}
data = "piModelParametersXmlContent={}".format(piParametersXml)
# post task
postRunTask_response = requests.post(
url, data=data, params=params, headers=headers
)
if postRunTask_response.status_cod |
digitalocean/netbox | netbox/extras/forms.py | Python | apache-2.0 | 11,110 | 0.00108 | from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import mark_safe
from dcim.models import DeviceRole, Platform, Region, Site
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, APISelectMultiple, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect, ColorSelect,
ContentTypeSelect, CSVModelForm, DateTimePicker, DynamicModelMultipleChoiceField, JSONField, SlugField,
StaticSelect2, BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.models import Cluster, ClusterGroup
from .choices import *
from .models import ConfigContext, CustomField, ImageAttachment, ObjectChange, Tag
#
# Custom fields
#
class CustomFieldModelForm(forms.ModelForm):
    """ModelForm mixin that injects a 'cf_*' form field per applicable CustomField."""
    def __init__(self, *args, **kwargs):
        self.obj_type = ContentType.objects.get_for_model(self._meta.model)
        self.custom_fields = []
        super().__init__(*args, **kwargs)
        self._append_customfield_fields()
    def _append_customfield_fields(self):
        """
        Append form fields for all CustomFields assigned to this model.
        """
        # Append form fields; assign initial values if modifying and existing object
        for cf in CustomField.objects.filter(content_types=self.obj_type):
            field_name = 'cf_{}'.format(cf.name)
            if self.instance.pk:
                self.fields[field_name] = cf.to_form_field(set_initial=False)
                self.fields[field_name].initial = self.instance.custom_field_data.get(cf.name)
            else:
                self.fields[field_name] = cf.to_form_field()
            # Annotate the field in the list of CustomField form fields
            self.custom_fields.append(field_name)
    def clean(self):
        # Save custom field data on instance; cf_name[3:] strips the 'cf_' prefix.
        for cf_name in self.custom_fields:
            self.instance.custom_field_data[cf_name[3:]] = self.cleaned_data.get(cf_name)
        return super().clean()
class CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelForm):
    """CSV-import variant: custom fields are rendered for CSV parsing instead."""
    def _append_customfield_fields(self):
        # Append form fields
        for cf in CustomField.objects.filter(content_types=self.obj_type):
            field_name = 'cf_{}'.format(cf.name)
            self.fields[field_name] = cf.to_form_field(for_csv_import=True)
            # Annotate the field in the list of CustomField form fields
            self.custom_fields.append(field_name)
class CustomFieldBulkEditForm(BulkEditForm):
    """Bulk-edit form mixin exposing every applicable CustomField as optional."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.custom_fields = []
        self.obj_type = ContentType.objects.get_for_model(self.model)
        # Add all applicable CustomFields to the form
        custom_fields = CustomField.objects.filter(content_types=self.obj_type)
        for cf in custom_fields:
            # Annotate non-required custom fields as nullable
            if not cf.required:
                self.nullable_fields.append(cf.name)
            self.fields[cf.name] = cf.to_form_field(set_initial=False, enforce_required=False)
            # Annotate this as a custom field
            self.custom_fields.append(cf.name)
class CustomFieldFilterForm(forms.Form):
    """Filter form mixin adding a 'cf_*' field for every filterable CustomField."""
    def __init__(self, *args, **kwargs):
        self.obj_type = ContentType.objects.get_for_model(self.model)
        super().__init__(*args, **kwargs)
        # Add all applicable CustomFields to the form; fields whose filter
        # logic is disabled are excluded.
        custom_fields = CustomField.objects.filter(content_types=self.obj_type).exclude(
            filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED
        )
        for cf in custom_fields:
            field_name = 'cf_{}'.format(cf.name)
            self.fields[field_name] = cf.to_form_field(set_initial=True, enforce_required=False)
#
# Tags
#
class TagForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a Tag; slug is auto-derived from the name."""
    slug = SlugField()
    class Meta:
        model = Tag
        fields = [
            'name', 'slug', 'color', 'description'
        ]
class TagCSVForm(CSVModelForm):
    """CSV bulk-import form for Tags."""
    slug = SlugField()
    class Meta:
        model = Tag
        fields = Tag.csv_headers
        help_texts = {
            'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
        }
class AddRemoveTagsForm(forms.Form):
    """Form mixin providing add_tags/remove_tags fields for bulk-edit views.

    (Repaired an extraction artifact: the first field assignment previously
    read ``self.fields[ | 'add_tags']``.)
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fields are added dynamically so the Tag queryset is evaluated per request.
        self.fields['add_tags'] = DynamicModelMultipleChoiceField(
            queryset=Tag.objects.all(),
            required=False
        )
        self.fields['remove_tags'] = DynamicModelMultipleChoiceField(
            queryset=Tag.objects.all(),
            required=False
        )
class TagFilterForm(BootstrapMixin, forms.Form):
    """Search/filter form for the Tag list view.

    (Repaired an extraction artifact: the class name previously read
    ``TagFil | terForm``.)
    """
    model = Tag
    q = forms.CharField(
        required=False,
        label='Search'
    )
class TagBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for Tags: color and description are editable en masse."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    color = forms.CharField(
        max_length=6,
        required=False,
        widget=ColorSelect()
    )
    description = forms.CharField(
        max_length=200,
        required=False
    )
    class Meta:
        nullable_fields = ['description']
#
# Config contexts
#
class ConfigContextForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a ConfigContext and its scope assignments."""
    regions = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        required=False
    )
    sites = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        required=False
    )
    roles = DynamicModelMultipleChoiceField(
        queryset=DeviceRole.objects.all(),
        required=False
    )
    platforms = DynamicModelMultipleChoiceField(
        queryset=Platform.objects.all(),
        required=False
    )
    cluster_groups = DynamicModelMultipleChoiceField(
        queryset=ClusterGroup.objects.all(),
        required=False
    )
    clusters = DynamicModelMultipleChoiceField(
        queryset=Cluster.objects.all(),
        required=False
    )
    tenant_groups = DynamicModelMultipleChoiceField(
        queryset=TenantGroup.objects.all(),
        required=False
    )
    tenants = DynamicModelMultipleChoiceField(
        queryset=Tenant.objects.all(),
        required=False
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False
    )
    # JSON payload editor; empty label hides the field caption.
    data = JSONField(
        label=''
    )
    class Meta:
        model = ConfigContext
        fields = (
            'name', 'weight', 'description', 'is_active', 'regions', 'sites', 'roles', 'platforms', 'cluster_groups',
            'clusters', 'tenant_groups', 'tenants', 'tags', 'data',
        )
class ConfigContextBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for ConfigContexts (weight, active flag, description)."""
    pk = forms.ModelMultipleChoiceField(
        queryset=ConfigContext.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    weight = forms.IntegerField(
        required=False,
        min_value=0
    )
    is_active = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect()
    )
    description = forms.CharField(
        required=False,
        max_length=100
    )
    class Meta:
        nullable_fields = [
            'description',
        ]
class ConfigContextFilterForm(BootstrapMixin, forms.Form):
q = forms.CharField(
required=False,
label='Search'
)
region = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
to_field_name='slug',
required=False
)
site = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
to_field_name='slug',
required=False
)
role = DynamicModelMultipleChoiceField(
queryset=DeviceRole.objects.all(),
to_field_name='slug',
required=False
)
platform = DynamicModelMultipleChoiceField(
queryset=Platform.objects.all(),
to_field_name='slug',
required=False
)
cluster_group = DynamicModelMultipleChoiceField(
queryset=ClusterGroup.objects.all(),
to_field_name='slug',
|
sbuss/pydockerfile | tests/fixtures.py | Python | mit | 884 | 0 | import pytest
from pydockerfile.parser import DockerfileParser
# Fixture Dockerfiles bundled with the test suite.
# (Repaired an extraction artifact in the third path, which previously read
# 'tes | ts/dockerfiles/with-comments.Dockerfile'.)
DOCKERFILE_PATH = 'tests/dockerfiles/basic.Dockerfile'
DOCKERFILE_LINE_CONT_PATH = 'tests/dockerfiles/line-cont.Dockerfile'
DOCKERFILE_WITH_COMMENTS_PATH = 'tests/dockerfiles/with-comments.Dockerfile'
@pytest.fixture
def dockerfile_str():
    """Raw text of the basic Dockerfile fixture."""
    with open(DOCKERFILE_PATH) as handle:
        return handle.read()
@pytest.fixture
def dockerfile_line_cont_str():
    """Raw text of the line-continuation Dockerfile fixture."""
    with open(DOCKERFILE_LINE_CONT_PATH) as handle:
        return handle.read()
@pytest.fixture
def dockerfile_with_comments_str():
    """Raw text of the commented Dockerfile fixture."""
    with open(DOCKERFILE_WITH_COMMENTS_PATH) as handle:
        return handle.read()
@pytest.fixture
def basic_dockerfile(dockerfile_str):
    """Parsed Dockerfile object built from the basic fixture file.

    The dockerfile_str fixture is requested as a parameter: pytest forbids
    calling fixture functions directly (doing so raises at collection time).
    """
    parser = DockerfileParser()
    return parser.from_string(dockerfile_str)
@pytest.fixture
def dockerfile_with_comments(dockerfile_with_comments_str):
    """Parsed Dockerfile object built from the commented fixture file.

    The string fixture is requested as a parameter: pytest forbids calling
    fixture functions directly (doing so raises at collection time).
    """
    parser = DockerfileParser()
    return parser.from_string(dockerfile_with_comments_str)
endlessm/chromium-browser | third_party/grpc/src/src/python/grpcio_tests/tests/unit/test_common.py | Python | bsd-3-clause | 4,042 | 0.000247 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code used throughout tests of gRPC."""
import collections
import threading
from concurrent import futures
import grpc
import six
# Canonical metadata fixtures shared across the unit tests: distinct key sets
# for invocation-side and service-side initial/terminal metadata.
INVOCATION_INITIAL_METADATA = (
    ('0', 'abc'),
    ('1', 'def'),
    ('2', 'ghi'),
)
SERVICE_INITIAL_METADATA = (
    ('3', 'jkl'),
    ('4', 'mno'),
    ('5', 'pqr'),
)
SERVICE_TERMINAL_METADATA = (
    ('6', 'stu'),
    ('7', 'vwx'),
    ('8', 'yza'),
)
DETAILS = 'test details'
def metadata_transmitted(original_metadata, transmitted_metadata):
    """Judges whether or not metadata was acceptably transmitted.

    gRPC is allowed to insert key-value pairs into the metadata values given by
    applications and to reorder key-value pairs with different keys but it is not
    allowed to alter existing key-value pairs or to reorder key-value pairs with
    the same key.

    Args:
      original_metadata: A metadata value used in a test of gRPC. An iterable over
        iterables of length 2.
      transmitted_metadata: A metadata value corresponding to original_metadata
        after having been transmitted via gRPC. An iterable over iterables of
        length 2.

    Returns:
      A boolean indicating whether transmitted_metadata accurately reflects
      original_metadata after having been transmitted via gRPC.
    """
    # Group values by key, preserving per-key order.
    original = collections.defaultdict(list)
    for key, value in original_metadata:
        original[key].append(value)
    transmitted = collections.defaultdict(list)
    for key, value in transmitted_metadata:
        transmitted[key].append(value)
    # dict.items() is correct on both Python 2 and 3, so the six.iteritems
    # indirection was dropped.
    for key, values in original.items():
        transmitted_iterator = iter(transmitted[key])
        try:
            # Each original value must appear in order among the transmitted
            # values for the same key; extra inserted values are skipped over.
            for value in values:
                while True:
                    if value == next(transmitted_iterator):
                        break
        except StopIteration:
            # Ran out of transmitted values before matching every original one.
            return False
    return True
def test_secure_channel(target, channel_credentials, server_host_override):
    """Creates a secure Channel to a remote host for testing.

    Args:
      target: The host:port of the remote host to which to connect.
      channel_credentials: The grpc.ChannelCredentials with which to connect.
      server_host_override: The target name used for SSL host name checking.

    Returns:
      A grpc.Channel to the remote host through which RPCs may be conducted.
    """
    options = (('grpc.ssl_target_name_override', server_host_override),)
    return grpc.secure_channel(target, channel_credentials, options)
def test_server(max_workers=10):
    """Creates an insecure grpc server.

    These servers have SO_REUSEPORT disabled to prevent cross-talk.
    """
    executor = futures.ThreadPoolExecutor(max_workers=max_workers)
    return grpc.server(executor, options=(('grpc.so_reuseport', 0),))
class WaitGroup(object):
    """Go-style wait group: wait() blocks until the counter drops to zero.

    The condition variable is entered via ``with`` so the lock is released
    even if the body raises (the original paired acquire()/release() calls
    manually, leaking the lock on an exception).
    """

    def __init__(self, n=0):
        # Number of outstanding units of work.
        self.count = n
        self.cv = threading.Condition()

    def add(self, n):
        """Add *n* (possibly negative) units of outstanding work."""
        with self.cv:
            self.count += n

    def done(self):
        """Mark one unit of work finished; wake waiters when none remain."""
        with self.cv:
            self.count -= 1
            if self.count == 0:
                self.cv.notify_all()

    def wait(self):
        """Block until the counter reaches zero (loop guards spurious wakeups)."""
        with self.cv:
            while self.count > 0:
                self.cv.wait()
anbangr/trusted-juju | juju/charm/tests/test_url.py | Python | agpl-3.0 | 5,620 | 0.000178 | from juju.charm.errors import CharmURLError
from juju.charm.url import CharmCollection, CharmURL
from juju.lib.testing import TestCase
class CharmCollectionTest(TestCase):
    """str(CharmCollection) renders 'schema:~user/series', omitting absent users."""
    def test_str(self):
        self.assertEquals(
            str(CharmCollection("foo", "bar", "baz")), "foo:~bar/baz")
        self.assertEquals(
            str(CharmCollection("ping", None, "pong")), "ping:pong")
class CharmURLTest(TestCase):
    """Exercises CharmURL parsing, inference, revisions, and their error paths.

    (Two ' | ' extraction artifacts in test_cannot_infer's expected error
    strings were repaired.)
    """
    def assert_url(self, url, schema, user, series, name, rev):
        # Check every component of a parsed CharmURL in one call.
        self.assertEquals(url.collection.schema, schema)
        self.assertEquals(url.collection.user, user)
        self.assertEquals(url.collection.series, series)
        self.assertEquals(url.name, name)
        self.assertEquals(url.revision, rev)
    def assert_error(self, err, url_str, message):
        self.assertEquals(
            str(err), "Bad charm URL %r: %s" % (url_str, message))
    def assert_parse(self, string, schema, user, series, name, rev):
        # Parsing must round-trip: str(parse(s)) == s, and path drops the schema.
        url = CharmURL.parse(string)
        self.assert_url(url, schema, user, series, name, rev)
        self.assertEquals(str(url), string)
        self.assertEquals(url.path, string.split(":", 1)[1])
    def test_parse(self):
        self.assert_parse(
            "cs:~user/series/name", "cs", "user", "series", "name", None)
        self.assert_parse(
            "cs:~user/series/name-0", "cs", "user", "series", "name", 0)
        self.assert_parse(
            "cs:series/name", "cs", None, "series", "name", None)
        self.assert_parse(
            "cs:series/name-0", "cs", None, "series", "name", 0)
        self.assert_parse(
            "cs:series/name0", "cs", None, "series", "name0", None)
        self.assert_parse(
            "cs:series/n0-0n-n0", "cs", None, "series", "n0-0n-n0", None)
        self.assert_parse(
            "local:series/name", "local", None, "series", "name", None)
        self.assert_parse(
            "local:series/name-0", "local", None, "series", "name", 0)
    def assert_cannot_parse(self, string, message):
        err = self.assertRaises(CharmURLError, CharmURL.parse, string)
        self.assert_error(err, string, message)
    def test_cannot_parse(self):
        self.assert_cannot_parse(
            None, "not a string type")
        self.assert_cannot_parse(
            "series/name-1", "no schema specified")
        self.assert_cannot_parse(
            "bs:~user/series/name-1", "invalid schema")
        self.assert_cannot_parse(
            "cs:~1/series/name-1", "invalid user")
        self.assert_cannot_parse(
            "cs:~user/1/name-1", "invalid series")
        self.assert_cannot_parse(
            "cs:~user/series/name-1-2", "invalid name")
        self.assert_cannot_parse(
            "cs:~user/series/name-1-n-2", "invalid name")
        self.assert_cannot_parse(
            "cs:~user/series/name--a-2", "invalid name")
        self.assert_cannot_parse(
            "cs:~user/series/huh/name-1", "invalid form")
        self.assert_cannot_parse(
            "cs:~user/name", "no series specified")
        self.assert_cannot_parse(
            "cs:name", "invalid form")
        self.assert_cannot_parse(
            "local:~user/series/name", "users not allowed in local URLs")
        self.assert_cannot_parse(
            "local:~user/name", "users not allowed in local URLs")
        self.assert_cannot_parse(
            "local:name", "invalid form")
    def test_revision(self):
        url1 = CharmURL.parse("cs:foo/bar")
        error = self.assertRaises(CharmURLError, url1.assert_revision)
        self.assertEquals(
            str(error), "Bad charm URL 'cs:foo/bar': expected a revision")
        url2 = url1.with_revision(0)
        url1.collection.schema = "local"  # change url1, verify deep copied
        url2.assert_revision()
        self.assertEquals(str(url2), "cs:foo/bar-0")
        url3 = url2.with_revision(999)
        url3.assert_revision()
        self.assertEquals(str(url3), "cs:foo/bar-999")
    def assert_infer(self, string, schema, user, series, name, rev):
        # infer() fills in missing schema/series from defaults.
        url = CharmURL.infer(string, "default")
        self.assert_url(url, schema, user, series, name, rev)
    def test_infer(self):
        self.assert_infer(
            "name", "cs", None, "default", "name", None)
        self.assert_infer(
            "name-0", "cs", None, "default", "name", 0)
        self.assert_infer(
            "series/name", "cs", None, "series", "name", None)
        self.assert_infer(
            "series/name-0", "cs", None, "series", "name", 0)
        self.assert_infer(
            "cs:name", "cs", None, "default", "name", None)
        self.assert_infer(
            "cs:name-0", "cs", None, "default", "name", 0)
        self.assert_infer(
            "cs:~user/name", "cs", "user", "default", "name", None)
        self.assert_infer(
            "cs:~user/name-0", "cs", "user", "default", "name", 0)
        self.assert_infer(
            "local:name", "local", None, "default", "name", None)
        self.assert_infer(
            "local:name-0", "local", None, "default", "name", 0)
    def test_cannot_infer(self):
        err = self.assertRaises(
            CharmURLError, CharmURL.infer, "name", "invalid!series")
        self.assertEquals(
            str(err),
            "Bad charm URL 'cs:invalid!series/name': invalid series (URL "
            "inferred from 'name')")
        err = self.assertRaises(
            CharmURLError, CharmURL.infer, "~user/name", "default")
        self.assertEquals(
            str(err),
            "Bad charm URL '~user/name': a URL with a user must specify a "
            "schema")
dpaiton/OpenPV | pv-core/analysis/python/image_shuffle.py | Python | epl-1.0 | 491 | 0.040733 | import sys
import numpy as np
import matplotlib.image as mpimg
import numpy.random as npr
import Image
mode = 'L'  # 8-bit greyscale output mode for PIL

# Shuffle the pixels of every image named on the command line and save the
# result under the same filename in ../new-images/.
# (Replaced the C-style "for i in range(len(sys.argv)-1): i += 1" indexing
# with direct iteration over sys.argv[1:], and repaired two ' | ' extraction
# artifacts on the np.shape and imNew.save lines.)
for name in sys.argv[1:]:
    mi = mpimg.imread(name)
    # NOTE(review): assumes square images — only the first dimension is used
    # for both reshape axes; confirm inputs are N x N.
    x = np.shape(mi)[0]
    mi = np.reshape(mi, (x*x))
    mi = npr.permutation(mi)
    mi = np.reshape(mi, (x,x))
    size = np.shape(mi)
    imNew = Image.new(mode, size)
    data = np.ravel(mi)
    # NOTE(review): pixel values of exactly 1.0 floor to 256, one past the
    # 8-bit range — confirm inputs are strictly < 1.0 or clamp here.
    data = np.floor(data * 256)
    imNew.putdata(data)
    imNew.save("../new-images/%s" % (name,))
mesos-magellan/enrique | enrique/package.py | Python | mit | 4,717 | 0.000212 | import os
import shutil
from urlparse import urlparse
from hashlib import sha1
from plumbum.cmd import git, tar, wget, pip, sudo
from plumbum import ProcessExecutionError
def mkdir_p(path):
    """Create *path* (and any missing parents) and return its absolute form.

    Mirrors ``mkdir -p``: an existing directory is not an error. The makedirs
    call is wrapped so a concurrent creation between the exists() check and
    makedirs() (TOCTOU race) no longer raises.
    """
    path = os.path.abspath(os.path.expanduser(path))
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError:
            # Lost a creation race (or hit a real error): only re-raise if
            # the directory still does not exist.
            if not os.path.isdir(path):
                raise
    return path
# Created eagerly at import time — importing this module touches the filesystem.
ENRIQUE_DIR = mkdir_p("~/.mesos-magellan/enrique")
PACKAGES_DIR = mkdir_p(os.path.join(ENRIQUE_DIR, "packages"))
def get_package_cls(url):
    """Return the Package subclass able to fetch *url*.

    git:// URLs and http(s) URLs ending in '.git' map to GitRepo; http(s)
    URLs ending in 'tar.gz' map to GzipArchive.

    Raises:
        ValueError: for any unsupported scheme/extension (the bare
            ``raise ValueError`` now carries the offending URL).
    """
    package_cls = None
    url_parsed = urlparse(url)
    if url_parsed.scheme == "git":
        package_cls = GitRepo
    elif url_parsed.scheme in ["http", "https"]:
        if url.endswith("tar.gz"):
            package_cls = GzipArchive
        if url.endswith(".git"):
            package_cls = GitRepo
    if package_cls is None:
        raise ValueError("unsupported package URL: %r" % (url,))
    return package_cls
def get_name_from_url(url):
    """Return a stable, filesystem-safe package name: the SHA-1 hex of *url*.

    sha1() requires bytes on Python 3, so text input is UTF-8-encoded first;
    byte strings (the only case on Python 2) are hashed unchanged, preserving
    the original digests.
    """
    data = url if isinstance(url, bytes) else url.encode("utf-8")
    return sha1(data).hexdigest()
def get_package(url):
    """Fetch and set up the package at *url*; return the ready Package.

    (Repaired an extraction artifact: the fetch() call previously read
    ``| package.fetch()``.)
    """
    package_cls = get_package_cls(url)
    name = get_name_from_url(url)
    package = package_cls(name, url)
    package.fetch()
    package.setup()
    return package
class Package(object):
    """Base class for a fetched problem package stored under PACKAGES_DIR/<name>."""
    def __init__(self, name, url):
        self.name = name
        self.url = url
        self._problem_path = None
        # Ensure the per-package directory exists up front.
        mkdir_p(self.package_path)
    def fetch(self):
        """Retrieve the package contents; implemented by subclasses."""
        raise NotImplementedError
    def remove(self):
        """Delete the package directory and everything in it."""
        shutil.rmtree(self.package_path)
    def setup(self):
        """Install the package's requirements.txt, if present.

        Tries pip --user, then plain pip, then sudo pip, stopping at the
        first success.
        """
        reqs_txt_path = os.path.join(self.problem_path, "requirements.txt")
        if os.path.exists(reqs_txt_path):
            for command in [
                pip['install', '--user', '-r', reqs_txt_path],
                pip['install', '-r', reqs_txt_path],
                sudo['pip', 'install', '-r', reqs_txt_path]
            ]:
                try:
                    command()
                except ProcessExecutionError as e:
                    print(str(e))
                else:
                    break
            else:
                # for/else: every install strategy failed.
                # NOTE(review): ProcessExecutionError is raised with no
                # arguments — confirm plumbum accepts that constructor.
                raise ProcessExecutionError
    @property
    def problem_path(self):
        # Set by subclasses' fetch() once the problem directory is known.
        return self._problem_path
    @property
    def package_path(self):
        # Root directory for this package's downloaded/extracted contents.
        package_home = os.path.join(PACKAGES_DIR, self.name)
        return package_home
    def download_http(self):
        """Download self.url into the package directory; return the local path."""
        localfile_path = self._download_file(self.url, self.package_path)
        return localfile_path
    @staticmethod
    def _download_file(url, download_path):
        """Download file from url into *download_path* via wget.

        Any existing file of the same name is removed first so wget does not
        write a numbered duplicate.
        """
        local_filename = os.path.join(download_path, url.split('/')[-1])
        if os.path.exists(local_filename):
            os.remove(local_filename)
        wget_dl = wget['-P', download_path, url]
        wget_dl()
        assert os.path.exists(local_filename)
        return local_filename
class Archive(Package):
    """Package delivered as a downloadable archive file."""
    def _extract_package(self, localfile_path):
        """Unpack *localfile_path*; return the problem directory."""
        raise NotImplementedError
    def fetch(self):
        """Download the archive over HTTP and extract it in place."""
        localfile_path = self.download_http()
        problem_path = self._extract_package(localfile_path)
        self._problem_path = problem_path
class GzipArchive(Archive):
    """Package delivered as a .tar.gz archive."""
    def _extract_package(self, localfile_path):
        """Extract the tarball into a fresh directory named after the
        archive (without its .tar.gz suffix); return the problem dir."""
        archive_path = localfile_path
        dirname = os.path.split(localfile_path)[-1].split(".tar.gz")[0]
        target_dir = os.path.join(self.package_path, dirname)
        # Always extract into a clean directory.
        if os.path.exists(target_dir):
            shutil.rmtree(target_dir)
        os.makedirs(target_dir)
        untar = tar['-xzf', archive_path, '-C', target_dir]
        untar()
        extracted_list = os.listdir(target_dir)
        if len(extracted_list) == 1:
            item = os.path.join(target_dir, extracted_list[0])
            if os.path.isdir(item):
                # This is a single extracted directory so we help the
                # user out and go inside it and return that for the
                # problem_path
                return item
        return target_dir
class GitRepo(Package):
    """Package delivered as a git repository."""
    def fetch(self):
        """Clone the repository, or pull when a clone already exists."""
        url = self.get_https_dl_url()
        local_dirname = os.path.join(self.package_path,
                                     url.split('/')[-1].split('.git')[0])
        if not os.path.exists(local_dirname):
            clone = git['clone', url, local_dirname]
            clone()
        else:
            # If repo exists, pull instead of cloning
            pull = git['-C', local_dirname, 'pull']
            pull()
        self._problem_path = local_dirname
    def get_https_dl_url(self):
        """Return an https clone URL derived from self.url.

        git:// URLs are rewritten to https://; https URLs ending in
        '.git' pass through unchanged; anything else raises ValueError.
        """
        url_parsed = urlparse(self.url)
        if url_parsed.scheme == "git":
            # count=1 so only the leading scheme token is replaced.
            url = self.url.replace('git', 'https', 1)
        elif url_parsed.scheme == "https" and self.url.endswith(".git"):
            url = self.url
        else:
            raise ValueError
        return url
|
rohitranjan1991/home-assistant | tests/components/marytts/test_tts.py | Python | mit | 3,689 | 0.000271 | """The tests for the MaryTTS speech platform."""
import os
import shutil
from unittest.mock import patch
import pytest
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
DOMAIN as DOMAIN_MP,
SERVICE_PLAY_MEDIA,
)
import homeassistant.components.tts as tts
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, async_mock_service
@pytest.fixture(autouse=True)
def cleanup_cache(hass):
    """Prevent TTS writing."""
    # Run the test body first, then delete any cache directory the TTS
    # integration created so tests do not leak files between runs.
    yield
    default_tts = hass.config.path(tts.DEFAULT_CACHE_DIR)
    if os.path.isdir(default_tts):
        shutil.rmtree(default_tts)
async def test_setup_component(hass):
    """Test setup component."""
    config = {tts.DOMAIN: {"platform": "marytts"}}
    # assert_setup_component(1, ...) verifies exactly one platform loaded.
    with assert_setup_component(1, tts.DOMAIN):
        await async_setup_component(hass, tts.DOMAIN, config)
        await hass.async_block_till_done()
async def test_service_say(hass):
    """Test service call say."""
    # Capture media_player.play_media calls triggered by the TTS service.
    calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    config = {tts.DOMAIN: {"platform": "marytts"}}
    with assert_setup_component(1, tts.DOMAIN):
        await async_setup_component(hass, tts.DOMAIN, config)
        await hass.async_block_till_done()
    # Stub out the network call to the MaryTTS server.
    with patch(
        "homeassistant.components.marytts.tts.MaryTTS.speak",
        return_value=b"audio",
    ) as mock_speak:
        await hass.services.async_call(
            tts.DOMAIN,
            "marytts_say",
            {
                "entity_id": "media_player.something",
                tts.ATTR_MESSAGE: "HomeAssistant",
            },
            blocking=True,
        )
    mock_speak.assert_called_once()
    # No effects configured, so an empty effects dict is expected.
    mock_speak.assert_called_with("HomeAssistant", {})
    assert len(calls) == 1
    assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(".wav") != -1
async def test_service_say_with_effect(hass):
    """Test service call say with effects."""
    # Fixed corrupted '|' characters from the source dump (stray trailing
    # '|' on the docstring line and 'Volum | e' in the config key).
    calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    # Configure a MaryTTS audio effect; it must be forwarded to speak().
    config = {tts.DOMAIN: {"platform": "marytts", "effect": {"Volume": "amount:2.0;"}}}
    with assert_setup_component(1, tts.DOMAIN):
        await async_setup_component(hass, tts.DOMAIN, config)
        await hass.async_block_till_done()
    with patch(
        "homeassistant.components.marytts.tts.MaryTTS.speak",
        return_value=b"audio",
    ) as mock_speak:
        await hass.services.async_call(
            tts.DOMAIN,
            "marytts_say",
            {
                "entity_id": "media_player.something",
                tts.ATTR_MESSAGE: "HomeAssistant",
            },
            blocking=True,
        )
    mock_speak.assert_called_once()
    mock_speak.assert_called_with("HomeAssistant", {"Volume": "amount:2.0;"})
    assert len(calls) == 1
    assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(".wav") != -1
async def test_service_say_http_error(hass):
    """Test service call say."""
    calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
    config = {tts.DOMAIN: {"platform": "marytts"}}
    with assert_setup_component(1, tts.DOMAIN):
        await async_setup_component(hass, tts.DOMAIN, config)
        await hass.async_block_till_done()
    # Make the backend raise so the TTS call fails.
    with patch(
        "homeassistant.components.marytts.tts.MaryTTS.speak",
        side_effect=Exception(),
    ) as mock_speak:
        await hass.services.async_call(
            tts.DOMAIN,
            "marytts_say",
            {
                "entity_id": "media_player.something",
                tts.ATTR_MESSAGE: "HomeAssistant",
            },
        )
        await hass.async_block_till_done()
    mock_speak.assert_called_once()
    # On failure no media should have been played.
    assert len(calls) == 0
|
citrix-openstack-build/sahara | sahara/utils/general.py | Python | apache-2.0 | 3,962 | 0 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import six
from sahara import conductor as c
from sahara import context
from sahara.i18n import _LI
from sahara.openstack.common import log as logging
from sahara.utils.notification import sender
conductor = c.API
LOG = logging.getLogger(__name__)
NATURAL_SORT_RE = re.compile('([0-9]+)')
def find_dict(iterable, **rules):
    """Search for dict in iterable of dicts using specified key-value rules.

    Returns the first dict that contains every key in *rules* with an
    equal value, or None when nothing matches.  With no rules, the first
    item (if any) matches.
    """
    for item in iterable:
        # all() short-circuits on the first failed rule (the original
        # kept evaluating every rule after a mismatch).
        if all(k in item and item[k] == v for k, v in rules.items()):
            return item
    return None
def find(lst, **kwargs):
    """Return the first object in *lst* whose attributes equal every
    keyword argument, or None when no object matches."""
    for candidate in lst:
        if all(getattr(candidate, name) == value
               for name, value in kwargs.items()):
            return candidate
    return None
def get_by_id(lst, id):
    """Return the element of *lst* whose .id equals *id*, or None."""
    matches = (obj for obj in lst if obj.id == id)
    return next(matches, None)
# Taken from http://stackoverflow.com/questions/4836710/does-
# python-have-a-built-in-function-for-string-natural-sort
def natural_sort_key(s):
    """Sort key implementing natural ordering: runs of digits compare
    numerically, everything else compares case-insensitively."""
    pieces = re.split(NATURAL_SORT_RE, s)
    return [int(piece) if piece.isdigit() else piece.lower()
            for piece in pieces]
def change_cluster_status(cluster, status, status_description=None):
    """Persist a new status (and optional description) for *cluster*,
    log it and emit an 'update' notification.

    Returns the updated cluster object, or None when *cluster* is None.
    """
    if cluster is None:
        return None
    update_dict = {"status": status}
    if status_description:
        update_dict["status_description"] = status_description
    cluster = conductor.cluster_update(context.ctx(), cluster, update_dict)
    LOG.info(_LI("Cluster status has been changed: id=%(id)s, New status="
                 "%(status)s"), {'id': cluster.id, 'status': cluster.status})
    sender.notify(context.ctx(), cluster.id, cluster.name, cluster.status,
                  "update")
    return cluster
def check_cluster_exists(cluster):
    """Return True when *cluster* still exists in the database
    (it may have been removed concurrently)."""
    ctx = context.ctx()
    # check if cluster still exists (it might have been removed)
    cluster = conductor.cluster_get(ctx, cluster)
    return cluster is not None
def get_instances(cluster, instances_ids=None):
    """Collect instance objects from every node group of *cluster*.

    :param cluster: cluster whose node groups are scanned.
    :param instances_ids: optional list of instance ids; when given, only
        those instances are returned (KeyError on an unknown id).  When
        None, all instances are returned.
    """
    inst_map = {}
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            inst_map[instance.id] = instance
    if instances_ids is not None:
        return [inst_map[id] for id in instances_ids]
    # plain .values() behaves identically on Python 2 and 3; no need
    # for six.itervalues here.
    return list(inst_map.values())
def clean_cluster_from_empty_ng(cluster):
    """Remove node groups that have no instances (count == 0) from
    *cluster*."""
    ctx = context.ctx()
    for ng in cluster.node_groups:
        if ng.count == 0:
            conductor.node_group_remove(ctx, ng)
def generate_etc_hosts(cluster):
    """Build /etc/hosts content for *cluster*: a localhost line plus one
    'ip fqdn hostname' line per instance.

    Fixed corrupted '|' characters from the source dump
    ('clus | ter', 'instanc | e').
    """
    hosts = "127.0.0.1 localhost\n"
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            hosts += "%s %s %s\n" % (instance.internal_ip,
                                     instance.fqdn(),
                                     instance.hostname())
    return hosts
def generate_instance_name(cluster_name, node_group_name, index):
    """Return the lowercased '<cluster>-<group>-NNN' instance name."""
    raw = "%s-%s-%03d" % (cluster_name, node_group_name, index)
    return raw.lower()
def generate_auto_security_group_name(node_group):
    """Return the lowercased security-group name for *node_group*:
    '<cluster>-<group>-<first 8 chars of group id>'."""
    cluster = node_group.cluster
    raw = "%s-%s-%s" % (cluster.name, node_group.name, node_group.id[:8])
    return raw.lower()
def generate_aa_group_name(cluster_name):
    """Return the lowercased anti-affinity group name for *cluster_name*."""
    return "{0}-aa-group".format(cluster_name).lower()
|
andrewyoung1991/supriya | supriya/tools/documentationtools/SupriyaDocumentationManager.py | Python | mit | 25,757 | 0.000233 | # -*- encoding: utf-8 -*-
from __future__ import print_function
import enum
import inspect
import importlib
import os
import re
import shutil
import types
class SupriyaDocumentationManager(object):
    @staticmethod
    def build_attribute_section(
        cls,
        attrs,
        directive,
        title,
        ):
        """Build a ReST section titled *title* with one autodoc directive
        (of kind *directive*) per attribute in *attrs*.

        Attributes defined on a base class (not on *cls* itself) are
        wrapped in an HTML-only 'inherited' container.  Returns a list of
        ReST nodes; empty when *attrs* is empty.
        """
        from abjad.tools import documentationtools
        result = []
        if attrs:
            result.append(documentationtools.ReSTHeading(
                level=3,
                text=title,
                ))
            for attr in attrs:
                options = {
                    'noindex': True,
                    }
                autodoc = documentationtools.ReSTAutodocDirective(
                    argument='{}.{}'.format(cls.__name__, attr.name),
                    directive=directive,
                    options=options,
                    )
                if cls is attr.defining_class:
                    result.append(autodoc)
                else:
                    # Inherited attribute: mark it and show it only in
                    # the HTML build.
                    container = documentationtools.ReSTDirective(
                        argument='inherited',
                        directive='container',
                        )
                    container.append(autodoc)
                    html_only = documentationtools.ReSTDirective(
                        argument='html',
                        directive='only',
                        )
                    html_only.append(container)
                    result.append(html_only)
        return result
    @staticmethod
    def build_attributes_autosummary(
        cls,
        class_methods,
        data,
        inherited_attributes,
        methods,
        readonly_properties,
        readwrite_properties,
        special_methods,
        static_methods,
        ):
        """Build an HTML-only 'Attribute summary' autosummary section for
        *cls*.

        Properties and methods are sorted by name; special methods are
        appended unsorted at the end.  Returns a list with a single
        ReST 'only html' node, or an empty list when *cls* has no
        listed attributes.  (*data* and *inherited_attributes* are
        accepted for signature parity but not summarized here.)
        """
        from abjad.tools import documentationtools
        result = []
        attributes = []
        attributes.extend(readonly_properties)
        attributes.extend(readwrite_properties)
        attributes.extend(methods)
        attributes.extend(class_methods)
        attributes.extend(static_methods)
        attributes.sort(key=lambda x: x.name)
        attributes.extend(special_methods)
        if attributes:
            autosummary = documentationtools.ReSTAutosummaryDirective()
            for attribute in attributes:
                autosummary.append('~{}.{}.{}'.format(
                    cls.__module__,
                    cls.__name__,
                    attribute.name,
                    ))
            html_only = documentationtools.ReSTOnlyDirective(argument='html')
            html_only.append(documentationtools.ReSTHeading(
                level=3,
                text='Attribute summary',
                ))
            html_only.append(autosummary)
            result.append(html_only)
        return result
@staticmethod
def build_bases_section(cls):
from abjad.tools import documentationtools
result = []
| result.append(documentationtools.ReSTHeading(
level=3,
text='Bases',
))
mro = inspect.getmro(cls)[1:]
for cls in mro:
parts = cls.__module__.split('.') + [cls.__name__]
while 1 < len(parts) and parts[-1] == parts[-2]:
parts.pop()
packagesystem_path = '.'.join(parts)
text = '- :py:class:`{}`'.format(packagesystem_path)
paragraph = documentationtools.ReSTPar | agraph(
text=text,
wrap=False,
)
result.append(paragraph)
return result
    @staticmethod
    def build_enumeration_section(cls):
        """Build a ReST 'Enumeration Items' section listing the name and
        value of each member of *cls*, sorted by member name.

        Returns an empty list when *cls* is not an enum.Enum subclass.
        """
        from abjad.tools import documentationtools
        result = []
        if not issubclass(cls, enum.Enum):
            return result
        items = sorted(cls, key=lambda x: x.name)
        if items:
            result.append(documentationtools.ReSTHeading(
                level=3,
                text='Enumeration Items',
                ))
            for item in items:
                name = item.name
                value = item.value
                line = '- `{}`: {}'.format(name, value)
                paragraph = documentationtools.ReSTParagraph(
                    text=line,
                    wrap=False,
                    )
                result.append(paragraph)
        return result
    @staticmethod
    def collect_class_attributes(cls):
        """Classify the attributes of *cls* for documentation.

        Returns an 8-tuple of sorted tuples:
        (class_methods, data, inherited_attributes, methods,
        readonly_properties, readwrite_properties, special_methods,
        static_methods).  Attributes defined on ``object`` itself are
        skipped entirely; attributes defined on any other base class are
        additionally collected in inherited_attributes.
        """
        # Dunder/boilerplate names that should never appear in the docs.
        ignored_special_methods = (
            '__getattribute__',
            '__getnewargs__',
            '__getstate__',
            '__init__',
            '__reduce__',
            '__reduce_ex__',
            '__setstate__',
            '__sizeof__',
            '__subclasshook__',
            'fromkeys',
            'pipe_cloexec',
            )
        class_methods = []
        data = []
        inherited_attributes = []
        methods = []
        readonly_properties = []
        readwrite_properties = []
        special_methods = []
        static_methods = []
        attrs = inspect.classify_class_attrs(cls)
        for attr in attrs:
            if attr.defining_class is object:
                continue
            if attr.defining_class is not cls:
                inherited_attributes.append(attr)
            if attr.kind == 'method':
                if attr.name not in ignored_special_methods:
                    if attr.name.startswith('__'):
                        special_methods.append(attr)
                    elif not attr.name.startswith('_'):
                        methods.append(attr)
            elif attr.kind == 'class method':
                if attr.name not in ignored_special_methods:
                    if attr.name.startswith('__'):
                        special_methods.append(attr)
                    elif not attr.name.startswith('_'):
                        class_methods.append(attr)
            elif attr.kind == 'static method':
                if attr.name not in ignored_special_methods:
                    if attr.name.startswith('__'):
                        special_methods.append(attr)
                    elif not attr.name.startswith('_'):
                        static_methods.append(attr)
            elif attr.kind == 'property' and not attr.name.startswith('_'):
                # No setter means the property is read-only.
                if attr.object.fset is None:
                    readonly_properties.append(attr)
                else:
                    readwrite_properties.append(attr)
            elif attr.kind == 'data' and not attr.name.startswith('_') \
                and attr.name not in getattr(cls, '__slots__', ()):
                data.append(attr)
        class_methods = tuple(sorted(class_methods))
        data = tuple(sorted(data))
        inherited_attributes = tuple(sorted(inherited_attributes))
        methods = tuple(sorted(methods))
        readonly_properties = tuple(sorted(readonly_properties))
        readwrite_properties = tuple(sorted(readwrite_properties))
        special_methods = tuple(sorted(special_methods))
        static_methods = tuple(sorted(static_methods))
        result = (
            class_methods,
            data,
            inherited_attributes,
            methods,
            readonly_properties,
            readwrite_properties,
            special_methods,
            static_methods,
            )
        return result
    @staticmethod
    def ensure_directory(path):
        """Ensure the PARENT directory of *path* exists.

        Note: os.path.dirname strips the last component, so callers pass
        file paths whose containing directory must be created.
        """
        path = os.path.dirname(path)
        if not os.path.exists(path):
            os.makedirs(path)
@staticmethod
def execute(source_directory):
print('Rebuilding Supriya documentation source.')
source_directory = os.path.abspath(source_directory)
manager = SupriyaDocumentationManager
rewritten_files = set()
tools_packages = manager.get_tools_packages()
api_index_rst = manager.get_api_index_rst(tools_packages)
api_index_file_path = manager.get_api_index_file_path(source_directory)
manager.ensure_directory(api_index_file_path)
manager.write(
api_index_file_path,
api_index_rst.rest_format,
rewritten_files,
)
for package in tools_p |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.