repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
zplab/rpc-scope | scope/simple_rpc/property_client.py | Python | mit | 8,483 | 0.00224 | # This code is licensed under the MIT License (see LICENSE file for details)
import collections
import threading
import traceback
import zmq
from ..util import trie
class PropertyClient(threading.Thread):
    """A client for receiving property updates in a background thread.

    The background thread is automatically started when this object is
    constructed.  To stop the thread, set the 'running' attribute to False
    (subclasses arrange for _receive_update() to raise once that happens).
    """
    def __init__(self, daemon=True):
        # properties is a local copy of tracked properties, in case that's useful
        self.properties = {}
        # callbacks is a dict mapping property names to sets of
        # (callback, valueonly) pairs
        self.callbacks = collections.defaultdict(set)
        # prefix_callbacks is a trie used to match property names to prefixes
        # which were registered for "wildcard" callbacks.
        self.prefix_callbacks = trie.trie()
        super().__init__(name='PropertyClient', daemon=daemon)
        self.start()

    def subscribe_from(self, other):
        """Copy subscriptions from another PropertyClient."""
        for property_name, callbacks in other.callbacks.items():
            for callback, valueonly in callbacks:
                self.subscribe(property_name, callback, valueonly)
        for property_prefix, callbacks in other.prefix_callbacks.items():
            for callback, valueonly in callbacks:
                # BUG FIX: subscribe_prefix() takes no valueonly argument
                # (prefix callbacks are always registered with valueonly=False),
                # so passing it as a third positional argument raised TypeError.
                self.subscribe_prefix(property_prefix, callback)

    def run(self):
        """Thread target: do not call directly."""
        self.running = True
        while True:
            property_name, value = self._receive_update()
            self.properties[property_name] = value
            # Fire exact-name callbacks plus any prefix ("wildcard") callbacks
            # whose registered prefix matches this property name.
            for callbacks in [self.callbacks[property_name]] + list(self.prefix_callbacks.values(property_name)):
                for callback, valueonly in callbacks:
                    try:
                        if valueonly:
                            callback(value)
                        else:
                            callback(property_name, value)
                    except Exception as e:
                        # A broken callback must not kill the listener thread.
                        print('Caught exception in PropertyClient callback:')
                        traceback.print_exception(type(e), e, e.__traceback__)

    def stop(self):
        """Ask the background thread to exit and wait for it to do so."""
        self.running = False
        self.join()

    def subscribe(self, property_name, callback, valueonly=False):
        """Register a callback to be called any time the named property is updated.
        If valueonly is True, the callback will be called as: callback(new_value);
        if valueonly is False, it will be called as callback(property_name, new_value).

        Multiple callbacks can be registered for a single property_name.
        """
        self.callbacks[property_name].add((callback, valueonly))

    def unsubscribe(self, property_name, callback, valueonly=False):
        """Unregister an exactly matching, previously registered callback. If
        the same callback function is registered multiple times with identical
        property_name and valueonly parameters, only one registration is removed."""
        if property_name is None:
            raise ValueError('property_name parameter must not be None.')
        try:
            callbacks = self.callbacks[property_name]
            callbacks.remove((callback, valueonly))
        except KeyError:
            raise KeyError('No matching subscription found for property name "{}".'.format(property_name)) from None
        if not callbacks:
            # Drop empty entries so the defaultdict does not grow without bound.
            del self.callbacks[property_name]

    def subscribe_prefix(self, property_prefix, callback):
        """Register a callback to be called any time a named property which is
        prefixed by the property_prefix parameter is updated. The callback is
        called as callback(property_name, new_value).

        Example: if property_prefix is 'camera.', then the callback will be called
        when 'camera.foo' or 'camera.bar' or any such property name is updated.
        An empty prefix ('') will match everything.

        Multiple callbacks can be registered for a single property_prefix.
        """
        if property_prefix not in self.prefix_callbacks:
            self.prefix_callbacks[property_prefix] = set()
        self.prefix_callbacks[property_prefix].add((callback, False))

    def unsubscribe_prefix(self, property_prefix, callback):
        """Unregister an exactly matching, previously registered callback. If
        the same callback function is registered multiple times with identical
        property_prefix parameters, only one registration is removed."""
        if property_prefix is None:
            raise ValueError('property_prefix parameter must not be None.')
        try:
            callbacks = self.prefix_callbacks[property_prefix]
            callbacks.remove((callback, False))
        except KeyError:
            # 'from None' added for consistency with unsubscribe() above.
            raise KeyError('No matching subscription found for property name "{}".'.format(property_prefix)) from None
        if not callbacks:
            del self.prefix_callbacks[property_prefix]

    def _receive_update(self):
        """Receive an update from the server, or raise an error if self.running
        goes False.  Subclasses must override."""
        raise NotImplementedError()
class ZMQClient(PropertyClient):
    def __init__(self, addr, heartbeat_sec=None, context=None, daemon=True):
        """PropertyClient subclass that uses ZeroMQ PUB/SUB to receive updates.

        Parameters:
            addr: a string ZeroMQ port identifier, like 'tcp://127.0.0.1:5555'.
            heartbeat_sec: ZMQ-level heartbeat interval in seconds, or None
                to disable connection heartbeating.
            context: a ZeroMQ context to share, if one already exists.
            daemon: exit the client when the foreground thread exits.
        """
        self.context = context if context is not None else zmq.Context()
        self.addr = addr
        self.heartbeat_sec = heartbeat_sec
        # Set once the SUB socket is connected; (un)subscribe calls wait on it
        # because the socket is created/owned by the background thread.
        self.connected = threading.Event()
        super().__init__(daemon)

    def run(self):
        """Thread target: connect, then receive updates until stopped."""
        self._connect()
        try:
            super().run()
        finally:
            self.socket.close()

    def reconnect(self):
        """Request a reconnect and block until it has happened."""
        self.connected.clear()
        self.connected.wait()

    def _connect(self):
        """Create and connect the SUB socket (runs on the background thread)."""
        self.socket = self.context.socket(zmq.SUB)
        # we use poll to determine whether there's data to receive, so we
        # don't want to wait on recv
        self.socket.RCVTIMEO = 0
        self.socket.LINGER = 0
        if self.heartbeat_sec is not None:
            heartbeat_ms = self.heartbeat_sec * 1000
            self.socket.HEARTBEAT_IVL = heartbeat_ms
            self.socket.HEARTBEAT_TIMEOUT = heartbeat_ms * 2
            self.socket.HEARTBEAT_TTL = heartbeat_ms * 2
        self.socket.connect(self.addr)
        # Re-establish any subscriptions registered before (re)connection.
        for property_name in list(self.callbacks) + list(self.prefix_callbacks):
            self.socket.setsockopt_string(zmq.SUBSCRIBE, property_name)
        self.connected.set()

    def subscribe(self, property_name, callback, valueonly=False):
        self.connected.wait()
        self.socket.subscribe(property_name)
        super().subscribe(property_name, callback, valueonly)
    subscribe.__doc__ = PropertyClient.subscribe.__doc__

    def unsubscribe(self, property_name, callback, valueonly=False):
        super().unsubscribe(property_name, callback, valueonly)
        self.connected.wait()
        self.socket.unsubscribe(property_name)
    unsubscribe.__doc__ = PropertyClient.unsubscribe.__doc__

    def subscribe_prefix(self, property_prefix, callback):
        self.connected.wait()
        self.socket.subscribe(property_prefix)
        super().subscribe_prefix(property_prefix, callback)
    subscribe_prefix.__doc__ = PropertyClient.subscribe_prefix.__doc__

    def unsubscribe_prefix(self, property_prefix, callback):
        super().unsubscribe_prefix(property_prefix, callback)
        self.connected.wait()
        self.socket.unsubscribe(property_prefix)
    unsubscribe_prefix.__doc__ = PropertyClient.unsubscribe_prefix.__doc__
def _receive_update(self):
while not self.socket.poll(500): # 500 ms wait before checking self.running again
if not self.running:
raise RuntimeError()
if not self.connected.is_set():
# reconnect was requeste |
stregoika/aislib | aisutils/uscg.py | Python | gpl-3.0 | 18,764 | 0.012737 | #!/usr/bin/env python
__version__ = '$Revision: 2275 $'.split()[1]
__date__ = '$Date: 2006-07-10 16:22:35 -0400 (Mon, 10 Jul 2006) $'.split()[1]
__author__ = 'Kurt Schwehr'
__doc__ = '''
Connect to a socket and forward what is received to another port.
Filter to a list of AIS receivers/basestations.
@author: '''+__author__+'''
@version: ''' + __version__ +'''
@copyright: 2006
@var __date__: Date of last svn commit
@undocumented: __version__ __author__ __doc__ myparser
@status: under development
@license: GPL v2
@since: Jan 2008
@todo: For speed, provide functions that only parse the timestamp, station, etc.
'''
import sys
#import datetime
import time
import datetime
import unittest
from BitVector import BitVector
import ais.sqlhelp # for sec2timestamp
import ais.binary
import ais.nmea
import re
######################################################################
# NEW Regular Expression Parsing Style
#FIX: make the field names be name1_name2 rather than camel case
# USCG has some receivers that emit corrupted fields, so loosen from this
# | (,s(?P<s_rssi>\d*))
# | (,d(?P<signal_strength>[-0-9]*))
# | (,t(?P<t_recver_hhmmss>(?P<t_hour>\d\d)(?P<t_min>\d\d)(?P<t_sec>\d\d.\d*)))
# | (,T(?P<time_of_arrival>[0-9.]*))
# | (,x(?P<x_station_counter>[0-9]*))
# | (,(?P<station>(?P<station_type>[rbB])[a-zA-Z0-9]*))
# AIVDO might not have a channel associated.
uscg_ais_nmea_regex_str = r'''[!$](?P<talker>AI)(?P<stringType>VD[MO])
,(?P<total>\d?)
,(?P<senNum>\d?)
,(?P<seqId>[0-9]?)
,(?P<chan>[AB]?)
,(?P<body>[;:=@a-zA-Z0-9<>\?\'\`]*)
,(?P<fillBits>\d)\*(?P<checksum>[0-9A-F][0-9A-F])
(
(,S(?P<slot>\d*))
| (,s(?P<s_rssi>\d*))
| (,d(?P<signal_strength>[-0-9]*))
| (,t(?P<t_recver_hhmmss>(?P<t_hour>\d\d)(?P<t_min>\d\d)(?P<t_sec>\d\d.\d*)))
| (,T(?P<time_of_arrival>[^,]*))
| (,x(?P<x_station_counter>[0-9]*))
| (,(?P<station>(?P<station_type>[rbB])[a-zA-Z0-9_-]*))
)*
,(?P<timeStamp>\d+([.]\d+)?)?
'''
'''
Regular expression for parsing a USCG.
* s - receive signal strength indicator (RSSI)
* d - dBm
* S - slot number the message was received in
* x - index counter incremented for each sentence sent by remote site
* T - time of arrival with the minute. Related to slot number
* r - receive station r[^Bb] is a receiver, rR receiver, rB basestation, b is basestation
* t - HHMMSS.SS at the receiver (presume this is UTC from the local GPS)
@note: probably not complete
@todo: What is the x field?
@todo: make all fields explicit so errors do not parse
@bug: does not match this>???? !AIVDM,1,1,,B,85MwqdAKf=Wsd5sKUfl@u>DMk70JwpQ2hjnTHlbfcWj<2n<jRtHd,0*7E,x151038,r003669947,1222129826
@bug: is S missing a comma?
@bug: make this conform to the python coding style guides... time_stamp
'''
uscg_ais_nmea_regex = re.compile(uscg_ais_nmea_regex_str, re.VERBOSE)
'''
Use this pre-comiled regular expression to parse USCG NMEA strings with the extended fields after the checksum
'''
def write_uscg_nmea_fields(nmea_str, out=sys.stdout, indent='\t'):
    """Write out the fields of a USCG nmea string, one per line.

    @param nmea_str: USCG style nmea string
    @param out: stream object to write to
    @param indent: string prepended to each output line
    @raise ValueError: if nmea_str does not match uscg_ais_nmea_regex
    """
    match_obj = uscg_ais_nmea_regex.search(nmea_str)
    if match_obj is None:
        # BUG FIX: an unmatched string previously caused an AttributeError
        # on the None match object.
        raise ValueError('unable to parse USCG nmea string: ' + nmea_str)
    # BUG FIX: the original called an undefined global write() and referenced
    # group names ('prefix', 's', 'x') that do not exist in the regex; the
    # names below are exactly the named groups of uscg_ais_nmea_regex_str.
    for name in ('talker', 'stringType', 'total', 'senNum', 'seqId', 'chan',
                 'body', 'fillBits', 'checksum', 'slot', 's_rssi',
                 'signal_strength', 'time_of_arrival', 'x_station_counter',
                 'station', 'station_type', 'timeStamp'):
        out.write(indent + '%15s = %s\n' % (name, match_obj.group(name)))
######################################################################
# OLD Style
def get_station(nmeaStr):
    """Return the receiver/basestation field of a USCG nmea string.

    Only splits on commas and scans the trailing metadata fields (index 6
    onward); does not do a full parse, to stay fast.  Returns None when no
    station field is present.
    """
    fields = nmeaStr.split(',')
    # The first matching field at index >= 6 wins (equivalent to the final
    # overwrite of a reverse scan stopping at index 6).
    for field in fields[6:]:
        if field and field[0] in ('b', 'r'):
            return field
    return None
def get_contents(nmeaStr):
    """Return the raw AIS payload (the 6th comma-separated field)."""
    # maxsplit=6 is enough to isolate field index 5; the remainder is ignored.
    return nmeaStr.split(',', 6)[5]
class UscgNmea:
def __init__(self,nmeaStr=None):
'''
Fields:
- rssi ('s'): relative signal strength indicator
- signalStrength ('d') - signal strendth in dBm
- timeOfArrival ('T') - time of arrive from receiver - seconds within the minute
- slotNumber ('S') - Receive slot number
- station ('r' or 'b') - station name or id that received the message
- stationTypeCode - first letter of the station name indicating 'b'asestation or 'r'eceive only (I think)
- cg_sec - receive time of the message from the logging software. Unix UTC second timestamp
- timestamp - python datetime object in UTC derived from the cg_sec
@todo: parse the other fields?
@see: Maritime navigation and radiocommunication equipment and
systems - Digital interfaces - Part 100: Single talker
and multiple listeners - Extra requirements to IEC
61162-1 for the UAIS. (80_330e_PAS) Draft...
'''
if None!=nmeaStr:
#if len(nmeaStr)<6:
# # FIX: throw exception
# sys.stderr.write('Not a AIVDM... too short\n')
fields = nmeaStr.split(',')
self.cg_sec=float(fields[-1])
self.timestamp = datetime.datetime.utcfromtimestamp(self.cg_sec)
self.sqlTimestampStr = ais.sqlhelp.sec2timestamp(self.cg_sec)
# See 80_330e_PAS
#
self.nmeaType=fields[0][1:]
self.totalSentences = int(fields[1])
self.sentenceNum = int(fields[2])
tmp = fields[3]
if len(tmp)>0:
self.sequentialMsgId = int(tmp)
else:
self.sequentialMsgId = None
# FIX: make an int if the above is set
self.aisChannel = fields[4] # 'A' or 'B'
self.contents = fields[5]
self.fillbits = int(fields[6].split('*')[0])
self.checksumStr = fields[6].split('*')[1] # FIX: this is a hex string. Convert?
if self.sentenceNum==1:
self.msgTypeChar=fields[5][0]
else:
self.msgTypeChar=None
for i in range(len(fields)-1,5,-1):
if len(fields[i])==0:
continue # maybe it should throw a parse exception instead?
f = fields[i]
c = f[0] # first charater determines what the field is
if c in ('b','r','B','R'):
self.station = f # FIX: think we want to keep the code in the first char
self.stationTypeCode = self.station[0]
continue
#break # Found it so ditch the for loop
if c == 's':
self.rssi=int(f[1:])
continue
if c == 'd':
self.signalStrength = int(f[1:])
continue
if c == 'T':
try:
self |
BCCVL/org.bccvl.tasks | src/org/bccvl/tasks/datamover/zoatrack.py | Python | gpl-2.0 | 8,153 | 0.000981 | from __future__ import absolute_import
import logging
import io
import os
import os.path
import shutil
import tempfile
import urllib
import zipfile
import csv
from datetime import datetime
from org.bccvl import movelib
from org.bccvl.movelib.utils import build_source, build_destination
from org.bccvl.movelib.utils import zip_occurrence_data
from org.bccvl.tasks.celery import app
from org.bccvl.tasks.utils import extract_metadata
from org.bccvl.tasks.utils import set_progress, import_cleanup
from org.bccvl.tasks.utils import set_progress_job, import_cleanup_job
from org.bccvl.tasks.utils import import_ala_job
SPECIES = 'species'
LONGITUDE = 'lon'
LATITUDE = 'lat'
EVENT_DATE = 'date'
YEAR = 'year'
MONTH = 'month'
LOG = logging.getLogger(__name__)
def _process_trait_data(datadir):
    """Validate the downloaded ZoaTrack trait csv and normalise its headers.

    Checks that <datadir>/zoatrack_trait.csv contains the required columns
    (and that they precede the trait columns), renames them to the short
    names used elsewhere (lat/lon/species/date), and rewrites the file in
    place.  Returns the number of data rows (excluding the header).

    NOTE(review): files are opened in binary mode ('br+'/'wb') for the
    Python 2 csv module; as written this code is not Python 3 compatible.
    """
    count = 0
    csvfile = os.path.join(datadir, 'zoatrack_trait.csv')
    with io.open(csvfile, mode='br+') as csv_file:
        csv_reader = csv.reader(csv_file)
        # Check if csv file header has the necessary columns
        columns = set(['decimalLatitude', 'decimalLongitude',
                       'speciesScientificName', 'month', 'year', 'eventDate',
                       'organismId', 'eventId'])
        csv_headers = next(csv_reader)
        missing_columns = ', '.join(columns.difference(csv_headers))
        if missing_columns:
            raise Exception("Missing columns '{}' in dataset".format(missing_columns))
        # These columns must be before other trait columns.
        for header in columns:
            if csv_headers.index(header) >= len(columns):
                raise Exception("Column '{}' must be before trait columns in the dataset".format(header))
        # rename column names to the short forms used by the rest of the code
        csv_headers[csv_headers.index('decimalLatitude')] = LATITUDE
        csv_headers[csv_headers.index('decimalLongitude')] = LONGITUDE
        csv_headers[csv_headers.index('speciesScientificName')] = SPECIES
        csv_headers[csv_headers.index('eventDate')] = EVENT_DATE
        # write to a temp file
        with io.open(os.path.join(datadir, 'trait_temp.csv'), mode='wb') as out_file:
            csv_writer = csv.writer(out_file)
            csv_writer.writerow(csv_headers)
            for row in csv_reader:
                csv_writer.writerow(row)
                count += 1
    # overwrite the trait csv file with the temp file
    os.remove(os.path.join(datadir, 'zoatrack_trait.csv'))
    os.rename(os.path.join(datadir, 'trait_temp.csv'),
              os.path.join(datadir, 'zoatrack_trait.csv'))
    return count
def download_zoatrack_trait_data(src_url, dest):
    """Download and unpack the ZoaTrack trait zip from src_url into dest.

    Extracts trait.csv (required) and citation.txt (optional), normalises
    the trait csv via _process_trait_data, and rezips the result as
    <dest>/zoatrack_trait.zip.  Returns (zip_path, row_count).
    """
    # Get trait file
    data_dest = os.path.join(dest, 'data')
    # BUG FIX: initialise before the try block; otherwise the finally clause
    # raised NameError when urlretrieve itself failed.
    trait_zipfile = None
    try:
        trait_zipfile, _ = urllib.urlretrieve(src_url)
        # unzip and rename trait file
        with zipfile.ZipFile(trait_zipfile) as z:
            os.mkdir(data_dest)
            # rename trait data csv file
            z.extract('trait.csv', dest)
            os.rename(os.path.join(dest, 'trait.csv'),
                      os.path.join(data_dest, 'zoatrack_trait.csv'))
            # citation file is optional
            try:
                z.extract('citation.txt', dest)
                os.rename(os.path.join(dest, 'citation.txt'),
                          os.path.join(data_dest, 'zoatrack_citation.txt'))
            except Exception:
                pass
    except KeyError:
        # z.extract raises KeyError for a member missing from the archive
        LOG.error("Cannot find file %s in downloaded zip file", 'trait.csv', exc_info=True)
        raise
    except Exception:
        # TODO: Not a zip file error.... does it have to raise?
        LOG.error("The downloaded file from %s is not a zip file", src_url, exc_info=True)
        raise
    finally:
        # Remove the downloaded temp file
        if trait_zipfile and os.path.isfile(trait_zipfile):
            os.remove(trait_zipfile)
    count = 0
    try:
        count = _process_trait_data(data_dest)
    except Exception:
        LOG.error('Bad column header in downloaded trait file', exc_info=True)
        raise
    # Zip out files if available
    zip_occurrence_data(os.path.join(dest, 'zoatrack_trait.zip'),
                        data_dest,
                        ['zoatrack_trait.csv', 'zoatrack_citation.txt'])
    return os.path.join(dest, 'zoatrack_trait.zip'), count
@app.task()
def pull_traits_from_zoatrack(species, src_url, dest_url, context):
    """Celery task: download trait data for *species* from ZoaTrack at
    src_url, move it to dest_url and queue the dataset import jobs.

    Progress and failures are reported through set_progress using *context*.
    """
    # 1. set progress
    spName = ', '.join(species)
    set_progress('RUNNING', 'Download {0} from zoatrack'.format(
        spName), None, context)
    # 2. do download
    # BUG FIX: initialise before the try block; otherwise the finally clause
    # raised NameError when mkdtemp itself failed.
    tmpdir = None
    try:
        tmpdir = tempfile.mkdtemp(prefix='zoatrack_download_')
        # Trait data file is a zip file; trait data file and citation file
        trait_zip, count = download_zoatrack_trait_data(src_url, tmpdir)
        if count == 0:
            raise Exception("No trait data is found")
        # extract metadata and do other stuff....
        set_progress('RUNNING', 'Extract metadata {0} from zoatrack'.format(
            spName), None, context)
        # build item to import
        imported_date = datetime.now().strftime('%d/%m/%Y')
        title = "{} trait data".format(spName)
        description = "Observed trait data for {}, imported from ZoaTack on {}".format(
            spName, imported_date)
        item = {
            'title': title,
            'description': description,
            'file': {
                'url': 'file://{}'.format(trait_zip),  # local file url
                'contenttype': 'application/zip',
                'filename': os.path.basename(trait_zip)
            },
            #'bccvlmetadata': bccvlmd,
            'filemetadata': extract_metadata(trait_zip, 'application/zip'),
        }
        # Add the number of trait records to the metadata
        # To do: This is a hack. Any better solution.
        trait_csv_filename = os.path.join('data', 'zoatrack_trait.csv')
        if trait_csv_filename in item['filemetadata']:
            # FIXME: copy all occurrence metadata to zip level, for backwards
            # compatibility... this should go away after we fully support
            # 'layered' occurrence zips.
            for key in ('rows', 'headers', 'bounds'):
                if key in item['filemetadata'][trait_csv_filename]['metadata']:
                    item['filemetadata'][key] = item['filemetadata'][
                        trait_csv_filename]['metadata'][key]
        # TODO: clean this up
        # remove citation file from metadata, otherwise it will be
        # interpreted as data layer within zip file
        # BUG FIX: the citation file is extracted as zoatrack_citation.txt
        # (not .csv), so the old '.csv' key could never match.
        if 'data/zoatrack_citation.txt' in item.get('filemetadata', {}):
            del item['filemetadata']['data/zoatrack_citation.txt']
        # move data file to destination and build data_url
        src = build_source('file://{}'.format(trait_zip))
        dst = build_destination(os.path.join(
            dest_url, os.path.basename(trait_zip)), app.conf.get('bccvl', {}))
        item['file']['url'] = dst['url']
        movelib.move(src, dst)
        # tell importer about new dataset (import it)
        set_progress('RUNNING', 'Import zoatack trait data {0}'.format(spName),
                     None, context)
        cleanup_job = import_cleanup_job(dest_url, context)
        import_job = import_ala_job([item], dest_url, context)
        import_job.link_error(set_progress_job(
            "FAILED", "Import of zoatack trait data failed {0}".format(spName),
            None, context))
        import_job.link_error(cleanup_job)
        finish_job = set_progress_job(
            "COMPLETED", 'ZoaTack import {} complete'.format(spName), None,
            context)
        # celery chain: import, then cleanup, then final progress update
        (import_job | cleanup_job | finish_job).delay()
    except Exception as e:
        set_progress('FAILED', 'Download Traits from zoatack: {0}'.format(e),
                     None, context)
        import_cleanup(dest_url, context)
        LOG.error('Download from %s to %s failed: %s',
                  src_url, dest_url, e, exc_info=True)
    finally:
        if tmpdir and os.path.exists(tmpdir):
            shutil.rmtree(tmpdir)
|
waylan/mkdocs | mkdocs/config/defaults.py | Python | bsd-2-clause | 5,554 | 0.002161 | from mkdocs.config import config_options
# NOTE: The order here is important. During validation some config options
# depend on others. So, if config option A depends on B, then A should be
# listed higher in the schema.
# Once we drop Python 2.6 support, this could be an OrderedDict, however, it
# isn't really needed either as we always sequentially process the schema other
# than at initialisation when we grab the full set of keys for convenience.
def get_schema():
    """Return the MkDocs configuration schema as a tuple of
    (option_name, ConfigOption) pairs, in validation order.
    """
    return (
        # Reserved for internal use, stores the mkdocs.yml config file.
        ('config_file_path', config_options.Type(str)),

        # The title to use for the documentation
        ('site_name', config_options.Type(str, required=True)),

        # Defines the structure of the navigation.
        ('nav', config_options.Nav()),

        # TODO: remove this when the `pages` config setting is fully deprecated.
        ('pages', config_options.Nav()),

        # The full URL to where the documentation will be hosted
        ('site_url', config_options.URL(is_dir=True, required=True)),

        # A description for the documentation project that will be added to the
        # HTML meta tags.
        ('site_description', config_options.Type(str)),

        # The name of the author to add to the HTML meta tags
        ('site_author', config_options.Type(str)),

        # The MkDocs theme for the documentation.
        ('theme', config_options.Theme(default='mkdocs')),

        # The directory containing the documentation markdown.
        ('docs_dir', config_options.Dir(default='docs', exists=True)),

        # The directory where the site will be built to
        ('site_dir', config_options.SiteDir(default='site')),

        # A copyright notice to add to the footer of documentation.
        ('copyright', config_options.Type(str)),

        # set of values for Google analytics containing the account IO and domain,
        # this should look like, ['UA-27795084-5', 'mkdocs.org']
        ('google_analytics', config_options.Deprecated(
            message=(
                'The configuration option {} has been deprecated and '
                'will be removed in a future release of MkDocs. See the '
                'options available on your theme for an alternative.'
            ),
            option_type=config_options.Type(list, length=2)
        )),

        # The address on which to serve the live reloading docs server.
        ('dev_addr', config_options.IpAddress(default='127.0.0.1:8000')),

        # If `True`, use `<page_name>/index.hmtl` style files with hyperlinks to
        # the directory.If `False`, use `<page_name>.html style file with
        # hyperlinks to the file.
        # True generates nicer URLs, but False is useful if browsing the output on
        # a filesystem.
        ('use_directory_urls', config_options.Type(bool, default=True)),

        # Specify a link to the project source repo to be included
        # in the documentation pages.
        ('repo_url', config_options.RepoURL()),

        # A name to use for the link to the project source repo.
        # Default, If repo_url is unset then None, otherwise
        # "GitHub", "Bitbucket" or "GitLab" for known url or Hostname
        # for unknown urls.
        ('repo_name', config_options.Type(str)),

        # Specify a URI to the docs dir in the project source repo, relative to the
        # repo_url. When set, a link directly to the page in the source repo will
        # be added to the generated HTML. If repo_url is not set also, this option
        # is ignored.
        ('edit_uri', config_options.Type(str)),

        # Specify which css or javascript files from the docs directory should be
        # additionally included in the site.
        ('extra_css', config_options.Type(list, default=[])),
        ('extra_javascript', config_options.Type(list, default=[])),

        # Similar to the above, but each template (HTML or XML) will be build with
        # Jinja2 and the global context.
        ('extra_templates', config_options.Type(list, default=[])),

        # PyMarkdown extension names.
        ('markdown_extensions', config_options.MarkdownExtensions(
            builtins=['toc', 'tables', 'fenced_code'],
            configkey='mdx_configs', default=[])),

        # PyMarkdown Extension Configs. For internal use only.
        ('mdx_configs', config_options.Private()),

        # enabling strict mode causes MkDocs to stop the build when a problem is
        # encountered rather than display an error.
        ('strict', config_options.Type(bool, default=False)),

        # the remote branch to commit to when using gh-deploy
        ('remote_branch', config_options.Type(
            str, default='gh-pages')),

        # the remote name to push to when using gh-deploy
        ('remote_name', config_options.Type(str, default='origin')),

        # extra is a mapping/dictionary of data that is passed to the template.
        # This allows template authors to require extra configuration that not
        # relevant to all themes and doesn't need to be explicitly supported by
        # MkDocs itself. A good example here would be including the current
        # project version.
        ('extra', config_options.SubConfig()),

        # a list of plugins. Each item may contain a string name or a key value pair.
        # A key value pair should be the string name (as the key) and a dict of config
        # options (as the value).
        ('plugins', config_options.Plugins(default=['search'])),
    )
|
galou/symoro | pysymoro/inertia.py | Python | mit | 7,964 | 0.000377 | # -*- coding: utf-8 -*-
# This file is part of the OpenSYMORO project. Please see
# https://github.com/symoro/symoro/blob/master/LICENCE for the licence.
"""
This module contains the functions for the computation of Inertia
matrix.
"""
import sympy
from sympy import Matrix
from pysymoro.geometry import compute_rot_trans
from symoroutils.paramsinit import ParamsInit
from symoroutils import tools
# Alphabetic suffixes used to build unique per-joint symbol names (e.g.
# 'E' + CHARSYMS[j] and 'N' + CHARSYMS[j] in the routines below).
# NOTE(review): 'i' and 'o' appear to be skipped deliberately -- presumably
# to avoid symbols that read like indices or digits; confirm before relying
# on the exact sequence.
CHARSYMS = (
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'aa', 'ab',
'ac', 'ad', 'ae', 'af', 'ag', 'ah', 'aj', 'ak', 'al', 'am', 'an',
'ap', 'aq', 'ar', 'as', 'at', 'au', 'av', 'aw', 'ax', 'ay', 'az',
'ba', 'bb', 'bc', 'bd', 'be', 'bf', 'bg', 'bh', 'bj', 'bk', 'bl',
'bm', 'bn', 'bp', 'bq', 'br', 'bs', 'bt', 'bu', 'bv', 'bw', 'bx',
'by', 'bz'
)
def inertia_spatial(inertia, ms_tensor, mass):
    """Setup the 6x6 spatial inertia matrix (internal function).

    @param inertia: 3x3 rotational inertia matrix
    @param ms_tensor: first moment of inertia vector (mass * centre of mass)
    @param mass: link mass (scalar)
    """
    return Matrix([
        (mass * sympy.eye(3)).row_join(tools.skew(ms_tensor).transpose()),
        tools.skew(ms_tensor).row_join(inertia)
    ])
def replace_composite_terms(
    symo, j, comp_inertia3, comp_ms, comp_mass
):
    """
    Replace composite inertia terms of link j with named intermediate
    symbols (internal function).

    Note:
        comp_inertia3, comp_ms, comp_mass are the output parameters.
    """
    comp_inertia3[j] = symo.mat_replace(comp_inertia3[j], 'JP', j)
    comp_ms[j] = symo.mat_replace(comp_ms[j], 'MSP', j)
    comp_mass[j] = symo.replace(comp_mass[j], 'MP', j)
def compute_composite_inertia(
    robo, symo, j, antRj, antPj,
    aje1, comp_inertia3, comp_ms, comp_mass
):
    """
    Accumulate the composite inertia of link j onto its antecedent link
    (internal function).

    Note:
        aje1, comp_inertia3, comp_ms, comp_mass are the output
        parameters.
    """
    i = robo.ant[j]
    # express link j's first moment in frame i
    i_ms_j_c = antRj[j] * comp_ms[j]
    i_ms_j_c = symo.mat_replace(i_ms_j_c, 'AS', j)
    expr1 = antRj[j] * comp_inertia3[j]
    expr1 = symo.mat_replace(expr1, 'AJ', j)
    # third column of the rotated inertia, reused by the triangle elements
    aje1[j] = expr1[:, 2]
    expr2 = expr1 * antRj[j].transpose()
    expr2 = symo.mat_replace(expr2, 'AJA', j)
    expr3 = tools.skew(antPj[j]) * tools.skew(i_ms_j_c)
    expr3 = symo.mat_replace(expr3, 'PAS', j)
    # parallel-axis style accumulation onto the antecedent link i
    comp_inertia3[i] += expr2 - (expr3 + expr3.transpose()) + \
        (comp_mass[j] * tools.skew(antPj[j]) *
         tools.skew(antPj[j]).transpose())
    comp_ms[i] = comp_ms[i] + i_ms_j_c + (antPj[j] * comp_mass[j])
    comp_mass[i] = comp_mass[i] + comp_mass[j]
def compute_diagonal_elements(
    robo, symo, j, comp_inertia3, comp_ms, comp_mass,
    forces, moments, inertia_a22
):
    """
    Compute diagonal elements of the inertia matrix (internal function).

    Note:
        forces, moments, inertia_a22 are the output parameters.
        inertia_a22 is indexed with j-1 because it excludes the base link.
    """
    if robo.sigma[j] == 0:
        # sigma == 0: revolute joint (per SYMORO joint-type convention)
        forces[j] = Matrix([-comp_ms[j][1], comp_ms[j][0], 0])
        moments[j] = comp_inertia3[j][:, 2]
        inertia_a22[j-1, j-1] = comp_inertia3[j][2, 2] + robo.IA[j]
    elif robo.sigma[j] == 1:
        # sigma == 1: prismatic joint
        forces[j] = Matrix([0, 0, comp_mass[j]])
        moments[j] = Matrix([comp_ms[j][1], -comp_ms[j][0], 0])
        inertia_a22[j-1, j-1] = comp_mass[j] + robo.IA[j]
    forces[j] = symo.mat_replace(forces[j], 'E' + CHARSYMS[j], j)
    moments[j] = symo.mat_replace(moments[j], 'N' + CHARSYMS[j], j)
def compute_triangle_elements(
    robo, symo, j, k, ka, antRj, antPj, aje1,
    forces, moments, inertia_a12, inertia_a22
):
    """
    Compute elements below and above the diagonal of the inertia matrix
    (internal function), propagating force/moment from link k to its
    antecedent ka.

    Note:
        forces, moments, inertia_a12, inertia_a22 are the output
        parameters.
    """
    forces[ka] = antRj[k] * forces[k]
    if k == j and robo.sigma[j] == 0:
        moments[ka] = aje1[k] + \
            (tools.skew(antPj[k]) * antRj[k] * forces[k])
    else:
        moments[ka] = (antRj[k] * moments[k]) + \
            (tools.skew(antPj[k]) * antRj[k] * forces[k])
    if ka == 0:
        # reached the (floating) base: fill the A12 coupling column instead
        inertia_a12[j][:3, 0] = symo.mat_replace(
            forces[ka], 'AV0', j, forced=True
        )
        inertia_a12[j][3:, 0] = symo.mat_replace(
            moments[ka], 'AW0', j, forced=True
        )
    else:
        symo.mat_replace(forces[ka], 'E' + CHARSYMS[j], ka)
        symo.mat_replace(moments[ka], 'N' + CHARSYMS[j], ka)
        if robo.sigma[ka] == 0:
            inertia_a22[j-1, ka-1] = moments[ka][2]
        elif robo.sigma[ka] == 1:
            inertia_a22[j-1, ka-1] = forces[ka][2]
        # mirror into the symmetric position (only valid when ka >= 1)
        inertia_a22[ka-1, j-1] = inertia_a22[j-1, ka-1]
def fixed_inertia_matrix(robo, symo):
    """
    Compute Inertia Matrix for robots with fixed base. This function
    computes just the A22 matrix when the inertia matrix
    A = [A11, A12; A12.transpose(), A22].
    """
    # init terms
    comp_inertia3, comp_ms, comp_mass = ParamsInit.init_jplus(robo)
    aje1 = ParamsInit.init_vec(robo)
    forces = ParamsInit.init_vec(robo, ext=1)
    moments = ParamsInit.init_vec(robo, ext=1)
    inertia_a12 = ParamsInit.init_vec(robo, num=6)
    inertia_a22 = sympy.zeros(robo.nl, robo.nl)
    # init transformation
    antRj, antPj = compute_rot_trans(robo, symo)
    # backward recursion: accumulate composite inertias towards link 1
    for j in reversed(xrange(1, robo.NL)):
        replace_composite_terms(
            symo, j, comp_inertia3, comp_ms, comp_mass
        )
        if j != 1:
            compute_composite_inertia(
                robo, symo, j, antRj, antPj,
                aje1, comp_inertia3, comp_ms, comp_mass
            )
    # forward sweep: diagonal terms, then propagate along each branch for
    # the off-diagonal (triangle) terms
    for j in xrange(1, robo.NL):
        compute_diagonal_elements(
            robo, symo, j, comp_inertia3, comp_ms,
            comp_mass, forces, moments, inertia_a22
        )
        ka = j
        while ka != 1:
            k = ka
            ka = robo.ant[ka]
            compute_triangle_elements(
                robo, symo, j, k, ka, antRj, antPj, aje1,
                forces, moments, inertia_a12, inertia_a22
            )
    symo.mat_replace(inertia_a22, 'A', forced=True, symmet=True)
    return inertia_a22
def floating_inertia_matrix(robo, symo):
    """
    Compute Inertia Matrix for robots with floating or mobile base. This
    function computes the A11, A12 and A22 matrices when the inertia
    matrix A = [A11, A12; A12.transpose(), A22].
    """
    # init terms
    comp_inertia3, comp_ms, comp_mass = ParamsInit.init_jplus(robo)
    aje1 = ParamsInit.init_vec(robo)
    forces = ParamsInit.init_vec(robo, ext=1)
    moments = ParamsInit.init_vec(robo, ext=1)
    inertia_a12 = ParamsInit.init_vec(robo, num=6)
    inertia_a22 = sympy.zeros(robo.nl, robo.nl)
    # init transformation
    antRj, antPj = compute_rot_trans(robo, symo)
    # backward recursion down to the base (link 0)
    for j in reversed(xrange(0, robo.NL)):
        replace_composite_terms(
            symo, j, comp_inertia3, comp_ms, comp_mass
        )
        if j != 0:
            compute_composite_inertia(
                robo, symo, j, antRj, antPj,
                aje1, comp_inertia3, comp_ms, comp_mass
            )
    for j in xrange(1, robo.NL):
        compute_diagonal_elements(
            robo, symo, j, comp_inertia3, comp_ms,
            comp_mass, forces, moments, inertia_a22
        )
        ka = j
        while ka != 0:
            k = ka
            ka = robo.ant[ka]
            compute_triangle_elements(
                robo, symo, j, k, ka, antRj, antPj, aje1,
                forces, moments, inertia_a12, inertia_a22
            )
    symo.mat_replace(inertia_a22, 'A', forced=True, symmet=True)
    # A11: spatial inertia of the composite base link
    inertia_a11 = inertia_spatial(
        comp_inertia3[0], comp_ms[0], comp_mass[0]
    )
    inertia_a11 = symo.mat_replace(
        inertia_a11, 'Jcomp', 0, forced=True, symmet=True
    )
    # setup inertia_a12 in Matrix form
    a12mat = sympy.zeros(6, robo.NL)
    for j in xrange(1, robo.NL):
        a12mat[:, j] = inertia_a12[j]
    a12mat = a12mat[:, 1:]
    # setup the complete inertia matrix
    inertia = Matrix([
        inertia_a11.row_join(a12mat),
        a12mat.transpose().row_join(inertia_a22)
    ])
    return inertia
|
dadavidson/Python_Lab | LP2THW/ex09.py | Python | mit | 418 | 0 | # ex09.py: Printing, Printing, Printing
# Here's some new strange stuff, | remember type it exactly.
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFed\nMar\nApr\nMay\nJun\nJul\nAug"
print "Here are the days: ", days
print "Here are the months: ", months
print """
There's something going on here.
With the three double-quotes.
We'll be able to | type as much as we like.
Even 4 lines if we want, or 5, or 6.
"""
|
MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/nltk/metrics/confusionmatrix.py | Python | mit | 7,825 | 0.001278 | # Natural Language Toolkit: Confusion Matrices
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
from nltk.probability import FreqDist
from nltk.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class ConfusionMatrix(object):
"""
The confusion matrix between a list of reference values and a
corresponding list of test values. Entry *[r,t]* of this
matrix is a count of the number of times that the reference value
*r* corresponds to the test value *t*. E.g.:
>>> from nltk.metrics import ConfusionMatrix
>>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split()
>>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
>>> cm = ConfusionMatrix(ref, test)
>>> print(cm['NN', 'NN'])
3
Note that the diagonal entries *Ri=Tj* of this matrix
corresponds to correct values; and the off-diagonal entries
correspond to incorrect values.
"""
def __init__(self, reference, test, sort_by_count=False):
"""
Construct a new confusion matrix from a list of reference
values and a corresponding list of test values.
:type reference: list
:param reference: An ordered list of reference values.
:type test: list
:param test: A list of values to compare against the
corresponding reference values.
:raise ValueError: If ``reference`` and ``length`` do not have
the same length. |
"""
if len(reference) != len(test):
| raise ValueError('Lists must have the same length.')
# Get a list of all values.
if sort_by_count:
ref_fdist = FreqDist(reference)
test_fdist = FreqDist(test)
def key(v): return -(ref_fdist[v]+test_fdist[v])
values = sorted(set(reference+test), key=key)
else:
values = sorted(set(reference+test))
# Construct a value->index dictionary
indices = dict((val,i) for (i,val) in enumerate(values))
# Make a confusion matrix table.
confusion = [[0 for val in values] for val in values]
max_conf = 0 # Maximum confusion
for w,g in zip(reference, test):
confusion[indices[w]][indices[g]] += 1
max_conf = max(max_conf, confusion[indices[w]][indices[g]])
#: A list of all values in ``reference`` or ``test``.
self._values = values
#: A dictionary mapping values in ``self._values`` to their indices.
self._indices = indices
#: The confusion matrix itself (as a list of lists of counts).
self._confusion = confusion
#: The greatest count in ``self._confusion`` (used for printing).
self._max_conf = max_conf
#: The total number of values in the confusion matrix.
self._total = len(reference)
#: The number of correct (on-diagonal) values in the matrix.
self._correct = sum(confusion[i][i] for i in range(len(values)))
def __getitem__(self, li_lj_tuple):
"""
:return: The number of times that value ``li`` was expected and
value ``lj`` was given.
:rtype: int
"""
(li, lj) = li_lj_tuple
i = self._indices[li]
j = self._indices[lj]
return self._confusion[i][j]
def __repr__(self):
return '<ConfusionMatrix: %s/%s correct>' % (self._correct,
self._total)
def __str__(self):
return self.pretty_format()
def pretty_format(self, show_percents=False, values_in_chart=True,
truncate=None, sort_by_count=False):
"""
:return: A multi-line string representation of this confusion matrix.
:type truncate: int
:param truncate: If specified, then only show the specified
number of values. Any sorting (e.g., sort_by_count)
will be performed before truncation.
:param sort_by_count: If true, then sort by the count of each
label in the reference data. I.e., labels that occur more
frequently in the reference label will be towards the left
edge of the matrix, and labels that occur less frequently
will be towards the right edge.
@todo: add marginals?
"""
confusion = self._confusion
values = self._values
if sort_by_count:
values = sorted(values, key=lambda v:
-sum(self._confusion[self._indices[v]]))
if truncate:
values = values[:truncate]
if values_in_chart:
value_strings = ["%s" % val for val in values]
else:
value_strings = [str(n+1) for n in range(len(values))]
# Construct a format string for row values
valuelen = max(len(val) for val in value_strings)
value_format = '%' + repr(valuelen) + 's | '
# Construct a format string for matrix entries
if show_percents:
entrylen = 6
entry_format = '%5.1f%%'
zerostr = ' .'
else:
entrylen = len(repr(self._max_conf))
entry_format = '%' + repr(entrylen) + 'd'
zerostr = ' '*(entrylen-1) + '.'
# Write the column values.
s = ''
for i in range(valuelen):
s += (' '*valuelen)+' |'
for val in value_strings:
if i >= valuelen-len(val):
s += val[i-valuelen+len(val)].rjust(entrylen+1)
else:
s += ' '*(entrylen+1)
s += ' |\n'
# Write a dividing line
s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
# Write the entries.
for val, li in zip(value_strings, values):
i = self._indices[li]
s += value_format % val
for lj in values:
j = self._indices[lj]
if confusion[i][j] == 0:
s += zerostr
elif show_percents:
s += entry_format % (100.0*confusion[i][j]/self._total)
else:
s += entry_format % confusion[i][j]
if i == j:
prevspace = s.rfind(' ')
s = s[:prevspace] + '<' + s[prevspace+1:] + '>'
else: s += ' '
s += '|\n'
# Write a dividing line
s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
# Write a key
s += '(row = reference; col = test)\n'
if not values_in_chart:
s += 'Value key:\n'
for i, value in enumerate(values):
s += '%6d: %s\n' % (i+1, value)
return s
def key(self):
values = self._values
str = 'Value key:\n'
indexlen = len(repr(len(values)-1))
key_format = ' %'+repr(indexlen)+'d: %s\n'
for i in range(len(values)):
str += key_format % (i, values[i])
return str
def demo():
reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
test = 'DET VB VB DET NN NN NN IN DET NN'.split()
print('Reference =', reference)
print('Test =', test)
print('Confusion matrix:')
print(ConfusionMatrix(reference, test))
print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True))
if __name__ == '__main__':
demo()
|
benschmaus/catapult | telemetry/telemetry/internal/util/wp_server_unittest.py | Python | bsd-3-clause | 2,645 | 0.002647 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this sou | rce code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
import unittest
from telemetry.internal.util import wpr_server
# pylint: disable=protected-access
class CreateCommandTest(unitte | st.TestCase):
def testHasDnsGivesDnsPort(self):
expected_cmd_line = [
sys.executable, 'replay.py', '--host=127.0.0.1',
'--port=2', '--ssl_port=1', '--dns_port=0',
'--use_closest_match', '--log_level=warning', '--extra_arg', 'foo.wpr']
cmd_line = wpr_server.ReplayServer._GetCommandLine(
'replay.py', '127.0.0.1', 2, 1, 0, ['--extra_arg'], 'foo.wpr',
log_level=logging.WARNING)
self.assertEqual(expected_cmd_line, cmd_line)
def testNoDnsGivesNoDnsForwarding(self):
expected_cmd_line = [
sys.executable, 'replay.py', '--host=127.0.0.1',
'--port=8080', '--ssl_port=8443', '--no-dns_forwarding',
'--use_closest_match', '--log_level=warning', 'bar.wpr']
cmd_line = wpr_server.ReplayServer._GetCommandLine(
'replay.py', '127.0.0.1', 8080, 8443, None, [], 'bar.wpr',
log_level=logging.WARNING)
self.assertEqual(expected_cmd_line, cmd_line)
# pylint: disable=protected-access
class ParseLogFilePortsTest(unittest.TestCase):
def testEmptyLinesGivesEmptyDict(self):
log_lines = iter([])
self.assertEqual(
{},
wpr_server.ReplayServer._ParseLogFilePorts(log_lines))
def testSingleMatchGivesSingleElementDict(self):
log_lines = iter([
'extra stuff',
'2014-09-27 17:04:27,11 WARNING HTTP server started on 127.0.0.1:5167',
'extra stuff',
])
self.assertEqual(
{'http': 5167},
wpr_server.ReplayServer._ParseLogFilePorts(log_lines))
def testUnknownProtocolSkipped(self):
log_lines = iter([
'2014-09-27 17:04:27,11 WARNING FOO server started on 127.0.0.1:1111',
'2014-09-27 17:04:27,12 WARNING HTTP server started on 127.0.0.1:5167',
])
self.assertEqual(
{'http': 5167},
wpr_server.ReplayServer._ParseLogFilePorts(log_lines))
def testTypicalLogLinesGiveFullDict(self):
log_lines = iter([
'extra',
'2014-09-27 17:04:27,11 WARNING DNS server started on 127.0.0.1:2345',
'2014-09-27 17:04:27,12 WARNING HTTP server started on 127.0.0.1:3456',
'2014-09-27 17:04:27,13 WARNING HTTPS server started on 127.0.0.1:4567',
])
self.assertEqual(
{'dns': 2345, 'http': 3456, 'https': 4567},
wpr_server.ReplayServer._ParseLogFilePorts(log_lines))
|
heartherumble/django-twittersync | twittersync/templatetags/twittersync_tags.py | Python | bsd-3-clause | 2,179 | 0.002295 | from django.conf import settings
from django import template
from twittersync.models import TwitterAccount, TwitterStatus
register = template.Library()
class LatestTweets(template.Node):
def __init__(self, account, limit, varname):
self.account = account
self.limit = limit
self.varname = varname
def render(self, context):
def resolve_or_not(var, context):
if callable(getattr(var, 'resolve', None)):
return var.resolve(context)
return var
account = resolve_or_not(self.account, context)
limit = resolve_or_not(self.limit, context)
varname = resolve_or_not(self.varname, context)
if not isinstance(account, TwitterAccount):
try:
account = TwitterAccount.objects.get(screen_name=account)
except TwitterAccount.DoesNotExist:
raise
context[varname] = account.tweets.all()[:int(limit)]
return u''
@register.tag
def get_latest_tweets(parser, token):
''' Returns the latest tweets stored in the db.
Run like so:
{% get_latest_tweets twitter_account_instance 5 as tweets %}
Will return the last 5 tweets and store in the
template context as "tweets"..
You can also exclude the number requested and
the tag will return t | he value set in
settings.TWITTERSYNC_LATEST_TWEETS. If that
isn't set, we fall back to 5
'''
bits = token.split_contents()
if len(bits) < 4:
raise template.TemplateSyntaxError(
'"%s" tag takes at least 3 arguments' % bits[0]
)
limit = None
try:
_tag, account, limit, _as, varname = bits
except ValueError:
_tag, account, _as, varname = bits
if limit is None: |
limit = getattr(settings, 'TWITTERSYNC_LATEST_TWEETS', 5)
try:
# needed because it may not be passed via the
# template token.
limit = parser.compile_filter(limit)
except TypeError:
pass
return LatestTweets(
parser.compile_filter(account),
limit,
parser.compile_filter(varname),
)
|
lxneng/incubator-airflow | airflow/contrib/task_runner/cgroup_task_runner.py | Python | apache-2.0 | 8,763 | 0.001141 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import getpass
import os
import uuid
from cgroupspy import trees
import psutil
from airflow.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils.helpers import reap_process_group
class CgroupTaskRunner(BaseTaskRunner):
"""
Runs the raw Airflow task in a cgroup that has containment for memory and
cpu. It uses the resource requirements defined in the task to construct
the settings for the cgroup.
Note that this task runner will only work if the Airflow user has root privileges,
e.g. if the airflow user is called `airflow` then the following entries (or an even
less restrictive ones) are needed in the sudoers file (replacing
/CGROUPS_FOLDER with your system's cgroups folder, e.g. '/sys/fs/cgroup/'):
airflow ALL= (root) NOEXEC: /bin/chown /CGROUPS_FOLDER/memory/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/memory/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/memory/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chown /CGROUPS_FOLDER/cpu/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/cpu/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chown /CGROUPS_FOLDER/cpu/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chmod /CGROUPS_FOLDER/memory/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/memory/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/memory/airflow/* *
airflow ALL= (root) NOEXEC: /bin/chmod /CGROUPS_FOLDER/cpu/airflow/*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/cpu/airflow/*..*
airflow ALL= (root) NOEXEC: !/bin/chmod /CGROUPS_FOLDER/cpu/airflow/* *
"""
def __init__(self, local_task_job):
super(CgroupTaskRunner, self).__init__(local_task_job)
self.process = None
self._finished_running = False
self._cpu_shares = None
self._mem_mb_limit = None
self._created_cpu_cgroup = False
self._created_mem_cgroup = False
self._cur_user = getpass.getuser()
def _create_cgroup(self, path):
"""
Create the specif | ied cgroup.
:param path: The path of the cgroup to create.
E.g. cpu/mygroup/mysubgroup
:return: the Node associated with the created cgroup.
:rtype: cgroupspy.nodes.Node
"""
node = trees.Tree().root
path_split = path.split(os.sep)
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.debug("Creating cgroup % | s in %s", path_element, node.path)
node = node.create_cgroup(path_element)
else:
self.log.debug(
"Not creating cgroup %s in %s since it already exists",
path_element, node.path
)
node = name_to_node[path_element]
return node
def _delete_cgroup(self, path):
"""
Delete the specified cgroup.
:param path: The path of the cgroup to delete.
E.g. cpu/mygroup/mysubgroup
"""
node = trees.Tree().root
path_split = path.split("/")
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.warning("Cgroup does not exist: %s", path)
return
else:
node = name_to_node[path_element]
# node is now the leaf node
parent = node.parent
self.log.debug("Deleting cgroup %s/%s", parent, node.name)
parent.delete_cgroup(node.name)
def start(self):
# Use bash if it's already in a cgroup
cgroups = self._get_cgroup_names()
if cgroups["cpu"] != "/" or cgroups["memory"] != "/":
self.log.debug(
"Already running in a cgroup (cpu: %s memory: %s) so not "
"creating another one",
cgroups.get("cpu"), cgroups.get("memory")
)
self.process = self.run_command()
return
# Create a unique cgroup name
cgroup_name = "airflow/{}/{}".format(datetime.datetime.utcnow().
strftime("%Y-%m-%d"),
str(uuid.uuid1()))
self.mem_cgroup_name = "memory/{}".format(cgroup_name)
self.cpu_cgroup_name = "cpu/{}".format(cgroup_name)
# Get the resource requirements from the task
task = self._task_instance.task
resources = task.resources
cpus = resources.cpus.qty
self._cpu_shares = cpus * 1024
self._mem_mb_limit = resources.ram.qty
# Create the memory cgroup
mem_cgroup_node = self._create_cgroup(self.mem_cgroup_name)
self._created_mem_cgroup = True
if self._mem_mb_limit > 0:
self.log.debug(
"Setting %s with %s MB of memory",
self.mem_cgroup_name, self._mem_mb_limit
)
mem_cgroup_node.controller.limit_in_bytes = self._mem_mb_limit * 1024 * 1024
# Create the CPU cgroup
cpu_cgroup_node = self._create_cgroup(self.cpu_cgroup_name)
self._created_cpu_cgroup = True
if self._cpu_shares > 0:
self.log.debug(
"Setting %s with %s CPU shares",
self.cpu_cgroup_name, self._cpu_shares
)
cpu_cgroup_node.controller.shares = self._cpu_shares
# Start the process w/ cgroups
self.log.debug(
"Starting task process with cgroups cpu,memory: %s",
cgroup_name
)
self.process = self.run_command(
['cgexec', '-g', 'cpu,memory:{}'.format(cgroup_name)]
)
def return_code(self):
return_code = self.process.poll()
# TODO(plypaul) Monitoring the the control file in the cgroup fs is better than
# checking the return code here. The PR to use this is here:
# https://github.com/plypaul/airflow/blob/e144e4d41996300ffa93947f136eab7785b114ed/airflow/contrib/task_runner/cgroup_task_runner.py#L43
# but there were some issues installing the python butter package and
# libseccomp-dev on some hosts for some reason.
# I wasn't able to track down the root cause of the package install failures, but
# we might want to revisit that approach at some other point.
if return_code == 137:
self.log.warning("Task failed with return code of 137. This may indicate "
"that it was killed due to excessive memory usage. "
"Please consider optimizing your task or using the "
"resources argument to reserve more memory for your task")
return return_code
def terminate(self):
if self.process and psutil.pid_exists(self.process.pid):
reap_process_group(self.process.pid, self.log)
def on_finish(self):
# Let the OOM watcher thread know we're done to avoid false OOM alarms
self._finished_running = True
# Clean up the cgroups
if self._created_mem_cgroup:
self._delete_cgroup(self.mem_cgroup_name)
if self |
ESOedX/edx-platform | openedx/core/djangoapps/theming/storage.py | Python | agpl-3.0 | 13,828 | 0.003616 | """
Comprehensive Theming support for Django's collectstatic functionality.
See https://docs.djangoproject.com/en/1.8/ref/contrib/staticfiles/
"""
from __future__ import absolute_import
import os.path
import posixpath
import re
from django.conf import settings
from django.contrib.staticfiles.finders import find
from django.contrib.staticfiles.storage import CachedFilesMixin, StaticFilesStorage
from django.utils._os import safe_join
from django.utils.six.moves.urllib.parse import ( # pylint: disable=no-name-in-module, import-error
unquote,
urldefrag,
urlsplit
)
from pipeline.storage import PipelineMixin
from openedx.core.djangoapps.theming.helpers import (
get_current_theme,
get_project_root_name,
get_theme_base_dir,
get_themes,
is_comprehensive_theming_enabled
)
class ThemeStorage(StaticFilesStorage):
"""
Comprehensive theme aware Static files storage.
"""
# prefix for file path, this prefix is added at the beginning of file path before saving static files during
# collectstatic command.
# e.g. having "edx.org" as prefix will cause files to be saved as "edx.org/images/logo.png"
# instead of "images/logo.png"
prefix = None
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None, prefix=None):
self.prefix = prefix
super(ThemeStorage, self).__init__(
location=location,
base_url=base_url,
file_permissions_mode=file_permissions_mode,
directory_permissions_mode=directory_permissions_mode,
)
def url(self, name):
"""
Returns url of the asset, themed url will be returned if the asset is themed otherwise default
asset url will be returned.
Args:
name: name of the asset, e.g. 'images/logo.png'
Returns:
url of the asset, e.g. '/static/red-theme/images/logo.png' if current theme is red-theme and logo
is provided by red-theme otherwise '/static/images/logo.png'
"""
prefix = ''
theme = get_current_theme()
# get theme prefix from site address if if asset is accessed via a url
if theme:
prefix = theme.theme_dir_name
# get theme prefix from storage class, if asset is accessed during collectstatic run
elif self.prefix:
prefix = self.prefix
# join theme prefix with asset name if theme is applied and themed asset exists
if prefix and self.themed(name, prefix):
name = os.path.join(prefix, name)
return super(ThemeStorage, self).url(name)
def themed(self, name, theme):
"""
Returns True if given asset override is provided | by the given theme otherwise returns False.
Args:
name: asset name e.g. 'images/logo.png'
theme: theme name e.g. 'red-theme', 'edx.org'
Re | turns:
True if given asset override is provided by the given theme otherwise returns False
"""
if not is_comprehensive_theming_enabled():
return False
# in debug mode check static asset from within the project directory
if settings.DEBUG:
themes_location = get_theme_base_dir(theme, suppress_error=True)
# Nothing can be themed if we don't have a theme location or required params.
if not all((themes_location, theme, name)):
return False
themed_path = "/".join([
themes_location,
theme,
get_project_root_name(),
"static/"
])
name = name[1:] if name.startswith("/") else name
path = safe_join(themed_path, name)
return os.path.exists(path)
# in live mode check static asset in the static files dir defined by "STATIC_ROOT" setting
else:
return self.exists(os.path.join(theme, name))
class ThemeCachedFilesMixin(CachedFilesMixin):
"""
Comprehensive theme aware CachedFilesMixin.
Main purpose of subclassing CachedFilesMixin is to override the following methods.
1 - _url
2 - url_converter
_url:
This method takes asset name as argument and is responsible for adding hash to the name to support caching.
This method is called during both collectstatic command and live server run.
When called during collectstatic command that name argument will be asset name inside STATIC_ROOT,
for non themed assets it will be the usual path (e.g. 'images/logo.png') but for themed asset it will
also contain themes dir prefix (e.g. 'red-theme/images/logo.png'). So, here we check whether the themed asset
exists or not, if it exists we pass the same name up in the MRO chain for further processing and if it does not
exists we strip theme name and pass the new asset name to the MRO chain for further processing.
When called during server run, we get the theme dir for the current site using `get_current_theme` and
make sure to prefix theme dir to the asset name. This is done to ensure the usage of correct hash in file name.
e.g. if our red-theme overrides 'images/logo.png' and we do not prefix theme dir to the asset name, the hash for
'{platform-dir}/lms/static/images/logo.png' would be used instead of
'{themes_base_dir}/red-theme/images/logo.png'
url_converter:
This function returns another function that is responsible for hashing urls that appear inside assets
(e.g. url("images/logo.png") inside css). The method defined in the superclass adds a hash to file and returns
relative url of the file.
e.g. for url("../images/logo.png") it would return url("../images/logo.790c9a5340cb.png"). However we would
want it to return absolute url (e.g. url("/static/images/logo.790c9a5340cb.png")) so that it works properly
with themes.
The overridden method here simply comments out the line that convert absolute url to relative url,
hence absolute urls are used instead of relative urls.
"""
def _processed_asset_name(self, name):
"""
Returns either a themed or unthemed version of the given asset name,
depending on several factors.
See the class docstring for more info.
"""
theme = get_current_theme()
if theme and theme.theme_dir_name not in name:
# during server run, append theme name to the asset name if it is not already there
# this is ensure that correct hash is created and default asset is not always
# used to create hash of themed assets.
name = os.path.join(theme.theme_dir_name, name)
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
asset_name = name
if not self.exists(clean_name):
# if themed asset does not exists then use default asset
theme = name.split("/", 1)[0]
# verify that themed asset was accessed
if theme in [theme.theme_dir_name for theme in get_themes()]:
asset_name = "/".join(name.split("/")[1:])
return asset_name
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
"""
This override method swaps out `name` with a processed version.
See the class docstring for more info.
"""
processed_asset_name = self._processed_asset_name(name)
return super(ThemeCachedFilesMixin, self)._url(hashed_name_func, processed_asset_name, force, hashed_files)
def url_converter(self, name, hashed_files, template=None):
"""
This is an override of url_converter from CachedFilesMixin.
It changes one line near the end of the method (see the NOTE) in order
to return absolute urls instead of relative urls. This behavior is
necessary for theme overrides, as we get 404 on assets with relative
urls on a themed site.
"""
if template is None:
template = sel |
lotharwissler/bioinformatics | python/misa/exonic-ssrs-to-genes.py | Python | mit | 3,457 | 0.028348 | #!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # coman | d line argument handling
from low import * # custom functions, written by myself
from misa import Mis | aSSR
from collections import defaultdict
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -e <path> -g <path> ")
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -e all ssrs in exons" )
stdout( " -g parsed gff for all drosophilas" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hg:e:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {}
for key, value in keys:
if key == '-g': args['gff'] = value
if key == '-e': args['ssr'] = value
if not args.has_key('gff'):
stderr( "parsed gff argument missing." )
show_help()
elif not file_exists( args.get('gff') ):
stderr( "parsed gff does not exist." )
show_help()
if not args.has_key('ssr'):
stderr( "ssr file argument missing." )
show_help()
elif not file_exists( args.get('ssr') ):
stderr( "ssr file does not exist." )
show_help()
return args
# =============================================================================
class Gene():
def __init__(self, line):
cols = line.rstrip().split("\t")
self.species = cols.pop(0)
self.id = cols.pop(0)
self.chr = cols.pop(0)
self.start = cols.pop(0)
self.stop = cols.pop(0)
self.strand = cols.pop(0)
self.loc = self.species + "|" + self.chr
def pos_in_gene(self, pos):
if int(pos) >= int(self.start) and int(pos) <= int(self.stop): return 1
else: return 0
# =============================================================================
def get_gene_features(file):
hash = defaultdict(list)
fo = open(file)
for line in fo:
g = Gene(line)
hash[g.loc].append(g)
fo.close()
return hash
# =============================================================================
def get_ssrs(file):
hash = defaultdict(list)
fo = open(file)
for line in fo:
if line.startswith("ID\t"): continue
m = MisaSSR(line)
hash[m.geneid].append(m)
fo.close()
return hash
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
def main( args ):
locssrs = get_ssrs(args['ssr'])
locgenes = get_gene_features(args['gff'])
for loc, genes in locgenes.iteritems():
for gene in genes:
for ssr in locssrs[loc]:
if gene.pos_in_gene(ssr.startpos): print gene.id
# =============================================================================
args = handle_arguments()
main( args )
|
baroquebobcat/pants | tests/python/pants_test/init/test_repro.py | Python | apache-2.0 | 1,540 | 0.012987 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from functools import partial
from pants.fs.archive import TGZ
from pants.init.repro import Repro
from pants.util.contextutil import temporary_dir
from pants_test.init.repro_mixin import ReproMixin
class ReproTest(unittest.TestCase, ReproMixin):
def test_repro(self):
"""Verify that Repro object creates expected tar.gz file"""
with temporary_dir() as tmpdir:
fake_buildroot = os.path.join(tmpdir, 'buildroot')
add_file = partial(self.add_file, fake_buildroot)
add_file('.git/foo', 'foo')
add_file('dist/bar', | 'bar')
add_file('baz.txt', 'baz')
add_file('qux/quux.txt', 'quux')
repro_file = os.path.join(tmpdir, 'repro.tar.gz')
repro = Repro(repro_file, fake_buildroot, ['.git', 'dist'])
repro.capture(run_info_dict={'foo': 'bar', 'baz': 'qux'})
|
extract_dir = os.path.join(tmpdir, 'extract')
TGZ.extract(repro_file, extract_dir)
assert_file = partial(self.assert_file, extract_dir)
assert_file('baz.txt', 'baz')
assert_file('qux/quux.txt', 'quux')
assert_file('repro.sh')
assert_not_exists = partial(self.assert_not_exists, extract_dir)
assert_not_exists('.git')
assert_not_exists('dist')
|
DivineHime/seishirou | lib/multidict/__init__.py | Python | gpl-3.0 | 1,162 | 0 | """Multidict implementation.
HTTP Headers and URL query string require specific data structure:
multidict. It behaves mostly like a dict but it can have
several values for the same key.
"""
import os
__all__ = ('MultiDictProxy', 'CIMultiDictProxy',
'MultiDict', 'CIMultiDict', 'upstr', 'istr')
__version__ = '2.1.5'
if bool(os.environ.get('MULTIDICT_NO_EXTENSIONS')):
from ._multidict_py import (MultiDictProxy,
CIMultiDictProxy,
MultiDict,
CIMultiDict,
upstr, istr)
else:
try:
from ._multidict import (MultiDictProxy,
CIMultiDictProxy,
MultiDict,
C | IMultiDict,
upstr, istr)
except ImportError: # pragma: no cover
| from ._multidict_py import (MultiDictProxy,
CIMultiDictProxy,
MultiDict,
CIMultiDict,
upstr, istr)
|
petrjasek/superdesk-core | superdesk/macros/imperial/volume_cubic_inches_to_metric_test.py | Python | agpl-3.0 | 1,669 | 0.004194 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import unittest
from .volume_cubic_inches_to_metric import cubic_inches_to_metric
class VolumeTestCase(unittest.TestCase):
    """Exercise cubic_inches_to_metric() over the unit spellings it accepts."""

    def test(self):
        # Cover full words, abbreviations, hyphenated forms, ranges and
        # thousand separators; the macro must annotate each occurrence with
        # its metric equivalent while leaving body_html itself untouched.
        text = (
            "Total volume is 100.50 cubic inches for this land. "
            "Total volume is 15.7 cubic in for this land. "
            "Total volume is 1 Cubic Inch for this land. "
            "Total volume is 1-16 cu-in for this land. "
            "Total volume is 1-16 cb. in for this land. "
            "Total volume is 16.7-Cubic-in for this land. "
            "Total volume is 16,500-cu. in. for this land. "
        )
        item = {"body_html": text}
        res, diff = cubic_inches_to_metric(item)
        self.assertEqual(diff["100.50 cubic inches"], "100.50 cubic inches (1,647 cubic centimeter)")
        self.assertEqual(diff["15.7 cubic in"], "15.7 cubic in (257.3 cubic centimeter)")
        self.assertEqual(diff["1 Cubic Inch"], "1 Cubic Inch (16 cubic centimeter)")
        self.assertEqual(diff["1-16 cu-in"], "1-16 cu-in (16-262 cubic centimeter)")
        self.assertEqual(diff["1-16 cb. in"], "1-16 cb. in (16-262 cubic centimeter)")
        self.assertEqual(diff["16.7-Cubic-in"], "16.7-Cubic-in (273.7 cubic centimeter)")
        self.assertEqual(diff["16,500-cu. in"], "16,500-cu. in (0.3 cubic meter)")
        self.assertEqual(res["body_html"], item["body_html"])
|
ktsaou/netdata | collectors/python.d.plugin/python_modules/pyyaml2/scanner.py | Python | gpl-3.0 | 52,661 | 0.002279 | # SPDX-License-Identifier: MIT
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
__all__ = ['Scanner', 'ScannerError']
from error import MarkedYAMLError
from tokens import *
class ScannerError(MarkedYAMLError):
    """Raised when the scanner meets input it cannot turn into a token;
    inherits context/problem marks from MarkedYAMLError for reporting."""
    pass
class SimpleKey(object):
    """Bookkeeping record for a potential simple key.

    See the simple-keys treatment in Scanner: each record remembers which
    token a candidate key precedes and where in the stream it was seen.
    """

    def __init__(self, token_number, required, index, line, column, mark):
        # Scanner reads these attributes directly; store them verbatim.
        (self.token_number, self.required) = (token_number, required)
        (self.index, self.line, self.column) = (index, line, column)
        self.mark = mark
class Scanner(object):
    def __init__(self):
        """Initialize the scanner state (no input handling here).

        It is assumed that Scanner and Reader will have a common descendant:
        Reader does the dirty work of checking for BOM and converting the
        input data to Unicode, and also adds a NUL to the end.

        Reader supports the following methods:
            self.peek(i=0)     # peek the next i-th character
            self.prefix(l=1)   # peek the next l characters
            self.forward(l=1)  # read the next l characters, move the pointer
        """
        # Had we reached the end of the stream?
        self.done = False
        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0
        # List of processed tokens that are not yet emitted.
        self.tokens = []
        # Add the STREAM-START token.
        self.fetch_stream_start()
        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0
        # The current indentation level.
        self.indent = -1
        # Past indentation levels.
        self.indents = []
        # Variables related to simple keys treatment.
        # A simple key is a key that is not denoted by the '?' indicator.
        # Example of simple keys:
        #   ---
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.
        # Can a simple key start at the current position? A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #   (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True
        # Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more that one possible simple key
        # for each level. The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}
# Public methods.
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
# Return the next token, but do not delete if from the queue.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
    def need_more_tokens(self):
        # Decide whether fetch_more_tokens() must run before a token can be
        # served.  NOTE: deliberately falls through (returning None, falsy)
        # when no more tokens are needed -- callers use it in boolean context.
        if self.done:
            return False
        if not self.tokens:
            return True
        # The current token may be a potential simple key, so we
        # need to look further.
        self.stale_possible_simple_keys()
        if self.next_possible_simple_key() == self.tokens_taken:
            return True
    def fetch_more_tokens(self):
        """Scan past whitespace/comments, then dispatch on the next
        character to append the next token(s) to self.tokens."""
        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()
        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()
        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)
        # Peek the next character.
        ch = self.peek()
        # Is it the end of stream?
        if ch == u'\0':
            return self.fetch_stream_end()
        # Is it a directive?
        if ch == u'%' and self.check_directive():
            return self.fetch_directive()
        # Is it the document start?
        if ch == u'-' and self.check_document_start():
            return self.fetch_document_start()
        # Is it the document end?
        if ch == u'.' and self.check_document_end():
            return self.fetch_document_end()
        # TODO: support for BOM within a stream.
        #if ch == u'\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken
        # Note: the order of the following checks is NOT significant.
        # Is it the flow sequence start indicator?
        if ch == u'[':
            return self.fetch_flow_sequence_start()
        # Is it the flow mapping start indicator?
        if ch == u'{':
            return self.fetch_flow_mapping_start()
        # Is it the flow sequence end indicator?
        if ch == u']':
            return self.fetch_flow_sequence_end()
        # Is it the flow mapping end indicator?
        if ch == u'}':
            return self.fetch_flow_mapping_end()
        # Is it the flow entry indicator?
        if ch == u',':
            return self.fetch_flow_entry()
        # Is it the block entry indicator?
        if ch == u'-' and self.check_block_entry():
            return self.fetch_block_entry()
        # Is it the key indicator?
        if ch == u'?' and self.check_key():
            return self.fetch_key()
        # Is it the value indicator?
        if ch == u':' and self.check_value():
            return self.fetch_value()
        # Is it an alias?
        if ch == u'*':
            return self.fetch_alias()
        # Is it an anchor?
        if ch == u'&':
            return self.fetch_anchor()
        # Is it a tag?
        if ch == u'!':
            return self.fetch_tag()
        # Is it a literal scalar?
        if ch == u'|' and not self.flow_level:
            return self.fetch_literal()
        # Is it a folded scalar?
        if ch == u'>' and not self.flow_level:
            return self.fetch_folded()
        # Is it a single quoted scalar?
        if ch == u'\'':
            return self.fetch_single()
        # Is it a double quoted scalar?
        if ch == u'\"':
            return self.fetch_double()
        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()
        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token"
                % ch.encode('utf-8'), self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't n |
morfeokmg/maurrepo | src/Examples/CmdClient.py | Python | gpl-2.0 | 4,875 | 0.03159 | '''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LI | ABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from Yowsup.connectionmanager import YowsupConnectionManager
import time, datetime, sys
# Python 3 renamed raw_input() to input(); alias it so the interactive
# prompt loop below runs unchanged on both major versions.
if sys.version_info >= (3, 0):
    raw_input = input
class WhatsappCmdClient:
    """Interactive command-line Whatsapp chat client for a single contact.

    Wires a YowsupConnectionManager's signal/method interfaces to a simple
    read-eval loop.  Slash-commands (e.g. "/lastseen") are dispatched
    through self.commandMappings; any other input is sent as a message.
    """

    def __init__(self, phoneNumber, keepAlive = False, sendReceipts = False):
        self.sendReceipts = sendReceipts
        self.phoneNumber = phoneNumber
        self.jid = "%s@s.whatsapp.net" % phoneNumber

        # messageId -> [send timestamp, text]; used to echo sent messages
        # once the server acknowledges them in onMessageSent().
        self.sentCache = {}

        connectionManager = YowsupConnectionManager()
        connectionManager.setAutoPong(keepAlive)

        self.signalsInterface = connectionManager.getSignalsInterface()
        self.methodsInterface = connectionManager.getMethodsInterface()

        self.signalsInterface.registerListener("auth_success", self.onAuthSuccess)
        self.signalsInterface.registerListener("auth_fail", self.onAuthFailed)
        self.signalsInterface.registerListener("message_received", self.onMessageReceived)
        self.signalsInterface.registerListener("receipt_messageSent", self.onMessageSent)
        self.signalsInterface.registerListener("presence_updated", self.onPresenceUpdated)
        self.signalsInterface.registerListener("disconnected", self.onDisconnected)

        self.commandMappings = {"lastseen": lambda: self.methodsInterface.call("presence_request", (self.jid,)),
                                "available": lambda: self.methodsInterface.call("presence_sendAvailable"),
                                "unavailable": lambda: self.methodsInterface.call("presence_sendUnavailable")
                                }

        self.done = False

    def login(self, username, password):
        """Authenticate and block until the interactive session finishes."""
        self.username = username
        self.methodsInterface.call("auth_login", (username, password))
        while not self.done:
            time.sleep(0.5)

    def onAuthSuccess(self, username):
        print("Authed %s" % username)
        self.methodsInterface.call("ready")
        self.goInteractive(self.phoneNumber)

    def onAuthFailed(self, username, err):
        print("Auth Failed!")

    def onDisconnected(self, reason):
        print("Disconnected because %s" % reason)

    def onPresenceUpdated(self, jid, lastSeen):
        # lastSeen is "seconds ago"; convert to an absolute timestamp.
        # int() instead of the Python-2-only long(): this module already
        # supports Python 3 (see the raw_input alias above).
        formattedDate = datetime.datetime.fromtimestamp(int(time.time()) - lastSeen).strftime('%d-%m-%Y %H:%M')
        self.onMessageReceived(0, jid, "LAST SEEN RESULT: %s" % formattedDate, int(time.time()), False, None, False)

    def onMessageSent(self, jid, messageId):
        formattedDate = datetime.datetime.fromtimestamp(self.sentCache[messageId][0]).strftime('%d-%m-%Y %H:%M')
        print("%s [%s]:%s" % (self.username, formattedDate, self.sentCache[messageId][1]))
        print(self.getPrompt())

    def runCommand(self, command):
        """Dispatch a "/command"; return 1 if handled, 0 otherwise."""
        if command[0] == "/":
            command = command[1:].split(' ')
            try:
                self.commandMappings[command[0]]()
                return 1
            except KeyError:
                return 0
        return 0

    def onMessageReceived(self, messageId, jid, messageContent, timestamp, wantsReceipt, pushName, isBroadcast):
        # Ignore messages from anyone but the contact this session chats with.
        if jid[:jid.index('@')] != self.phoneNumber:
            return
        formattedDate = datetime.datetime.fromtimestamp(timestamp).strftime('%d-%m-%Y %H:%M')
        print("%s [%s]:%s" % (jid, formattedDate, messageContent))
        if wantsReceipt and self.sendReceipts:
            self.methodsInterface.call("message_ack", (jid, messageId))
        print(self.getPrompt())

    def goInteractive(self, jid):
        """Read-eval loop: send messages / run commands until EOF of input."""
        print("Starting Interactive chat with %s" % jid)
        jid = "%s@s.whatsapp.net" % jid
        print(self.getPrompt())
        while True:
            message = raw_input()
            message = message.strip()
            if not len(message):
                continue
            if not self.runCommand(message.strip()):
                msgId = self.methodsInterface.call("message_send", (jid, message))
                self.sentCache[msgId] = [int(time.time()), message]
        self.done = True

    def getPrompt(self):
        return "Enter Message or command: (/%s)" % ", /".join(self.commandMappings)
|
maraujop/django-crispy-forms | crispy_forms/tests/test_tags.py | Python | mit | 6,908 | 0.001013 | import pytest
import django
from django.forms.boundfield import BoundField
from django.forms.formsets import formset_factory
from django.template import Context, Template
from crispy_forms.exceptions import CrispyError
from crispy_forms.templatetags.crispy_forms_field import crispy_addon
from .conftest import only_bootstrap
from .forms import SampleForm
def test_crispy_field():
    """{% crispy_field %} renders one <input> per SampleForm widget (8)."""
    template = Template(
        """
        {% load crispy_forms_field %}
        {% for field in form %}
            {% crispy_field field %}
        {% endfor %}
        """
    )
    html = template.render(Context({"form": SampleForm()}))
    assert html.count("<input") == 8
def test_as_crispy_errors_form_without_non_field_errors():
    """A form with no non-field errors renders no error markup."""
    template = Template(
        """
        {% load crispy_forms_tags %}
        {{ form|as_crispy_errors }}
        """
    )
    # Matching passwords: validation succeeds, so no error container.
    form = SampleForm({"password1": "god", "password2": "god"})
    form.is_valid()
    c = Context({"form": form})
    html = template.render(c)
    assert not ("errorMsg" in html or "alert" in html)
def test_as_crispy_errors_form_with_non_field_errors():
    """Non-field errors are rendered as a list without any heading."""
    template = Template(
        """
        {% load crispy_forms_tags %}
        {{ form|as_crispy_errors }}
        """
    )
    # Mismatched passwords trigger SampleForm's non-field error.
    form = SampleForm({"password1": "god", "password2": "wargame"})
    form.is_valid()
    c = Context({"form": form})
    html = template.render(c)
    assert "errorMsg" in html or "alert" in html
    assert "<li>Passwords dont match</li>" in html
    assert "<h3>" not in html
def test_as_crispy_errors_formset_without_non_form_errors():
    """An empty, valid formset renders no error markup."""
    template = Template(
        """
        {% load crispy_forms_tags %}
        {{ formset|as_crispy_errors }}
        """
    )
    SampleFormset = formset_factory(SampleForm, max_num=1, validate_max=True)
    formset = SampleFormset()
    formset.is_valid()
    c = Context({"formset": formset})
    html = template.render(c)
    assert not ("errorMsg" in html or "alert" in html)
def test_as_crispy_errors_formset_with_non_form_errors():
    """Submitting more forms than max_num yields a non-form error list."""
    template = Template(
        """
        {% load crispy_forms_tags %}
        {{ formset|as_crispy_errors }}
        """
    )
    SampleFormset = formset_factory(SampleForm, max_num=1, validate_max=True)
    # Two forms against max_num=1 triggers the "too many forms" error.
    formset = SampleFormset(
        {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "0",
            "form-MAX_NUM_FORMS": "",
            "form-0-password1": "god",
            "form-0-password2": "wargame",
        }
    )
    formset.is_valid()
    c = Context({"formset": formset})
    html = template.render(c)
    assert "errorMsg" in html or "alert" in html
    # Django 3.2 changed the wording of the validate_max error message.
    if django.VERSION < (3, 2):
        assert "<li>Please submit 1 or fewer forms.</li>" in html
    else:
        assert "<li>Please submit at most 1 form.</li>" in html
    assert "<h3>" not in html
def test_as_crispy_field_non_field(settings):
    """|as_crispy_field on a non-field object fails loudly."""
    template = Template(
        """
        {% load crispy_forms_tags %}
        {{ field|as_crispy_field }}
        """
    )
    c = Context({"field": "notafield"})
    # Raises an AttributeError when trying to figure out how to render it
    # Not sure if this is expected behavior -- @kavdev
    error_class = CrispyError if settings.DEBUG else AttributeError
    with pytest.raises(error_class):
        template.render(c)
def test_as_crispy_field_bound_field():
    """|as_crispy_field renders a single BoundField: only the requested
    field's widget appears in the output."""
    template = Template(
        """
        {% load crispy_forms_tags %}
        {{ field|as_crispy_field }}
        """
    )
    form = SampleForm({"password1": "god", "password2": "god"})
    form.is_valid()
    c = Context({"field": form["password1"]})
    # Would raise exception if not a field
    html = template.render(c)
    assert "id_password1" in html
    assert "id_password2" not in html
def test_crispy_filter_with_form():
    """|crispy renders a whole form div-based (no tables), with labels."""
    template = Template(
        """
        {% load crispy_forms_tags %}
        {{ form|crispy }}
        """
    )
    c = Context({"form": SampleForm()})
    html = template.render(c)
    assert "<td>" not in html
    assert "id_is_company" in html
    assert html.count("<label") == 7
def test_crispy_filter_with_formset():
    """|crispy on a formset emits the management form but no <form> tag."""
    template = Template(
        """
        {% load crispy_forms_tags %}
        {{ testFormset|crispy }}
        """
    )
    SampleFormset = formset_factory(SampleForm, extra=4)
    testFormset = SampleFormset()
    c = Context({"testFormset": testFormset})
    html = template.render(c)
    assert html.count("<form") == 0
    # Check formset management form
    assert "form-TOTAL_FORMS" in html
    assert "form-INITIAL_FORMS" in html
    assert "form-MAX_NUM_FORMS" in html
def test_classes_filter():
    """The |classes filter exposes a widget's CSS classes."""
    template = Template(
        """
        {% load crispy_forms_field %}
        {{ testField|classes }}
        """
    )
    test_form = SampleForm()
    test_form.fields["email"].widget.attrs.update({"class": "email-fields"})
    c = Context({"testField": test_form.fields["email"]})
    html = template.render(c)
    assert "email-fields" in html
def test_crispy_field_and_class_converters():
    """{% crispy_field %} applies extra classes plus class converters."""
    template = Template(
        """
        {% load crispy_forms_field %}
        {% crispy_field testField 'class' 'error' %}
        """
    )
    test_form = SampleForm()
    field_instance = test_form.fields["email"]
    bound_field = BoundField(test_form, field_instance, "email")
    c = Context({"testField": bound_field})
    html = template.render(c)
    assert "error" in html
    # "inputtext" comes from the template pack's class converter mapping.
    assert "inputtext" in html
@only_bootstrap
def test_crispy_addon(settings):
    """crispy_addon() wraps a field with prepend/append markup; the exact
    CSS class depends on the active bootstrap template pack."""
    test_form = SampleForm()
    field_instance = test_form.fields["email"]
    bound_field = BoundField(test_form, field_instance, "email")
    if settings.CRISPY_TEMPLATE_PACK == "bootstrap":
        # prepend tests
        assert "input-prepend" in crispy_addon(bound_field, prepend="Work")
        assert "input-append" not in crispy_addon(bound_field, prepend="Work")
        # append tests
        assert "input-prepend" not in crispy_addon(bound_field, append="Primary")
        assert "input-append" in crispy_addon(bound_field, append="Secondary")
        # prepend and append tests
        assert "input-append" in crispy_addon(bound_field, prepend="Work", append="Primary")
        assert "input-prepend" in crispy_addon(bound_field, prepend="Work", append="Secondary")
    elif settings.CRISPY_TEMPLATE_PACK == "bootstrap3":
        assert "input-group-addon" in crispy_addon(bound_field, prepend="Work", append="Primary")
        assert "input-group-addon" in crispy_addon(bound_field, prepend="Work", append="Secondary")
    elif settings.CRISPY_TEMPLATE_PACK == "bootstrap4":
        assert "input-group-text" in crispy_addon(bound_field, prepend="Work", append="Primary")
        assert "input-group-text" in crispy_addon(bound_field, prepend="Work", append="Secondary")
    # errors: crispy_addon requires a field and at least one of prepend/append
    with pytest.raises(TypeError):
        crispy_addon()
    with pytest.raises(TypeError):
        crispy_addon(bound_field)
|
alisaifee/limits | limits/aio/storage/base.py | Python | mit | 2,931 | 0.000341 | from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Tuple
from limits.storage.registry import StorageRegistry
from limits.util import LazyDependency
class Storage(LazyDependency, metaclass=StorageRegistry):
    """
    Base class to extend when implementing an async storage backend.

    .. warning:: This is a beta feature
    .. versionadded:: 2.1
    """

    STORAGE_SCHEME: Optional[List[str]]
    """The storage schemes to register against this implementation"""

    def __init__(self, uri: Optional[str] = None, **options: Dict) -> None:
        super().__init__()

    @abstractmethod
    async def incr(
        self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1
    ) -> int:
        """
        increments the counter for a given rate limit key

        :param key: the key to increment
        :param expiry: amount in seconds for the key to expire in
        :param elastic_expiry: whether to keep extending the rate limit
         window every hit.
        :param amount: the number to increment by
        """
        raise NotImplementedError

    @abstractmethod
    async def get(self, key: str) -> int:
        """
        :param key: the key to get the counter value for
        """
        raise NotImplementedError

    @abstractmethod
    async def get_expiry(self, key: str) -> int:
        """
        :param key: the key to get the expiry for
        """
        raise NotImplementedError

    @abstractmethod
    async def check(self) -> bool:
        """
        check if storage is healthy
        """
        raise NotImplementedError

    @abstractmethod
    async def reset(self) -> Optional[int]:
        """
        reset storage to clear limits
        """
        raise NotImplementedError

    @abstractmethod
    async def clear(self, key: str) -> int:
        """
        resets the rate limit key

        :param key: the key to clear rate limits for
        """
        raise NotImplementedError
class MovingWindowSupport(ABC):
    """
    Abstract base for storages that intend to support
    the moving window strategy

    .. warning:: This is a beta feature
    .. versionadded:: 2.1
    """

    async def acquire_entry(
        self, key: str, limit: int, expiry: int, amount: int = 1
    ) -> bool:
        """
        :param key: rate limit key to acquire an entry in
        :param limit: amount of entries allowed
        :param expiry: expiry of the entry
        :param amount: the number of entries to acquire
        """
        raise NotImplementedError

    async def get_moving_window(self, key: str, limit: int, expiry: int) -> Tuple[int, int]:
        """
        returns the starting point and the number of entries in the moving
        window

        :param key: rate limit key
        :param expiry: expiry of entry
        :return: (start of window, number of acquired entries)
        """
        raise NotImplementedError
|
SmartJog/spvd | share/baseplugin.py | Python | lgpl-2.1 | 11,049 | 0.001629 | """ BasePlugin definitions. """
import logging
import threading
import traceback
import os
import queue
from sjutils import threadpool
class BasePluginError(Exception):
    """Base exception raised by BasePlugin and its subclasses."""

    def __init__(self, error):
        """Wrap *error* as the exception payload."""
        super(BasePluginError, self).__init__(error)
class BasePlugin(threading.Thread):
"""Base class for job implementation in spvd."""
name = ""
require = {}
optional = {
"debug": bool,
"max_parallel_checks": int,
"max_checks_queue": int,
"check_poll": int,
"check_timeout": int,
"result_threshold": int,
"limit_group": str,
"limit_check": str,
"limit_commit": int,
}
def __init__(self, options, event, params=None):
"""Init method.
@params is a dictionary of optional parameters among:
max_parallel_checks: maximum number of threads for this plugin.
max_checks_queue: maximum number of checks to get from
the DB and queue for execution.
check_poll: interval between two get_checks call.
check_timeout: maximum wait time for get_checks calls.
debug: enable debugging information.
result_threshold: number of results waiting for a commit that
will trigger a main-loop wake up.
"""
threading.Thread.__init__(self)
self.setDaemon(True)
self.dismiss = event
self.resqueue = {}
self.checks = {}
self.rescommit = threading.Event()
self.params = {
"max_parallel_checks": 3,
"max_checks_queue": 9,
"check_poll": 60,
"check_timeout": None,
"debug": False,
"result_threshold": 5,
"limit_group": None,
"limit_check": None,
"limit_commit": 40,
}
if params:
self.params.update(params)
# Limiting groups
self.limit_group = None
if self.params["limit_group"]:
self.lim | it_group = [
group.strip()
for group in self.params["limit_group"].split(",")
if group.strip()
]
if len(self.limit_group) == 1:
| self.limit_group = self.limit_group[0]
# Limiting checks
self.limit_check = None
if self.params["limit_check"]:
self.limit_check = [
check.strip()
for check in self.params["limit_check"].split(",")
if check.strip()
]
if len(self.limit_check) == 1:
self.limit_check = self.limit_check[0]
self.options = options
self.log = logging.getLogger("spvd.plugins." + self.name)
# Set up logging
if not self.options.nodaemon:
log_dir = options.logdir + "/" + self.name
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_handler = logging.FileHandler(log_dir + "/" + self.name + ".log")
log_format = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
log_handler.setFormatter(log_format)
self.log.addHandler(log_handler)
if self.params.get("debug", False):
self.log.setLevel(logging.DEBUG)
else:
self.log.setLevel(logging.INFO)
self.log.propagate = False
# Finalize init
self.job_pool = threadpool.ThreadPool(int(self.params["max_parallel_checks"]))
for widx, worker in enumerate(self.job_pool.workers):
worker.setName("%s-#%d" % (self.name, widx))
# Plugins or Subclasses must start Thread by themselves
# self.start()
# self.log.info(self)
    def __str__(self):
        # Generic placeholder; concrete plugins are expected to override
        # this with something more descriptive.
        return "<BasePlugin>"
@staticmethod
def __prepare_status_update(check):
"""Prepare a structure for status update."""
status = {
"status_id": check["status"]["status_id"],
"sequence_id": check["status"]["seq_id"],
"status": check["status"]["check_status"],
"message": check["status"]["check_message"],
"status_infos": check["status"]["status_infos"],
}
if "status_infos" in check:
status["status_infos"] = check["status_infos"]
return status
    def job_start(self, check):
        """Starts a job.

        Creates a job object for *check* (create_new_job is presumably
        supplied by the concrete plugin subclass -- not defined here),
        registers it under its status_id and runs it.
        """
        job = self.create_new_job(check)
        job.log.debug("check started")
        self.log.debug("Work request #%s started." % check["status"]["status_id"])
        self.checks[check["status"]["status_id"]] = job
        return job.run()
    def job_stop(self, request, result):
        """Stops a job: log its result, queue it for commit, drop the job.

        Wakes the main loop (rescommit) once enough results have
        accumulated to be worth a commit round-trip.
        """
        self.checks[request.request_id].log.info(
            "check result is %s : (%s)"
            % (result["status"]["check_status"], result["status"]["check_message"])
        )
        self.log.debug("Work request #%s finished." % request.request_id)
        update = self.__prepare_status_update(result)
        self.resqueue.update({result["status"]["status_id"]: update})
        if len(self.resqueue) > self.params["result_threshold"]:
            self.rescommit.set()
        del self.checks[request.request_id]
    def handle_exception(self, request, exc_info):
        """Handle exception in a job.

        *exc_info* is expected to be a (type, value, traceback) tuple as
        produced by sys.exc_info(); anything else indicates a corrupted
        worker state and aborts the process.
        """
        if not isinstance(exc_info, tuple):
            # Something is seriously wrong...
            self.log.critical("*** Worker thread raised an exception ***")
            self.log.critical(request)
            self.log.critical(exc_info)
            raise SystemExit
        self.log.error(
            "Exception occured in request #%s: %s" % (request.request_id, exc_info)
        )
        for line in traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]):
            self.log.error(line)
def run(self):
"""Run method."""
self.log.info("plugin started")
first = True
while not self.dismiss.isSet():
try:
if not first:
self.rescommit.wait(self.params["check_poll"])
first = False
self.log.debug(
"number of threads alive %d/%d"
% (
len(
[
thread
for thread in self.job_pool.workers
if thread.isAlive()
]
),
int(self.params["max_parallel_checks"]),
)
)
self.log.debug("jobs waiting to be reported: %d" % len(self.resqueue))
self.log.debug(
"jobs waiting to be executed: %d (approx)"
% self.job_pool._requests_queue.qsize()
)
try:
self.job_pool.poll()
except threadpool.NoResultsPending:
self.log.debug("there was no result to poll")
# Commit pending results
if self.resqueue:
self.log.debug("%d results to commit" % len(self.resqueue))
self.commit_checks()
# Determine maximum number of checks to get
# Queue.qsize is unreliable, try to mitigate its weirdness
limit_fetch = (
self.params["max_checks_queue"]
- self.job_pool._requests_queue.qsize()
)
limit_fetch = min(abs(limit_fetch), self.params["max_checks_queue"])
# Determine if we need to fetch more work
if self.job_pool._requests_queue.full() or limit_fetch == 0:
self.log.info("queue estimated full")
continue
# Non sensical value or no check to fetch
if limit_fetch > self.params["max_checks_queue"] or limit_fetch < 0:
self.log.info( |
ddico/server-tools | base_search_fuzzy/models/__init__.py | Python | agpl-3.0 | 136 | 0 | # -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import ir_model
from | . import trgm_index
|
walterbender/fractionbounce | FractionBounceActivity.py | Python | gpl-3.0 | 21,894 | 0.000868 | # -*- coding: utf-8 -*-
# Copyright (c) 2011-14, Walter Bender
# Copyright (c) 2011 Paulina Clares, Chris Rowe
# Ported to GTK3 - 2012:
# Ignacio Rodríguez <ignaciorodriguez@sugarlabs.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, 51 Franklin Street, Suite 500 Boston, MA 02110-1335 USA
import os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
from sugar3 import profile
from sugar3.activity import activity
from sugar3.activity.widgets import ActivityToolbarButton
from sugar3.activity.widgets import Stop | Button
from sugar3.graphics.toolbarbox import To | olbarBox
from sugar3.graphics.toolbarbox import ToolbarButton
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.alert import NotifyAlert
from sugar3.graphics import style
from collabwrapper import CollabWrapper
from gettext import gettext as _
import logging
_logger = logging.getLogger('fractionbounce-activity')
from utils import chooser
from svg_utils import svg_str_to_pixbuf, generate_xo_svg
from bounce import Bounce
from aplay import aplay
# Ball choices: internal name -> [localized display name, default
# background key into BGDICT ('custom' has no preset background)]
BALLDICT = {'basketball': [_('basketball'), 'wood'],
            'soccerball': [_('soccer ball'), 'grass'],
            'rugbyball': [_('rugby ball'), 'grass'],
            'bowlingball': [_('bowling ball'), 'wood'],
            'beachball': [_('beachball'), 'sand'],
            'feather': [_('feather'), 'clouds'],
            'custom': [_('user defined'), None]}
# Background choices: key -> [localized display name, image file name]
BGDICT = {'grass': [_('grass'), 'grass_background.png'],
          'wood': [_('wood'), 'parquet_background.png'],
          'clouds': [_('clouds'), 'feather_background.png'],
          'sand': [_('sand'), 'beach_background.png'],
          'custom': [_('user defined'), None]}
class FractionBounceActivity(activity.Activity):
    def __init__(self, handle):
        ''' Initiate activity: build the UI, restore saved fractions and
        wire up collaboration callbacks. '''
        super(FractionBounceActivity, self).__init__(handle)

        self.nick = profile.get_nick_name()
        self.key = profile.get_pubkey()
        if profile.get_color() is not None:
            self._colors = profile.get_color().to_string().split(',')
        else:
            self._colors = ['#A0FFA0', '#FF8080']

        self.max_participants = 4  # sharing
        self._ignore_messages = False  # activity was asked to stop

        self._setup_toolbars()
        canvas = self._setup_canvas()

        # Read any custom fractions from the project metadata
        if 'custom' in self.metadata:
            custom = self.metadata['custom']
        else:
            custom = None

        self._current_ball = 'soccerball'

        self._toolbar_was_expanded = False

        # Initialize the canvas
        self._bounce_window = Bounce(canvas, activity.get_bundle_path(), self)

        Gdk.Screen.get_default().connect('size-changed', self._configure_cb)

        # Restore any custom fractions
        if custom is not None:
            fractions = custom.split(',')
            for f in fractions:
                self._bounce_window.add_fraction(f)

        self._bounce_window.buddies.append([self.nick, self.key])
        self._player_colors = [self._colors]
        self._player_pixbufs = [
            svg_str_to_pixbuf(generate_xo_svg(scale=0.8, colors=self._colors))
        ]

        def on_activity_joined_cb(me):
            logging.debug('activity joined')
            self._player.set_from_pixbuf(self._player_pixbufs[0])

        self.connect('joined', on_activity_joined_cb)

        def on_activity_shared_cb(me):
            logging.debug('activity shared')
            self._player.set_from_pixbuf(self._player_pixbufs[0])
            self._label.set_label(_('Wait for others to join.'))

        self.connect('shared', on_activity_shared_cb)

        self._collab = CollabWrapper(self)

        if self.shared_activity:
            # We're joining
            if not self.get_shared():
                self._label.set_label(_('Wait for the sharer to start.'))

        # Map one-letter wire messages to their handlers.
        actions = {
            'j': self._new_joiner,
            'b': self._buddy_list,
            'f': self._receive_a_fraction,
            't': self._take_a_turn,
            'l': self._buddy_left,
        }

        def on_message_cb(collab, buddy, msg):
            logging.debug('on_message_cb buddy %r msg %r' % (buddy, msg))
            if not self._ignore_messages:
                actions[msg.get('action')](msg.get('data'))

        self._collab.connect('message', on_message_cb)

        def on_joined_cb(collab, msg):
            logging.debug('joined')
            self.send_event('j', [self.nick, self.key, self._colors])

        self._collab.connect('joined', on_joined_cb, 'joined')

        def on_buddy_joined_cb(collab, buddy, msg):
            logging.debug('on_buddy_joined_cb buddy %r' % (buddy.props.nick))

        self._collab.connect('buddy_joined', on_buddy_joined_cb,
                             'buddy_joined')

        def on_buddy_left_cb(collab, buddy, msg):
            logging.debug('on_buddy_left_cb buddy %r' % (buddy.props.nick))
            # synthesise a buddy left message in case it did not
            # arrive; this can happen when the peer terminates
            # unexpectedly, or the network connection between the
            # peers fails.
            self._buddy_left([buddy.props.nick, buddy.props.key])

        self._collab.connect('buddy_left', on_buddy_left_cb, 'buddy_left')

        self._collab.setup()
    def set_data(self, blob):
        # Deliberate no-op: this activity does not restore state from the blob.
        pass
    def get_data(self):
        # No serializable state to hand back; always returns None.
        return None
    def close(self, **kwargs):
        """Pause the game and release audio playback before closing."""
        self._bounce_window.pause()
        aplay.close()
        activity.Activity.close(self, **kwargs)
    def _configure_cb(self, event):
        """Handle screen size changes: resize the toolbar label, then
        forward the event to the game canvas."""
        if Gdk.Screen.width() < 1024:
            # Narrow screens: shrink the label and clear its text.
            self._label.set_size_request(275, -1)
            self._label.set_label('')
            self._separator.set_expand(False)
        else:
            self._label.set_size_request(500, -1)
            self._separator.set_expand(True)
        self._bounce_window.configure_cb(event)
        if self._toolbar_expanded():
            # An open toolbar covers part of the field; shift things up.
            self._bounce_window.bar.bump_bars('up')
            self._bounce_window.ball.ball.move_relative(
                (0, -style.GRID_CELL_SIZE))
def _toolbar_expanded(self):
if self._activity_button.is_expanded():
return True
elif self._custom_toolbar_button.is_expanded():
return True
return False
def _update_graphics(self, widget):
# We need to catch opening and closing of toolbars and ignore
# switching between open toolbars.
if self._toolbar_expanded():
if not self._toolbar_was_expanded:
self._bounce_window.bar.bump_bars('up')
self._bounce_window.ball.ball.move_relative(
(0, -style.GRID_CELL_SIZE))
self._toolbar_was_expanded = True
else:
if self._toolbar_was_expanded:
self._bounce_window.bar.bump_bars('down')
self._bounce_window.ball.ball.move_relative(
(0, style.GRID_CELL_SIZE))
self._toolbar_was_expanded = False
def _setup_toolbars(self):
custom_toolbar = Gtk.Toolbar()
toolbox = ToolbarBox()
self._toolbar = toolbox.toolbar
self._activity_button = ActivityToolbarButton(self)
self._activity_button.connect('clicked', self._update_graphics)
self._toolbar.insert(self._activity_button, 0)
self._activity_button.show()
self._custom_toolbar_button = ToolbarButton(
label=_('Custom'),
page=custom_toolbar,
icon_name='view-source')
self._custom_toolbar_button.connect('clicked', self._update_graphics)
custom_toolbar.show()
|
amwelch/a10sdk-python | a10sdk/core/aam/aam_authentication_relay_form_based_instance_request_uri.py | Python | apache-2.0 | 3,904 | 0.008709 | from a10sdk.common.A10BaseClass import A10BaseClass
class CookieValue(A10BaseClass):

    """Nested config object holding a POST-packet cookie value.

    Not directly CRUD-capable; manipulate it through its parent object.

    :param cookie_value: cookie string to match in the POST packet (1-127 chars).
    :param DeviceProxy: device proxy for REST operations and session handling.
        Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "cookie-value"
        self.DeviceProxy = ""
        self.cookie_value = ""

        # Any keyword argument overrides the matching attribute above.
        for name, val in kwargs.items():
            setattr(self, name, val)
class Cookie(A10BaseClass):

    """Container grouping cookie settings for the parent request-uri.

    Not directly CRUD-capable; manipulate it through its parent object.

    :param DeviceProxy: device proxy for REST operations and session handling.
        Refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "cookie"
        self.DeviceProxy = ""
        self.cookie_value = {}

        # Any keyword argument overrides the matching attribute above.
        for name, val in kwargs.items():
            setattr(self, name, val)
class RequestUri(A10BaseClass):

    """Class Description::
    URI of authentication web page.

    Class request-uri supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    :param other_variables: {"description": "Specify other variables (n1=v1&n2=v2) in form relay", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 127, "type": "string"}
    :param action_uri: {"description": "Specify the action-URI", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 127, "type": "string"}
    :param uri: {"description": "Specify request URI", "format": "string-rlx", "minLength": 1, "optional": false, "maxLength": 127, "type": "string"}
    :param user_variable: {"description": "Specify username variable name", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
    :param domain_variable: {"description": "Specify domain variable name", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
    :param password_variable: {"description": "Specify password variable name", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
    :param match_type: {"optional": false, "enum": ["equals", "contains", "starts-with", "ends-with"], "type": "string", "description": "'equals': URI exactly matches the string; 'contains': URI string contains another sub string; 'starts-with': URI string starts with sub string; 'ends-with': URI string ends with sub string; ", "format": "enum"}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/aam/authentication/relay/form-based/instance/{name}/request-uri/{match_type}+{uri}`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = [ "match_type","uri"]
        self.b_key = "request-uri"
        self.a10_url="/axapi/v3/aam/authentication/relay/form-based/instance/{name}/request-uri/{match_type}+{uri}"
        self.DeviceProxy = ""
        self.other_variables = ""
        self.action_uri = ""
        self.uri = ""
        self.cookie = {}
        self.user_variable = ""
        self.domain_variable = ""
        self.password_variable = ""
        self.match_type = ""
        self.uuid = ""
        # Any keyword argument overrides the matching attribute above.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
|
ikvk/imap_tools | tests/messages_data/rfc2822/example07.py | Python | apache-2.0 | 1,101 | 0.004541 | import datetime
from imap_tools import EmailAddress
# Expected parsed fields for the rfc2822 example07 fixture message.
# (Repaired two garbled tokens: 'reply_to' and the 'Re: Saying Hello' header.)
DATA = dict(
    subject='Re: Saying Hello',
    from_='jdoe@machine.example',
    to=('smith@home.example',),
    cc=(),
    bcc=(),
    reply_to=(),
    date=datetime.datetime(1997, 11, 21, 11, 0, tzinfo=datetime.timezone(datetime.timedelta(-1, 64800))),
    date_str='Fri, 21 Nov 1997 11:00:00 -0600',
    text='This is a reply to your reply.\r\n',
    html='',
    headers={'to': ('"Mary Smith: Personal Account" <smith@home.example>',), 'from': ('John Doe <jdoe@machine.example>',), 'subject': ('Re: Saying Hello',), 'date': ('Fri, 21 Nov 1997 11:00:00 -0600',), 'message-id': ('<abcd.1234@local.machine.tld>',), 'in-reply-to': ('<3456@example.net>',), 'references': ('<1234@local.machine.example> <3456@example.net>',)},
    attachments=[],
    from_values=EmailAddress('John Doe', 'jdoe@machine.example', 'John Doe <jdoe@machine.example>'),
    to_values=(EmailAddress('Mary Smith: Personal Account', 'smith@home.example', 'Mary Smith: Personal Account <smith@home.example>'),),
    cc_values=(),
    bcc_values=(),
    reply_to_values=(),
)
reubano/tabutils | meza/fntools.py | Python | mit | 34,504 | 0.000058 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
meza.fntools
~~~~~~~~~~~~
Provides methods for functional manipulation of content
Examples:
basic usage::
>>> from meza.fntools import underscorify
>>>
>>> header = ['ALL CAPS', 'Illegal $%^', 'Lots of space']
>>> result = {'all_caps', 'illegal', 'lots_of_space'}
>>> set(underscorify(header)) == result
True
Attributes:
DEF_TRUES (tuple[str]): Values to be consider True
DEF_FALSES (tuple[str]): Values to be consider False
ARRAY_TYPE (dict): Python to array.array type lookup table
NP_TYPE (dict): Python to numpy type lookup table
DB_TYPE (dict): Python to postgres type lookup table
SQLITE_TYPE (dict): Python to sqlite type lookup table
ARRAY_NULL_TYPE (dict): None to array.array type lookup table
"""
import sys
import itertools as it
import operator
import time
from functools import partial, reduce
from collections import defaultdict
from json import JSONEncoder
from os import path as p
from itertools import zip_longest, filterfalse
import pygogo as gogo
from slugify import slugify
from . import CURRENCIES, ENCODING
from .compat import encode
# String values recognized as booleans by bool-casting helpers.
DEF_TRUES = ('yes', 'y', 'true', 't')
DEF_FALSES = ('no', 'n', 'false', 'f')

# meza type name -> numpy dtype string.
NP_TYPE = {
    'null': 'bool',
    'bool': 'bool',
    'int': 'i',
    'float': 'f',
    'double': 'd',
    'decimal': 'd',
    'datetime': 'datetime64[us]',
    'time': 'timedelta64[us]',
    'date': 'datetime64[D]',
    'text': 'object_'}

# meza type name -> array.array typecode.
ARRAY_TYPE = {
    'null': 'B',
    'bool': 'B',
    'int': 'i',
    'float': 'f',
    'double': 'd',
    'decimal': 'd',
    'text': 'u'}

# meza type name -> PostgreSQL column type.
POSTGRES_TYPE = {
    'null': 'boolean',
    'bool': 'boolean',
    'int': 'integer',
    'float': 'real',
    'double': 'double precision',
    'decimal': 'decimal',
    'datetime': 'timestamp',
    'time': 'time',
    'date': 'date',
    'text': 'text'}

# meza type name -> MySQL column type.
MYSQL_TYPE = {
    'null': 'CHAR(0)',
    'bool': 'BOOL',
    'int': 'INT',
    'float': 'FLOAT',
    'double': 'DOUBLE',
    'decimal': 'DECIMAL',
    'datetime': 'DATETIME',
    'time': 'TIME',
    'date': 'DATE',
    'text': 'TEXT'}

# meza type name -> SQLite column type.
SQLITE_TYPE = {
    'null': 'INT',
    'bool': 'INT',
    'int': 'INT',
    'float': 'REAL',
    'double': 'REAL',
    'decimal': 'REAL',
    'datetime': 'TEXT',
    'time': 'TEXT',
    'date': 'TEXT',
    'text': 'TEXT'}

# array.array typecode -> value used to represent null entries.
ARRAY_NULL_TYPE = {
    'B': False,
    'i': 0,
    'f': 0.0,
    'd': 0.0,
    'u': ''}

# Largest usable int: sys.maxint on Python 2, sys.maxsize on Python 3.
try:
    MAXINT = sys.maxint  # pylint: disable=sys-max-int
except AttributeError:
    MAXINT = sys.maxsize

# Module-level logger (monolog: single handler).
logger = gogo.Gogo(__name__, monolog=True).logger
class Objectify(object):
    """Creates an object with dynamically set attributes. Useful
    for accessing the kwargs of a function as attributes.
    """
    def __init__(self, kwargs, func=None, **defaults):
        """ Objectify constructor

        Args:
            kwargs (dict): The attributes to set
            func (callable): Optional post-processor applied to values on
                item/attribute access
            defaults (dict): The default attributes

        Examples:
            >>> kwargs = {'key_1': 1, 'key_2': 2}
            >>> defaults = {'key_2': 5, 'key_3': 3}
            >>> kw = Objectify(kwargs, **defaults)
            >>> sorted(kw) == ['key_1', 'key_2', 'key_3']
            True
            >>> dict(kw) == {'key_1': 1, 'key_2': 2, 'key_3': 3}
            True
            >>> kw.key_1
            1
            >>> kw['key_2']
            2
            >>> kw.get('key_3')
            3
            >>> kw.key_4
            >>> kw.get('key_4')
            >>> kw['key_4'] = 4
            >>> kw.key_4 == kw.get('key_4') == kw['key_4'] == 4
            True
            >>> kw.key_4 = 5
            >>> kw.key_4 == kw.get('key_4') == kw['key_4'] == 5
            True
        """
        # Explicit kwargs win over defaults.
        defaults.update(kwargs)
        self.data = defaults
        self.func = func
        self.keys = self.data.keys
        self.values = self.data.values
        self.items = self.data.items
        self.get = self.data.get

    def __repr__(self):
        return repr(self.data)

    def __getitem__(self, key):
        # Missing keys yield None (dict.get); func post-processes values.
        value = self.get(key)
        return self.func(value) if self.func else value

    def __setitem__(self, key, value):
        return self.data.__setitem__(key, value)

    def __setattr__(self, key, value):
        # Non-internal names are mirrored into the backing dict; the real
        # instance attribute is always set as well.
        if key not in {'data', 'func', 'keys', 'values', 'items', 'get'}:
            self.data.__setitem__(key, value)

        return super(Objectify, self).__setattr__(key, value)

    def __getattr__(self, name):
        # Fall back to item access for unknown attributes.
        return self.__getitem__(name)

    def __delitem__(self, key):
        return self.data.__delitem__(key)

    def __delattr__(self, key):
        return self.__delitem__(key)

    def __iter__(self):
        return iter(self.data)

    def iteritems(self):
        # Python 2 style alias for iter(self.items()).
        return iter(self.items())
class Andand(object):
    """Null-soaking attribute chain, inspired by Ruby's ``andand``.

    Wraps a value so that arbitrarily deep attribute access never raises
    ``AttributeError``: missing links produce empty ``Andand`` instances
    whose ``item`` is ``None``. Unwrap via ``.item`` or by calling.

    Examples:
        >>> kwargs = {'key': 'value'}
        >>> kw = Objectify(kwargs)
        >>> Andand(kw).key.item == 'value'
        True
        >>> Andand(kw).key() == 'value'
        True
        >>> Andand(kw).key.imnot.here.item
        >>> Andand(kw).key.imnot.here()
    """
    def __init__(self, item=None):
        self.item = item

    def __getattr__(self, name):
        # Soak missing attributes: wrap hits in Andand, swallow misses.
        try:
            wrapped = getattr(self.item, name)
        except AttributeError:
            return Andand()

        return wrapped if name == 'item' else Andand(wrapped)

    def __call__(self):
        return self.item
class CustomEncoder(JSONEncoder):
    """JSON encoder handling types the stdlib encoder rejects.

    Duck-typed conversions, checked in order: number-likes (``real``) to
    float, objects exposing ``to_dict`` to dict, decimal/date/time-likes
    to str, sets (``union``) to tuple, and iterator/list-likes to list.
    Anything else defers to ``JSONEncoder.default`` (which raises
    TypeError).
    """
    def default(self, obj):
        if hasattr(obj, 'real'):
            return float(obj)

        if hasattr(obj, 'to_dict'):
            return obj.to_dict()

        if {'quantize', 'year', 'hour'}.intersection(dir(obj)):
            return str(obj)

        if hasattr(obj, 'union'):
            return tuple(obj)

        if {'next', 'append'}.intersection(dir(obj)):
            return list(obj)

        return super(CustomEncoder, self).default(obj)
class SleepyDict(dict):
    """Dict that naps before length checks and ``get`` lookups.

    ``delay`` (seconds, default 0) is consumed from the constructor
    kwargs; the remaining arguments behave exactly like ``dict``'s.
    """
    def __init__(self, *args, **kwargs):
        self.delay = kwargs.pop('delay', 0)
        dict.__init__(self, *args, **kwargs)

    def __len__(self):
        time.sleep(self.delay)
        return dict.__len__(self)

    def get(self, key, default=None):
        time.sleep(self.delay)
        return dict.get(self, key, default)
def underscorify(content):
    """ Slugify each element of an iterable using underscore separators

    Args:
        content (Iter[str]): the strings to normalize

    Yields:
        str: the slugified items

    Examples:
        >>> _ = underscorify(['ALL CAPS', 'Illegal $%^', 'Lots of space'])
        >>> list(_) == ['all_caps', 'illegal', 'lots_of_space']
        True
    """
    for text in content:
        try:
            yield slugify(text, separator='_')
        except TypeError:
            # Retry with the item encoded to bytes.
            yield slugify(text.encode(ENCODING), separator='_')
def get_ext(path):
    """ Return the file format implied by a path or URL

    A ``format=`` query parameter wins over the filename suffix and is
    lowercased; otherwise the extension (minus the dot) is returned with
    its original case.

    Args:
        path (str): file path or URL

    Returns:
        str: the detected format ('' when there is none)

    Examples:
        >>> get_ext('file.csv') == 'csv'
        True
    """
    if 'format=' in path:
        fmt = path.lower().split('format=')[1]
        # Drop any trailing query parameters.
        return fmt.partition('&')[0]

    return p.splitext(path)[1].lstrip('.')
def get_dtype(_type, dialect='array'):
    """ Map a meza type name to the equivalent type for a given dialect

    Args:
        _type (str): meza type name, e.g. 'int' or 'text'
        dialect (str): one of 'numpy', 'array', 'postgres', 'mysql',
            'sqlite' (default: 'array')

    Returns:
        the dialect specific type; unknown type names fall back to the
        dialect's 'text' type
    """
    lookup = {
        'numpy': NP_TYPE,
        'array': ARRAY_TYPE,
        'postgres': POSTGRES_TYPE,
        'mysql': MYSQL_TYPE,
        'sqlite': SQLITE_TYPE}[dialect]

    return lookup.get(_type, lookup['text'])
def |
maheshcn/memory-usage-from-ldfile | openpyxl/tests/test_strings.py | Python | gpl-2.0 | 1,270 | 0 | # Copyright (c) 2010-2015 openpyxl
# package imports
from openpyxl.reader.strings import read_string_table
from openpyxl.tests.helper import co | mpare_xml
def test_read_string_table(datadir):
    """sharedStrings.xml parses into the expected list of strings."""
    datadir.join('reader').chdir()
    src = 'sharedStrings.xml'
    with open(src) as content:
        assert read_string_table(content.read()) == [
            'This is cell A1 in Sheet 1', 'This is cell G5']
def test_empty_string(datadir):
    """An empty shared-string entry round-trips as ''."""
    datadir.join('reader').chdir()
    src = 'sharedStrings-emptystring.xml'
    with open(src) as content:
        assert read_string_table(content.read()) == ['Testing empty cell', '']
def test_formatted_string_table(datadir):
    """Rich-text runs are flattened to their plain-text pieces."""
    datadir.join('reader').chdir()
    src = 'shared-strings-rich.xml'
    with open(src) as content:
        assert read_string_table(content.read()) == [
            'Welcome', 'to the best shop in town', " let's play "]
def test_write_string_table(datadir):
    """Writing a string table reproduces the reference sharedStrings.xml."""
    from openpyxl.writer.strings import write_string_table

    datadir.join("reader").chdir()
    table = ['This is cell A1 in Sheet 1', 'This is cell G5']
    content = write_string_table(table)
    with open('sharedStrings.xml') as expected:
        diff = compare_xml(content, expected.read())
        assert diff is None, diff
|
bjorncooley/maleo | courses/migrations/0006_auto_20151019_1127.py | Python | mit | 552 | 0.001812 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Constrain StudentCourseProfile.skill_level to fixed choices.

    Repaired two garbled tokens: the ``unicode_literals`` import and the
    ``'beginner'`` default string.
    """

    dependencies = [
        ('courses', '0005_studentcourseprofile_skill_level'),
    ]

    operations = [
        migrations.AlterField(
            model_name='studentcourseprofile',
            name='skill_level',
            field=models.CharField(max_length=255, default='beginner', choices=[('beginner', 'Beginner'), ('intermediate', 'Intermediate'), ('advanced', 'Advanced')]),
        ),
    ]
|
blurstudio/cross3d | cross3d/maya/mayascenemodel.py | Python | mit | 17,183 | 0.026712 | import re
import maya.cmds as cmds
import maya.OpenMaya as om
import cross3d
from collections import OrderedDict
from cross3d.abstract.abstractscenemodel import AbstractSceneModel
resolutionAttr = 'resolution'
#------------------------------------------------------------------------
class MayaSceneModel(AbstractSceneModel):
""" Used to handle Maya's file Referencing
Model Requirements:
A model is a namespaced transform node named "Model" with a user prop named "model".
The namespace is used as the model's name and displayName. If the model has a user prop named
"referenced" with a value, the first time a reference method is called on the model an
"resolution" enum. and a "resolutions" user prop will be created.
Referencing:
MayaSceneModel stores a "resolutions" OrderedDict userProp containing resolution name keys
and filePath values. It stores the current resolution in a enum attribute on the model called
"resolution". Currently changing this attribute does nothing, but I plan to make it
call setResolution in the future. I am tracking the reference path of the current resolution
by cross-referencing both "resolution" and "resolutions". This is neccissary to keep track
of the {1} maya adds the second time a reference is loaded. This means that if you swap
reference in the Reference Editor, it will loose the reference. I plan to change this to lookup
the reference node from the namespace.
"""
def _modifiedAttrs(self):
""" Returns a dictionary of modifications made to this referenced model.
For referneced models return info describing the modifications made by the referencing
system.
"""
modified = {}
if self.isReferenced():
fullName = self.path()
refNode = cmds.referenceQuery(fullName, referenceNode=True)
# Build the setAttr pattern
pattern = r'setAttr {node}.(?P<attr>[^ ]+)'.format(node = fullName.replace('|', r'\|'))
setAttrRegex = re.compile(pattern)
# TODO: Add patterns for other mel commands like addAttr, etc
for s in cmds.referenceQuery(refNode, editStrings=True):
# Process setAttr
match = setAttrRegex.match(s)
if match:
key = match.groupdict()['attr']
if s.endswith('"'):
# Found a string. Note, this does not include escaped quotes.
openingQuote = s[::-1].find('"', 1)
value = s[-openingQuote:-1]
else:
# its not a string
value = s.split(' ')[-1]
modified.setdef | ault(key, {}).setdefault('setAttr', {}).update(value=value, command=s)
return modified
def _attrName(self):
return '{node}.{attr}'.format(n | ode=self._nativeName(), attr=resolutionAttr)
	def __init__(self, scene, nativeObject):
		# No extra state of its own; defers entirely to AbstractSceneModel.
		super(MayaSceneModel, self).__init__(scene, nativeObject)
def addResolution(self, name='', path='', load=False):
if self.isReferenced():
reses = self.userProps().get('resolutions', OrderedDict())
reses[name] = path
self.userProps()['resolutions'] = reses
# Update the enum
cmds.addAttr(self._attrName(), edit=True, enumName=':'.join(self.resolutions()))
if load:
self.setResolution(name)
return True
return False
def displayName(self):
# Ignore the name of the object, we only care about the namespace for tools.
return self._namespace(self._nativeTransform).get('namespace', '')
	def setDisplayName(self, name):
		""" Renames the model's namespace (or the node itself if un-namespaced).

			Dashes are normalized to underscores. For referenced models the
			reference is re-mapped to the new namespace; for local namespaced
			models the namespace is renamed, auto-incrementing on clashes.
		"""
		name = name.replace('-', '_')

		# If the model using a namespace, rename the namespace not the object.
		namespace = self._namespace(self._nativeTransform)['namespace']
		if namespace:
			if namespace == name:
				# Renaming the model to its current name, nothing to do.
				return
			# TODO: pull the reference node from the namespace instead of storing it in a user prop
			# that way if a user swaps reference in the Reference Editor we won't loose track of it.
			filename = self.resolutionPath(self.resolution())
			if self.isReferenced() and filename:
				cmds.file(filename, edit=True, namespace=name, mergeNamespacesOnClash=True)
				# Doc's say cmds.file should move non-referenced nodes to the new namespace, but
				# in practice it doesn't. If the old namespace still exists, move all of its
				# nodes into the new namespace and remove the old namespace
				if namespace in cmds.namespaceInfo(listOnlyNamespaces=True):
					cmds.namespace(moveNamespace=[namespace, name])
					cmds.namespace(removeNamespace=namespace)
			else:
				namespaces = cmds.namespaceInfo(listOnlyNamespaces=True)
				if name in namespaces:
					# If the namespace already exists we need to auto-increment the value or the
					# rename command will error out
					# reverse the name and pull off any trailing digits
					revName = re.match('(?P<revIter>\d*)(?P<name>.+)', name[::-1])
					if revName:
						n = revName.group('name')[::-1]
						v = int(revName.group('revIter')[::-1] or 1)
						while '{name}{revIter}'.format(name=n, revIter=v) in namespaces:
							v += 1
						name = '{name}{revIter}'.format(name=n, revIter=v)
					else:
						name = '{name}1'.format(name=name)
				cmds.namespace(rename=[namespace, name])
			return
		super(MayaSceneModel, self).setDisplayName(name)
	def _createResolutionComboBox(self):
		""" Builds the 'resolution' enum attribute used to switch resolutions.

			Removes any stale 'resolution' user prop, then creates the enum
			from the 'resolutions' user prop keys.
		"""
		userProps = cross3d.UserProps(self._nativePointer)

		# Local models have a resolution metadata.
		# Maybe it's not a good idea.
		if resolutionAttr in userProps:
			del userProps[resolutionAttr]

		resolutions = ':'.join(userProps.get('resolutions', []))

		# Object should support referencing, but referencing hasn't been setup, so create the structure.
		cmds.addAttr(self._nativeName(), longName=resolutionAttr, attributeType="enum", enumName=resolutions)

		# Make the attribute viewable, but not keyable in the channelBox
		try:
			cmds.setAttr(self._attrName(), keyable=False, channelBox=True)

		# Consume a runtime error if the resolution attribute was in the reference. This is only a
		# issue with some of our first models, Asset Exporter will remove them from future exports.
		except RuntimeError as error:
			pattern = r"setAttr: The attribute '[^']+' is from a referenced file, thus the keyable state cannot be changed."
			if not re.match(pattern, error.message):
				raise
def isReferenced(self):
userProps = cross3d.UserProps(self._nativePointer)
if userProps.get('referenced', False):
# Checking if we need to set the resolution combobox.
if not resolutionAttr in cmds.listAttr(self._nativeName()):
# Create the user props for the reference.
userProps['resolutions'] = OrderedDict(Offloaded='')
self._createResolutionComboBox()
return True
return False
def export(self, fileName):
name = self.name()
objects = self.objects()
selection = self._scene.selection()
# Selecting the object.
self._scene.setSelection(objects)
# Make sure we set the current namespace to root otherwise the next line does not work.
initialNamespace = cmds.namespaceInfo(currentNamespace=True)
cmds.namespace(setNamespace=":" )
# Trashing the namespace.
cmds.namespace(removeNamespace=name, mergeNamespaceWithRoot=True)
# Remove attributes that should not be stored in a exported model.
userProps = self.userProps()
resolutions = None
resolution = {}
if 'resolutions' in userProps:
resolutions = userProps.pop('resolutions')
if resolutionAttr in userProps:
# Resolution is a bit complicated because it can be a enum.
if cmds.attributeQuery(resolutionAttr, node= self._nativeName(), attributeType=True) == 'enum':
# Store the enum list
resolution['names'] = cmds.attributeQuery( resolutionAttr, node=self._nativeName(), listEnum=True)
resolution['value'] = self.userProps().pop(resolutionAttr)
# Export the model
cmds.file(fileName, force=True, exportSelected=True, typ="mayaAscii", usingNamespaces=False)
# Restore the attributes we used earlier
userProps = self.userProps()
if resolutions != None:
userProps['resolutions'] = resolutions
if resolution:
if 'names' in resolution:
cmds.addAttr(self._nativeName(), lo |
SUSE/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/network_access_control_entry.py | Python | mit | 1,490 | 0.000671 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkAccessControlEntry(Model):
    """Network access control entry.

    Repaired two garbled tokens: 'precedence' in the docstring and the
    'type' key of the remote_subnet attribute map.

    :param action: Action object. Possible values include: 'Permit', 'Deny'
    :type action: str or :class:`AccessControlEntryAction
     <azure.mgmt.web.models.AccessControlEntryAction>`
    :param description: Description.
    :type description: str
    :param order: Order of precedence.
    :type order: int
    :param remote_subnet: Remote subnet.
    :type remote_subnet: str
    """

    _attribute_map = {
        'action': {'key': 'action', 'type': 'AccessControlEntryAction'},
        'description': {'key': 'description', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'remote_subnet': {'key': 'remoteSubnet', 'type': 'str'},
    }

    def __init__(self, action=None, description=None, order=None, remote_subnet=None):
        self.action = action
        self.description = description
        self.order = order
        self.remote_subnet = remote_subnet
|
satyrius/cmsplugin-articles | cmsplugin_articles/south_migrations/0002_auto__add_teaserextension.py | Python | mit | 8,573 | 0.007699 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the cmsplugin_articles_teaserextension table."""
        # Adding model 'TeaserExtension'
        db.create_table(u'cmsplugin_articles_teaserextension', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('public_extension', self.gf('django.db.models.fields.related.OneToOneField')(related_name='draft_extension', unique=True, null=True, to=orm['cmsplugin_articles.TeaserExtension'])),
            ('extended_object', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.Page'], unique=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'cmsplugin_articles', ['TeaserExtension'])
    def backwards(self, orm):
        """Drop the cmsplugin_articles_teaserextension table."""
        # Deleting model 'TeaserExtension'
        db.delete_table(u'cmsplugin_articles_teaserextension')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering | ': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publ | isher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'cmsplugin_articles.articlesplugin': {
'Meta': {'object_name': 'ArticlesPlugin', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'limit': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'cmsplugin_articles.teaserextension': {
'Meta': {'object_name': 'TeaserExtension'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extended_object': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.Page']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_extension': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'draft_extension'", 'unique': 'True', 'null': 'True', 'to': u"orm['cmsplugin_articles.TeaserExtension']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'orderi |
NCLAB2016/DF_STEALL_ELECTRIC | nn.py | Python | gpl-3.0 | 5,327 | 0.017646 | from __future__ import print_function
import tensorflow as tf
import numpy as np
from six.moves import cPickle as pickle
from sklearn.decomposition import PCA
from sklearn import preprocessing
import random
dataset = '/media/dat1/liao/dataset/new_new_try/'
train_data_filename = dataset + 'train_data_statis.pickle'
train_label_filename = dataset + 'train_label.pickle'
test_data_filename = dataset + 'test_data_statis.pickle'
test_uid_filename = dataset + 'test_uid.pickle'
# load dataset
print('load dataset...')
with open(train_data_filename, 'rb') as f:
train_data = pickle.load(f)
with open(train_label_filename, 'rb') as f:
train_label = pickle.load(f)
with open(test_data_filename, 'rb') as f:
test_data = pickle.load(f)
with open(test_uid_filename, 'rb') as f:
test_uid = pickle.load(f)
print ('Training set', train_data.shape, train_label.shape)
print ('Test set', test_data.shape)#, test_uid.shape)
n_classes = 2
shulf = np.column_stack((train_data, train_label))
random.shuffle(shulf)
train_data = shulf[:,:-1]
train_label = shulf[:,-1]
print('reformat dataset...')
pca = PCA(300).fit(train_data)
train_data = pca.transform(train_data)
test_data = pca.transform(test_data)
n_samples, n_features = train_data.shape
train_label = (np.arange(n_classes) == train_label[:,None]).astype(np.float32)
#test_label = (np.arange(n_classes) == test_label[:,None]).astype(np.float32)
test_data = test_data.astype(np.float32)
#train | _subset = range(0,1200) + range(2800, 9900)
#valid_subset = range(1200, 2800)
#train_data = train_data[train_subset | ]
#train_label = train_label[train_subset]
#valid_data = train_data[valid_subset]
#valid_label = train_label[valid_subset]
#tf_test_data = tf.constant(test_data)
train_data = preprocessing.normalize(train_data, norm='l2')
#valid_data = preprocessing.normalize(valid_data, norm='l2')
print ('Training set', train_data.shape, train_label.shape)
#print ('Validation set', valid_data.shape, valid_label.shape)
print ('Test set', test_data.shape)#, test_uid.shape)
# constant
learning_rate = 0.001
num_steps = 200000
test_inter = 200
hidden1_units = 100
hidden2_units = 32
batch_size = 100
def ip_layer(x, W, b):
return tf.matmul(x, W) + b
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder(tf.float32, [None, n_features])
y_ = tf.placeholder(tf.float32, [None, n_classes])
# hidden 1
W1 = tf.Variable(
tf.truncated_normal([n_features, hidden1_units]))
b1 = tf.Variable(
tf.constant(0.1, shape=[hidden1_units]))
hidden1 = tf.nn.relu(ip_layer(x, W1, b1))
test_h1 = tf.nn.relu(ip_layer(test_data, W1, b1))
# hidden 2
# W2 = tf.Variable(
# tf.truncated_normal([hidden1_units, hidden2_units]))
# b2 = tf.Variable(
# tf.constant(0.1, shape=[hidden2_units]))
# hidden2 = tf.nn.relu(ip_layer(hidden1, W2, b2))
# dropout
keep_prob = tf.placeholder("float")
h2_drop = tf.nn.dropout(hidden1, keep_prob)
# output layer
W3 = tf.Variable(tf.truncated_normal([hidden1_units, n_classes]))
b3 = tf.Variable(tf.constant(0.1, shape=[n_classes]))
output = tf.nn.relu(ip_layer(h2_drop, W3, b3))
test_output = tf.nn.relu(ip_layer(test_h1, W3, b3))
y = tf.nn.softmax(output)
test_prob = tf.nn.softmax(test_output)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(output, y_))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
a_accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_label.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_data[offset:(offset + batch_size), :]
batch_labels = train_label[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {x : batch_data, y_ : batch_labels, keep_prob : 0.5}
_, l, predictions = session.run(
[optimizer, loss, y], feed_dict=feed_dict)
if (step % test_inter == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print(session.run(test_prob))
result = session.run(test_prob)
class Score:
def __init__(self, uid, prob):
self.uid = uid
self.prob = prob
print(result)
scores = list()
for i in range(len(test_uid)):
scores.append(Score(test_uid[i], result[i][1]))
scores = sorted(scores, key=lambda x: x.prob, reverse=True)
ff = open('raw.csv', 'w')
for i in range(len(test_uid)):
uid = scores[i].uid
ff.write(uid + ',' + str(scores[i].prob) + '\n')
ff.close()
with open('result.csv', 'w') as f:
for i in range(len(test_uid)):
f.write(scores[i].uid + '\n')
|
clarkduvall/JSOL | passer.py | Python | apache-2.0 | 1,058 | 0.00189 | #!/usr/bin/env python
# Copyright 2012 Clark DuVall
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This program illustrates how a JSOL program can be sent between applications.
# A fib function is created in the first Eval, and then sent to the second Eval
# to be run.
import jsol
import json
with open('examples/jsol/part1.jsol') as f:
fib = jsol.Eval(json.load(f)).json()
print 'JSOL created!:'
pri | nt json.dumps(fib, indent=3)
print 'Passing to other instance...'
with | open('examples/jsol/part2.jsol') as f:
jsol.Eval(json.load(f), argv=fib)
|
dimoynwa/DLivescore | data/match.py | Python | gpl-2.0 | 3,878 | 0.001805 | from datetime import datetime, timedelta
import time
class Match:
def __init__(self, json):
i = (int)(json['_links']['competition']['href'].rfind('/') + 1)
self.competitionId = (int)(json['_links']['competition']['href'][i:])
ind = (int)(json['_links']['self']['href'].rfind('/') + 1)
self.matchId = (int)(json['_links']['self']['href'][ind:])
self.homeTeamName = json['homeTeamName']
self.awayTeamName = json['awayTeamName']
self.homeTeamGoals = json['result']['goalsHomeTeam']
self.awayTeamGoals = json['result']['goalsAwayTeam']
self.date = json['date']
self.status = json['status']
self.favourite = False
self.odds = {}
if(json.get('odds', False)):
self.odds = json['odds']
self.updatedStatus = ''
def __str__(self):
fav = ''
if self.favourite:
fav = 'Fav.'
homeGoals = self.homeTeamGoals
awayGoals = self.awayTeamGoals
if self.homeTeamGoals is None:
homeGoals = '-'
if self.awayTeamGoals is None:
awayGoals = '-'
return self.updatedStatus + 'Id : ' + (str)(self.matchId) + ' ' + \
(str)(self.calculateMinutes()) + ' ' + self.homeTeamName + \
' ' + \
(str)(homeGoals) + ' : ' + \
(str)(awayGoals) + ' ' + self.awayTeamName + ' ' + fav
def calculateMinutes(self):
if self.status == 'TIMED' or self.status == 'SCHEDULED':
return self.date[11:16]
elif self.status == 'FINISHED':
return 'FT'
elif self.status == 'IN_PLAY':
'''
tf = '%Y-%m-%dT%H:%M:%S'
dt1 = datetime.strptime(self.date[:19], tf)
dt2 = datetime.strftime(tf, time.gmtime())
dt2 = datetime.strptime(dt2, tf)
mins = ((dt2 - dt1) // timedelta(minutes=1))
'''
now = time.strftime("%H:%M:%S")
nowH = (int)(now[0:2]) - 3
nowM = (int)(now[3:5])
matchH = (int)(self.date[11:13])
metchM = (int)(self.date[14:16])
mins = 0
if nowH == matchH:
mins = nowM - matchM
elif nowH == matchM + 1:
mins = 60 - matchM + nowM
elif nowH == matchM + 2:
mins = 60 - matchM + nowM + 60
if mins > 45 and mins <= 60:
mins = 'HT'
elif mins > 60:
| mins | = mins - 15
return (str)(mins) + '\''
else:
return self.status
def refresh(self, json):
if self.status == 'FINISHED':
return
newHomeGoals = json['result']['goalsHomeTeam']
newAwayGoals = json['result']['goalsAwayTeam']
self.status = json['status']
self.updatedStatus = ''
if self.homeTeamGoals != newHomeGoals:
self.updatedStatus += self.homeTeamName + ' scores in ' + \
(str)(self.calculateMinutes()) + '\n'
self.homeTeamGoals = newHomeGoals
if self.awayTeamGoals != newAwayGoals:
self.updatedStatus += self.awayTeamName + ' scores in ' + \
(str)(self.calculateMinutes()) + '\n'
self.awayTeamGoals = newAwayGoals
def printMatchOdds(self):
if self.odds:
print('Odds : ' + self.homeTeamName + ' : ' +\
(str)(self.odds['homeWin']) +\
' Draw : ' + (str)(self.odds['draw']) +\
' ' + self.awayTeamName + ' : ' +\
(str)(self.odds['awayWin']))
def markAsFavourite(self):
self.favourite = True
def markAsNotFavourite(self):
self.favourite = False
|
picoCTF/picoCTF | picoCTF-shell/hacksport/deploy.py | Python | mit | 40,196 | 0.002264 | import logging
from subprocess import CalledProcessError
import tarfile
"""
Handles deployment of an installed problem.
Deploying a problem means creating one or more instances, which are each
templated with flags, the shell server URL, etc., and assigned a port
(if required for their problem type).
Flags and assigned ports will remain consistent for (problem, instance) pairs
across any shell servers that share the SHARED_ROOT directory.
However, | instances mu | st still be created individually on each shell server,
as server URLs must be templated appropriately, dependencies potentially
need to be installed on each server, and the underlying files, users and
service definitions that make up a deployed instance are specific to each
shell server.
"""
HIGHEST_PORT = 65535
LOWEST_PORT = 1025
CONTAINER_PORT = 5000
LOCALHOST = "127.0.0.1"
PROBLEM_FILES_DIR = "problem_files"
STATIC_FILE_ROOT = "static"
XINETD_SERVICE_PATH = "/etc/xinetd.d/"
TEMP_DEB_DIR = "/tmp/picoctf_debs/"
FLAG_FMT = "%s"
# will be set to the configuration module during deployment
shared_config = None
local_config = None
port_map = {}
current_problem = None
current_instance = None
containerize = False
logger = logging.getLogger(__name__)
def get_deploy_context():
"""
Returns the deployment context, a dictionary containing the current
config, port_map, problem, instance
"""
global shared_config, local_config, port_map, current_problem, current_instance
return {
"shared_config": shared_config,
"local_config": local_config,
"port_map": port_map,
"problem": current_problem,
"instance": current_instance,
}
port_random = None
# checks if the port is being used by a system process
def check_if_port_in_use(port):
import socket, errno
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((LOCALHOST, port))
except socket.error as e:
return True
s.close()
return False
def flag_fmt():
"""Used to shim the command line passed flag format into the challenge class"""
return FLAG_FMT
def give_port():
"""
Returns a random port and registers it, unless running in a container which
always sets the port to a constant CONTAINER_PORT.
"""
global port_random
if containerize:
logger.debug(
f"Running in a container. Assigning fixed port: {CONTAINER_PORT}"
)
return CONTAINER_PORT
context = get_deploy_context()
# default behavior
if context["shared_config"] is None:
return randint(LOWEST_PORT, HIGHEST_PORT)
if "banned_ports_parsed" not in context["shared_config"]:
banned_ports_result = []
for port_range in context["shared_config"].banned_ports:
banned_ports_result.extend(
list(range(port_range["start"], port_range["end"] + 1))
)
context["shared_config"]["banned_ports_parsed"] = banned_ports_result
# during real deployment, let's register a port
if port_random is None:
port_random = Random(context["shared_config"].deploy_secret)
# if this instance already has a port, reuse it
if (context["problem"], context["instance"]) in context["port_map"]:
assigned_port = context["port_map"][(context["problem"], context["instance"])]
if assigned_port is not None:
logger.debug(
f"This problem instance ({context['problem']}: {str(context['instance'])}) already has an assigned port: {str(assigned_port)}"
)
return assigned_port
used_ports = [port for port in context["port_map"].values() if port is not None]
if (
len(used_ports) + len(context["shared_config"].banned_ports_parsed)
== HIGHEST_PORT + 1
):
raise Exception("All usable ports are taken. Cannot deploy any more instances.")
# Added used ports to banned_ports_parsed.
for port in used_ports:
context["shared_config"].banned_ports_parsed.append(port)
# in case the port chosen is in use, try again.
loop_var = HIGHEST_PORT - len(context["shared_config"].banned_ports_parsed) + 1
while loop_var > 0:
# Get a random port that is random, not in the banned list, not in use, and not assigned before.
port = port_random.choice(
[
i
for i in range(LOWEST_PORT, HIGHEST_PORT)
if i not in context["shared_config"].banned_ports_parsed
]
)
if check_if_port_in_use(port):
loop_var -= 1
context["shared_config"].banned_ports_parsed.append(port)
continue
return port
raise Exception(
"Unable to assigned a port to this problem. All ports are either taken or used by the system."
)
import functools
import json
import os
import shutil
import subprocess
import traceback
from abc import ABCMeta
from ast import literal_eval
from copy import copy, deepcopy
from grp import getgrnam
from hashlib import md5, sha1
from importlib.machinery import SourceFileLoader
# These are below because of a circular import issue with problem.py and give_port
# [TODO] cleanup
from os.path import commonprefix, isdir, isfile, join
from pwd import getpwnam
from random import randint, Random
from time import sleep
from hacksport.operations import create_user, execute
from hacksport.problem import (
Compiled,
Directory,
ExecutableFile,
File,
FlaskApp,
GroupWriteDirectory,
PHPApp,
WebService,
PreTemplatedFile,
ProtectedFile,
Remote,
Service,
)
# must follow hacksport.problem due to dependency on Challenge
from hacksport.docker import DockerChallenge
from hacksport.status import get_all_problem_instances, get_all_problems
from jinja2 import Environment, FileSystemLoader, Template
from shell_manager.package import package_problem
from shell_manager.util import (
DEPLOYED_ROOT,
FatalException,
get_attributes,
get_problem,
get_problem_root,
sanitize_name,
STAGING_ROOT,
get_problem_root_hashed,
get_pid_hash,
get_bundle,
DEB_ROOT,
SHARED_ROOT,
get_shared_config,
get_local_config,
acquire_lock,
release_lock,
)
from spur import RunProcessError
PORT_MAP_PATH = join(SHARED_ROOT, "port_map.json")
def challenge_meta(attributes):
"""
Returns a metaclass that will introduce the given attributes into the class
namespace.
Args:
attributes: The dictionary of attributes
Returns:
The metaclass described above
"""
class ChallengeMeta(ABCMeta):
def __new__(cls, name, bases, attr):
attrs = dict(attr)
attrs.update(attributes)
return super().__new__(cls, name, bases, attrs)
return ChallengeMeta
def update_problem_class(Class, problem_object, seed, user, instance_directory):
"""
Changes the metaclass of the given class to introduce necessary fields before
object instantiation.
Args:
Class: The problem class to be updated
problem_name: The problem name
seed: The seed for the Random object
user: The linux username for this challenge instance
instance_directory: The deployment directory for this instance
Returns:
The updated class described above
"""
random = Random(seed)
attributes = deepcopy(problem_object)
# pass configuration options in as class fields
attributes.update(dict(shared_config))
attributes.update(dict(local_config))
attributes.update(
{
"random": random,
"user": user,
"directory": instance_directory,
"server": local_config.hostname,
}
)
return challenge_meta(attributes)(Class.__name__, Class.__bases__, Class.__dict__)
def get_username(problem_name, instance_number):
"""
Determine the username for a given problem instance.
Given limitation of 32char linux usernames with useradd, truncates generated
username to 28chars. This allows up to 1000 instances of problems with
usernames that do require truncation. |
UNINETT/nav | tests/unittests/eventengine/alerts_test.py | Python | gpl-2.0 | 2,574 | 0 | from unittest import TestCase
import datetime
from nav.models.event import EventQueue as Event, Subsystem, EventType
from nav.models.manage import Netbox, Device
from nav.eventengine.alerts import AlertGenerator
class MockedAlertGenerator(AlertGenerator):
def get_alert_type(self):
return None
def _find_existing_alert_history(self):
return None
class AlertFromEventBase(TestCase):
def setUp(self):
self.event = Event(
source=Subsystem('someone'),
netbox=Netbox(),
device=Device(),
subid="thing",
event_type=EventType('boxState'),
state=Event.STATE_START,
time=datetime.datetime.now(),
value=50,
severity=80,
)
class AlertFromEventTests(AlertFromEventBase):
def test_alert_from_event_copies_attributes(self):
alert = MockedAlertGenerator(self.event).make_alert()
self.assertEqual(alert.source, self.event.source)
self.assertEqual(alert.netbox, self.event.netbox)
self.assertEqual(alert.device, self.event.device)
se | lf.assertEqual(alert.subid, self.event.subid)
self.assertEqual(alert.state, | self.event.state)
self.assertEqual(alert.time, self.event.time)
self.assertEqual(alert.value, self.event.value)
self.assertEqual(alert.severity, self.event.severity)
def test_alert_from_event_copies_variables(self):
self.event.varmap = dict(foo='bar', parrot='dead')
alert = MockedAlertGenerator(self.event).make_alert()
self.assertEqual(alert.varmap, self.event.varmap)
class AlertHistoryFromEventTests(AlertFromEventBase):
def test_alerthist_from_event_copies_attributes(self):
history = MockedAlertGenerator(self.event).make_alert_history()
self.assertEqual(history.source, self.event.source)
self.assertEqual(history.netbox, self.event.netbox)
self.assertEqual(history.device, self.event.device)
self.assertEqual(history.subid, self.event.subid)
self.assertEqual(history.start_time, self.event.time)
self.assertEqual(history.value, self.event.value)
self.assertEqual(history.severity, self.event.severity)
self.assertEqual(history.end_time, datetime.datetime.max)
def test_should_not_create_alerthist_from_end_event(self):
self.event.state = self.event.STATE_END
alert = MockedAlertGenerator(self.event)
self.assertTrue(alert.make_alert_history() is None)
|
david-vavra/pyrage | pyrage/modules/dns.py | Python | gpl-2.0 | 1,216 | 0.004934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 David Vavra (vavra.david@email.cz)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is dist | ributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if no | t, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from yapsy.IPlugin import IPlugin
class DNS(IPlugin):
def __init__(self):
self.hosts = {}
def addHost(self,id,host):
self.hosts[id] = host
def parseContext(self,context,*args):
for dns in context.iter('dns_host'):
for dnsServer in dns.iter('dns_host'):
self.addHost(dnsServer.attrib['id'],dnsServer.text)
|
cloudify-cosmo/cloudify-manager | cloudify_types/cloudify_types/component/__init__.py | Python | apache-2.0 | 832 | 0 | # Copyright (c) 2017-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .operations import upload_blueprint # NOQA
from .operations import delete # NOQA
from .operations import create # NOQA
from .operations import execute_start # NOQA
from .oper | ations import refresh # NO | QA
|
reidlindsay/wins | sandbox/experiments/aloha/infocom/parse-per.py | Python | apache-2.0 | 6,546 | 0.010541 | #! /usr/bin/env python
"""
Parse PER vs. SINR data from trace files.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-10-19 17:04:02 -0500 (Wed, 19 Oct 2011) $
* $LastChangedRevision: 5220 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009-2011 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from wins import *
from wins.ieee80211 import *
from optparse import OptionParser
import sys
from copy import copy
from numpy import array
def read_trace(options, tracefile):
# load trace from file
tr = Trace()
tr.read(tracefile)
# return trace
return tr
DETECTFAIL1 = "not detected in LISTEN"
HEADERFAIL1 = "header parameters failed"
HEADERFAIL2 = "header decoding failed"
IGNOREFAIL1 = "ignore rxdata in DECODE"
IGNOREFAIL2 = "ignore detect in DECODE"
def parse_per_info(options, trace, fmt='bo', usemodel=False):
# initialize parameters
param, data = {}, []
mcs, rmsdelay = None, []
ncollision = options.ncollision
# parse trace
for e in trace.events:
obj, evt = e['obj'], e['event']
# check for MCS parameter
if ('phy-rate' in e):
rate = int(e['phy-rate'])
hparamfail = ('drop' in e) and (e['drop']==HEADERFAIL1)
if not hparamfail:
if mcs is None: mcs = rate
else: assert (mcs == rate)
# check for 802.11n RCV & DRP events
if (obj=="80211N"):
rcv, drp = (evt=="RCV"), (evt=="DRP")
x, y = None, None
if drp:
drop = e['drop']
notdetected = (drop==DETECTFAIL1)
hparamfail = (drop==HEADERFAIL1)
headerfail = (drop==HEADERFAIL2)
ignorefail = (drop==IGNOREFAIL1) or (drop==IGNOREFAIL2)
assert (notdetected or hparamfail or headerfail or ignorefail), "%s"%(e)
#sinr = float(e['dot11n-sinr'].lower().replace("db","") )
#x, y = sinr, 1.0 # log header drop as a packet error also
elif rcv:
sinr = float(e['dot11n-sinr'].lower().replace("db","") )
err = e['crc']
haserror = (err=="FAIL")
noerror = (err=="OK")
assert (haserror or noerror)
if usemodel:
per = float(e['dot11n-model-per'])
else:
if haserror: per = 1.0
else: per = 0.0
# check if ncollision matches
keepdata = True
if (ncollision is not None):
keepdata = False
if 'cif-collision' in e:
coll = eval(e['cif-collision'])
assert isinstance(coll, list)
keepdata = (len(coll) == ncollision)
if keepdata:
x, y = sinr, per
# log data point
if (x is not None) and (y is not None):
dp = {'x':x, 'y':y, 'ndata': 1}
data.append(dp)
# check for RMS delay
if (rcv or drp):
tau = float(e['dot11n-rmsdelay'])
rmsdelay.append(tau)
# check parameters
assert (rmsdelay)
assert (mcs is not None)
avgdelay = array(rmsdelay).mean()
pertype = "actual"
if usemodel: pertype = "model"
# return param and data
param['mcs'] = mcs
param['rmsdelay'] = avgdelay
param['format'] = fmt
label = "${\\rm PER}_{%s}$ ${\\rm (MCS = %d}$, "%(pertype,mcs)
if ncollision is not None: label +="$N_{coll} = %d$, "%(ncollision)
label += "$\\sigma_{rms} = %.3g ns)$"%(avgdelay*1e9)
param['label'] = label
return param, data
def parse_per():
usage = "%prog [OPTIONS] TRACEFILE1 [TRACEFILE2 ...]\n" + \
" Writes parsed data to standard output."
parser = OptionParser(usage=usage)
parser.add_option("-c", "--ncollision", dest="ncollision", type="int", \
default=None, help="Filter results using number of collisions. [default=%default]")
(options, args) = parser.parse_args()
if len(args)<1:
print "Insufficient number of arguments."
parser.print_help()
raise SystemExit
tracefile = args[0:]
numtraces = len(tracefile)
# set parameters
default_parameters = {'xlabel': "SINR (dB)", \
'ylabel': "PER", \
'title': "PER vs. SINR", \
'label': None, \
'source': None, \
'format': None}
lgd, formats = [], [('ro','r:'), ('bo', 'b:'), ('go', 'g:')]
for k in range(numtraces):
tfile = tracefile[k]
# treat as normal wins trace file
trace = read_trace(options, tfile)
fmt | = formats[k%len(formats)]
if not trace: continue
sys.stderr.write("Parsing trace from %s ...\n"%(tfile))
# parse actual PER from trace
param, data = parse_per_info(options, trace)
if data:
parameters = copy(default_parameters)
parameters.up | date(param)
parameters['source'] = tfile
parameters['format'] = fmt[0]
assert (param['label'] is not None)
parsed_data = {'parameters': parameters, 'data': data}
sys.stdout.write("%s\n"%(parsed_data) )
# parse model PER from trace
param, data = parse_per_info(options, trace, usemodel=True)
if data:
parameters = copy(default_parameters)
parameters.update(param)
parameters['source'] = tfile
parameters['format'] = fmt[1]
assert (param['label'] is not None)
parsed_data = {'parameters': parameters, 'data': data}
sys.stdout.write("%s\n"%(parsed_data) )
if __name__ == '__main__':
parse_per()
|
akarambir/shadow | shadow/prod_settings.py | Python | mit | 685 | 0.00292 | from .settings import *
# Enforce secret key from environment
SECRET_KEY = os.environ.get("NAINOMICS_KEY")
ALLOWED_HOSTS = [
'.nainomics.in',
]
CSRF_COOKIE_SECURE = True
SESSION_COOKIE | _SECURE = True
CONN_MAX_AGE = True
import dj_email_url
email_config = dj_email_url.config()
EMAIL_FILE_PATH = email_config['EMAIL_FILE_PATH']
EMAIL_HOST_USER = email_config['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = email_config['EMAIL_HOST_PASSWORD']
EMAIL_HOST = email_config['EMAIL_HOST']
EMAIL_PORT = email_config['EMAIL_PORT']
EMAIL_BACKEND = email_config['EMAIL_BACKEND']
EM | AIL_USE_TLS = email_config['EMAIL_USE_TLS']
#Settings for myks-contact
CONTACT_EMAILS = ['akarambir@gmail.com']
|
Rignak/Scripts-Python | DeepLearning/_Others/OCR/callback.py | Python | gpl-3.0 | 2,311 | 0.000865 | from os.path import join
from keras.callbacks import Callback
import matplotlib.pyplot as plt
class PlotLearnin | g(Callback):
"""Callback generating a fitness plot in a file after each epoch"""
def __init__(self, exam | ples=False):
super().__init__()
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.acc = []
self.val_acc = []
self.logs = []
self.examples = examples
def on_epoch_end(self, epoch, logs={}):
plt.ioff()
self.logs.append(logs)
self.x.append(self.i)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
if not self.examples:
self.acc.append(logs.get('acc'))
self.val_acc.append(logs.get('val_acc'))
plt.subplot(1, 2, 1)
self.i += 1
plt.plot(self.x, self.losses, label="Training")
plt.plot(self.x, self.val_losses, label="Validation")
plt.xlabel('Epoch')
plt.ylabel('loss')
plt.yscale('log')
plt.legend()
if not self.examples:
plt.subplot(1, 2, 2)
plt.plot(self.x, self.acc, label="Training")
plt.plot(self.x, self.val_acc, label="Validation")
plt.xlabel('Epoch')
plt.ylabel('accuracy')
plt.ylim(0, 1)
plt.legend()
plt.tight_layout()
plt.savefig(join('examples', 'plot.png'))
plt.close()
if self.examples:
z = self.model.predict(self.model.example[0][:6])
plot_example(self.model.example[0][:6], z, y=self.model.example[1][:6])
plt.savefig(join('epochs', f'epoch{self.i}.png'))
plt.close()
def plot_example(x, z, y=None):
x = x[:, :, :, ::-1]
z = z[:, :, :, ::-1]
if y is None:
n = 2
else:
n = 3
plt.figure(figsize=(16.3, 6.52))
for i, im in enumerate(x):
plt.subplot(n, len(x), 1 + i)
plt.imshow(im)
plt.subplot(n, len(x), i + 1 + len(x))
plt.imshow(z[i, :, :, 0], cmap='gray')
if n == 3:
plt.subplot(n, len(x), i + 1 + len(x) * 2)
plt.imshow(y[i, :, :, 0], cmap='gray')
plt.tight_layout()
plt.savefig(join('examples', 'example.png'))
|
iulian787/spack | var/spack/repos/builtin/packages/r-quantreg/package.py | Python | lgpl-2.1 | 1,540 | 0.003896 | # Copyright 2013-2020 Lawrence Livermore N | ational Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RQuantreg(RPackage):
"""Estimation and inference methods for models of conditional quantiles:
Linear and nonli | near parametric and non-parametric (total variation
penalized) models for conditional quantiles of a univariate response
and several methods for handling censored survival data. Portfolio
selection methods based on expected shortfall risk are also
included."""
homepage = "https://cloud.r-project.org/package=quantreg"
url = "https://cloud.r-project.org/src/contrib/quantreg_5.29.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/quantreg"
version('5.51', sha256='df1330d245f66ee6d924b209bd4c15d44ff8cce52667959ec0d299975428bdb1')
version('5.42.1', sha256='4cc2b0883c52694e58fcfde83e30e4a54be9f4d9cbcf6138f6498cc8e0b3ccab')
version('5.40', sha256='86e310a235009ab85635dfb8803c175f80a35892e237db2525c4ef37a98936eb')
version('5.29', sha256='bb4638e8f295579afa5c40c4de7266a6ea9221436ba4ca802f94cdb43bf20f25')
version('5.26', sha256='9d7403f7c5ee219ec155838648401a1c4915a46a74f5774a0f6876c537ef2c87')
depends_on('r@2.6:', type=('build', 'run'))
depends_on('r-sparsem', type=('build', 'run'))
depends_on('r-matrix', type=('build', 'run'))
depends_on('r-matrixmodels', type=('build', 'run'))
|
Smile-SA/odoo_addons | smile_api_rest/models/api_rest_path.py | Python | agpl-3.0 | 24,554 | 0.000041 | # -*- coding: utf-8 -*-
# (C) 2022 Smile (<https://www.smile.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from copy import deepcopy
from odoo import api, fields, models, _
from odoo.tools import safe_eval
MAPPING_FIELDS_SWAGGER = {
'binary': ('string', 'binary'),
'boolean': ('boolean', ''),
'char': ('string', ''),
'date': ('string', 'date'),
'datetime': ('string', 'date-time'),
'float': ('number', 'float'),
'html': ('string', ''),
'integer': ('integer', ''),
'many2many': ('array', ''),
'many2one': ('integer', ''),
'many2one_reference': ('integer', ''),
'monetary': ('number', 'float'),
'one2many': ('array', ''),
'reference': ('string', ''),
'selection': ('array', ''),
'text': ('string', ''),
}
LIMIT_MAX = 500
def _convert_field_type_to_swagger(ttype):
type_swagger, format_swagger = 'string', ''
if ttype in MAPPING_FIELDS_SWAGGER:
type_swagger, format_swagger = MAPPING_FIELDS_SWAGGER.get(ttype)
return type_swagger, format_swagger
def _format_definition_name(name):
return name and name.replace(' ', '') or ''
class ApiRestPath(models.Model):
_name = 'api.rest.path'
_order = 'model_id'
_rec_name = 'model_id'
name = fields.Char(required=True)
active = fields.Boolean(default=True)
version_id = fields.Many2one(
'api.rest.version', string='API Version', required=True,
ondelete='cascade')
model_id = fields.Many2one('ir.model', required=True, ondelete='cascade')
model = fields.Char(related='model_id.model', readonly=True)
method = fields.Selection([
('get', 'Read'),
('post', 'Create'),
('put', 'Update'),
('delete', 'Delete'),
('custom', 'Custom function')
], required=True)
description = fields.Html()
deprecated = fields.Boolean()
tag_id = fields.Many2one('api.rest.tag', string='Tag', ondelete='set null')
# Read
filter_domain = fields.Char(default="[]")
field_ids = fields.Many2many(
'ir.model.fields', domain="[('model_id', '=', model_id)]",
string='Fields')
limit = fields.Integer(string='Limit of results', default=500)
# Create / Update
warning_required = fields.Boolean(
compute='_compute_warning_required', compute_sudo=True)
api_field_ids = fields.One2many(
'api.rest.field', 'path_id', string='Fields', copy=True)
update_domain = fields.Char(default="[]")
# Unlink
unlink_domain = fields.Char(default="[]")
# Custom function
function_apply_on_record = fields.Boolean()
function_domain = fields.Char(default="[]")
function = fields.Char()
function_parameter_ids = fields.One2many(
'api.rest.function.parameter', 'path_id', string='Parameters',
copy=True)
_sql_constraints = [
('name_uniq', 'unique (name, version_id, method)',
"Name, Version, Method must be unique!"),
]
@api.onchange('model_id')
def _onchange_model_id(self):
self.field_ids = False
self.api_field_ids = False
def _compute_warning_required(self):
warning_required = False
if self.api_field_ids:
model_required_fields = self.model_id.field_id.filtered(
lambda f: f.required).mapped('name')
api_required_fields = self.api_field_ids.filtered(
lambda f: f.required).mapped('field_id.name')
warning_required = \
not all(elem in api_required_fields
for elem in model_required_fields)
self.warning_required = warning_required
def _update_values(self, values):
if values.get('name'):
values['name'] = values.get('name', '').replace(' ', '')
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
default = dict(default or {})
default.update(name=_("%s (copy)") % (self.name or ''))
return super(ApiRestPath, self).copy(default)
@api.model
def create(self, values):
self._update_values(values)
return super().create(values)
def write(self, values):
self._update_values(values)
return super().write(values)
def _generate_path(self, swagger_paths):
self.ensure_one()
# Default values
values = {
'tags': se | lf.tag_id and [self.tag_id.name or ''] or [],
'description': self.description or '',
'deprecated': self.deprecated,
'produces': ['application/json'],
'responses': {
'200': {
'description': 'OK',
},
'401': {
'description': 'Unauthorized',
'schema': {
| '$ref': '#/definitions/ApiErrorResponse'
}
},
'403': {
'description': 'Forbidden',
'schema': {
'$ref': '#/definitions/ApiErrorResponse'
}
},
'404': {
'description': 'Not found',
'schema': {
'$ref': '#/definitions/ApiErrorResponse'
}
},
'500': {
'description': 'Internal server error',
'schema': {
'$ref': '#/definitions/ApiErrorResponse'
}
}
},
'security': [{
'api_key': [],
}],
}
# Get
if self.method == 'get':
# Default dict path
get_path = '/{}'.format(self.name)
if get_path not in swagger_paths:
swagger_paths.setdefault(get_path, {})
get_one_path = '/{}/{}'.format(self.name, '{Id}')
if get_one_path not in swagger_paths:
swagger_paths.setdefault(get_one_path, {})
# Read All elements
definition_all = {
'schema': {
'type': 'object',
'properties': {
'results': {
'type': 'array',
'items': {
'$ref': '#/definitions/{}'.format(
_format_definition_name(self.name)),
}
},
'total': {
'type': 'integer',
},
'offset': {
'type': 'integer',
},
'limit': {
'type': 'integer',
}
}
}
}
values['responses']['200'].update(definition_all)
values.update(parameters=self._get_parameters_all_elements())
swagger_paths[get_path].update({
'get': values,
})
# Read One element
values_one = deepcopy(values)
definition_one = {
'schema': {
'$ref': '#/definitions/{}'.format(
_format_definition_name(self.name)),
}
}
values_one['responses']['200'].update(definition_one)
values_one.update(parameters=self._get_parameters_one_element())
swagger_paths[get_one_path].update({
'get': values_one,
})
# Post
elif self.method == 'post' and self.api_field_ids:
# Default dict path
post_path = '/{}'.format(self.name)
if post_path not in swagger_paths:
swagger_paths.setdefault(post_path, {})
# Create element
definition = {
'description': _('Identifier of the resource created.'),
'schema': {
'type': 'integer',
}
}
values['responses']['200'].update(de |
bit-bots/imagetagger | src/imagetagger/users/forms.py | Python | mit | 473 | 0 | from django import forms
from django_registration.forms i | mport RegistrationForm
from .models import Team, User
class UserRegistrationForm(RegistrationForm):
class Meta(RegistrationForm.Meta):
model = User
fields = [
'username',
'email',
'password1',
'password2',
]
| class TeamCreationForm(forms.ModelForm):
class Meta:
model = Team
fields = [
'name',
]
|
dreipol/smallinvoice | smallinvoice/time.py | Python | mit | 292 | 0.003425 | # coding=utf-8
from smallinvoice.commons import BaseJsonEncodableObject, BaseSer | vice
class Time(BaseJsonEncodabl | eObject):
def __init__(self, start, end, date):
self.start = start
self.end = end
self.date = date
class TimeService(BaseService):
name = 'time' |
NineWoranop/loadtesting-kpi | loadtesting/ThirdPartyTools/grinder-3.11/examples/ejb.py | Python | apache-2.0 | 1,893 | 0.007924 | # Enterprise Java Beans
#
# Exercise a stateful session EJB from the Oracle WebLogic Server
# examples. Additionally this script demonstrates the use of the
# ScriptContext sleep(), getThreadId() and getRunNumber() methods.
#
# Before running this example you will need to add the EJB client and
# the WebLogic classes to your CLASSPATH.
from java.lang import String
from java.util import Properties,Random
from javax.naming import Context,InitialContext
from net.grinder.script.Grinder i | mport grinder
from net.grinder.script import Test
from weblogic.jndi import WLInitialContextFactory
tests = {
"home" : Test(1, "TraderHome"),
"trade" : Test(2, | "Trader buy/sell"),
"query" : Test(3, "Trader getBalance"),
}
# Initial context lookup for EJB home.
p = Properties()
p[Context.INITIAL_CONTEXT_FACTORY] = WLInitialContextFactory.name
home = InitialContext(p).lookup("ejb20-statefulSession-TraderHome")
tests["home"].record(home)
random = Random()
class TestRunner:
def __call__(self):
log = grinder.logger.info
trader = home.create()
tests["trade"].record(trader.sell)
tests["trade"].record(trader.buy)
tests["query"].record(trader.getBalance)
stocksToSell = { "BEAS" : 100, "MSFT" : 999 }
for stock, amount in stocksToSell.items():
tradeResult = trader.sell("John", stock, amount)
log("Result of trader.sell(): %s" % tradeResult)
grinder.sleep(100) # Idle a while
stocksToBuy = { "BEAS" : abs(random.nextInt()) % 1000 }
for stock, amount in stocksToBuy.items():
tradeResult = trader.buy("Phil", stock, amount)
log("Result of trader.buy(): %s" % tradeResult)
balance = trader.getBalance()
log("Balance is $%.2f" % balance)
trader.remove() # We don't record the remove() as a test
|
petef4/payg | test_eff_min.py | Python | mit | 10,146 | 0.000591 | import eff_min
import pytest
TOL = 6 # tolerance, compare to this number of decimal places
MEAN_PER_CALL_DATA = (
# (maximum, minimum, per_minute, charge_interval, connection=0)
(30, 60, 20, 60), (60, 60, 20, 60),
(120, 60, 20, 60), (180, 60, 20, 60),
(30, 60, 20, 1), (60, 60, 20, 1),
(120, 60, 20, 1), (180, 60, 20, 1),
(30, 1, 20, 1), (60, 1, 20, 1),
(120, 1, 20, 1), (180, 1, 20, 1),
(30, 6, 20, 6), (60, 6, 20, 6),
(120, 6, 20, 6), (180, 6, 20, 6),
(30, 0, 0, 600, 10), (60, 0, 0, 600, 10),
(120, 0, 0, 600, 10), (180, 0, 0, 600, 10),
(30, 600, 0, 600, 10), (60, 600, 0, 600, 10),
(120, 600, 0, 600, 10), (180, 600, 0, 600, 10))
def test_smart_mean_per_minute():
for args in MEAN_PER_CALL_DATA:
assert round(eff_min.smart_mean_per_minute(*args) -
60.0 / args[0] * mean_per_call(*args), TOL) == 0
def test_max_minute():
row = {'min_same': 'Free', 'min_other': '25p', 'min_land': '10p'}
assert eff_min.max_minute(row) == 25.0
row = {'min_same': '20p', 'min_other': '15p', 'min_land': '10p'}
assert eff_min.max_minute(row) == 20.0
row = {'min_same': '10p', 'min_other': '?', 'min_land': '?'}
assert eff_min.max_minute(row) == 10.0
row = {'min_same': 'n/a', 'min_other': 'n/a', 'min_land': 'Free'}
assert eff_min.max_minute(row) == 0.0
row = {'min_same': '?', 'min_other': '?', 'min_land': '?'}
with pytest.raises(ValueError):
assert eff_min.max_minute(row) == '?'
def test_pence():
assert eff_min.pence(0) == 0
assert eff_min.pence(0.0) == 0
assert eff_min.pence(10) == 10
assert eff_min.pence(10.0) == 10
assert eff_min.pence('0p') == 0.0
assert eff_min.pence('10p') == 10.0
assert eff_min.pence('1.5p') == 1.5
assert eff_min.pence('£1') == 100.0
assert eff_min.pence('Free') == 0.0
assert eff_min.pence('?') == '?'
assert eff_min.pence('n/a') == 'n/a'
with pytest.raises(ValueError) as excinfo:
eff_min.pence('foo')
assert str(excinfo.value) == 'String format not handled: foo'
# remaining tests are for functions in this test file
def test__charge_per_call():
assert round(20 - charge_per_call(1, 0, 20, 60), TOL) == 0
assert round(20 - charge_per_call(60, 0, 20, 60), TOL) == 0
assert round(40 - charge_per_call(90, 0, 20, 60), TOL) == 0
assert round(40 - charge_per_call(120, 0, 20, 60), TOL) == 0
assert round(60 - charge_per_call(121, 0, 20, 60), TOL) == 0
assert round(2 - char | ge_per_call(1, 0, 2 | 0, 6), TOL) == 0
assert round(2 - charge_per_call(6, 0, 20, 6), TOL) == 0
assert round(4 - charge_per_call(7, 0, 20, 6), TOL) == 0
assert round(4 - charge_per_call(12, 0, 20, 6), TOL) == 0
assert round(10 - charge_per_call(1, 60, 0, 600, 10), TOL) == 0
assert round(10 - charge_per_call(6, 60, 0, 600, 10), TOL) == 0
assert round(10 - charge_per_call(7, 60, 0, 600, 10), TOL) == 0
assert round(10 - charge_per_call(12, 60, 0, 600, 10), TOL) == 0
def test__mean_per_call():
assert round(20 - mean_per_call(30, 60, 20, 60), TOL) == 0
assert round(20 - mean_per_call(60, 60, 20, 60), TOL) == 0
assert round(30 - mean_per_call(120, 60, 20, 60), TOL) == 0
assert round(40 - mean_per_call(180, 60, 20, 60), TOL) == 0
assert round(20 - mean_per_call(30, 60, 20, 1), TOL) == 0
assert round(20 - mean_per_call(60, 60, 20, 1), TOL) == 0
assert round(25 + 1. / 12 - mean_per_call(120, 60, 20, 1), TOL) == 0
assert round(33 + 4. / 9 - mean_per_call(180, 60, 20, 1), TOL) == 0
assert round(5 + 1. / 6 - mean_per_call(30, 1, 20, 1), TOL) == 0
assert round(10 + 1. / 6 - mean_per_call(60, 1, 20, 1), TOL) == 0
assert round(20 + 1. / 6 - mean_per_call(120, 1, 20, 1), TOL) == 0
assert round(30 + 1. / 6 - mean_per_call(180, 1, 20, 1), TOL) == 0
# these tests are not appropriate for real world parameters, the minimum
# ought to be a multiple of charge_interval
assert round(6 - mean_per_call(30, 1, 20, 6), TOL) == 0
assert round(11 - mean_per_call(60, 1, 20, 6), TOL) == 0
assert round(21 - mean_per_call(120, 1, 20, 6), TOL) == 0
assert round(31 - mean_per_call(180, 1, 20, 6), TOL) == 0
assert round(6 - mean_per_call(30, 6, 20, 6), TOL) == 0
assert round(11 - mean_per_call(60, 6, 20, 6), TOL) == 0
assert round(21 - mean_per_call(120, 6, 20, 6), TOL) == 0
assert round(31 - mean_per_call(180, 6, 20, 6), TOL) == 0
# again the minimum ought to be a multiple of charge_interval
assert round(10 - mean_per_call(30, 60, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(60, 60, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(120, 60, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(180, 60, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(30, 0, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(60, 0, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(120, 0, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(180, 0, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(30, 600, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(60, 600, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(120, 600, 0, 600, 10), TOL) == 0
assert round(10 - mean_per_call(180, 600, 0, 600, 10), TOL) == 0
def test__excel_mean_per_call():
for args in MEAN_PER_CALL_DATA:
assert round(excel_mean_per_call(*args) - mean_per_call(*args), TOL) == 0
def test__smart_mean_per_call():
for args in MEAN_PER_CALL_DATA:
assert round(smart_mean_per_call(*args) - mean_per_call(*args), TOL) == 0
def excel_mean_per_call(
maximum, minimum, per_minute, charge_interval, connection=0):
"""Returns the mean charge for a call.
Args:
maximum (seconds): maximum length of call to average (the minimum is 1)
minimum (seconds): calls are effectively at least this long
per_minute: charge for a call of one minute
charge_interval (seconds): call length rounded up to a multiple of this
connection: charge for connecting call
"""
C5 = 60 # m minimum length of call, seconds
C6 = 20 # c charge per minute, pence/min
C7 = 1 # i chargeable interval
C8 = 0 # f flat rate connection charge
C9 = 180 # u upper bound (length of call), seconds
C9 = maximum
C5 = minimum
C6 = per_minute
C7 = charge_interval
C8 = connection
C11 = int(C9 / C7) # I max number of intervals
C12 = C9 - C11 * C7 # r remaining seconds
C14 = C5 / C7 # m/i
C15 = C11 - C5 / C7 # I - m/i
C17 = C5 * C6 / 60.0 + C8
C19 = (C5 * (C5 * C6 / 60.0 + C8) + C12 * (C6 * C7 * (C11 + 1) / 60.0 + C8)) / C9
# C23=((C7^2)*C6/60)*(C11*(C11+1)/2 - (C14)*(C14+1)/2)+C7*(C11 - C14)*C8
C23 = ((C7**2) * C6 / 60.0) * (C11 * (C11 + 1) / 2.0 - (C14) * (C14 + 1) / 2.0) + C7 * (C11 - C14) * C8
C21 = (C5 * (C5 * C6 / 60.0 + C8) + C12 * (C6 * C7 * (C11 + 1) / 60.0 + C8) + C23) / C9
# F6=IF(C15<0,C17,IF(C15=0,C19,C21))
if C15 < 0: # I < m/i
F6 = C17
elif C15 == 0: # I = m/i
F6 = C19
else: # I > m/i
F6 = C21
return F6
def smart_mean_per_call(
maximum, minimum, per_minute, charge_interval, connection=0):
"""Returns the mean charge for a call.
Args:
maximum (seconds): maximum length of call to average (the minimum is 1)
minimum (seconds): calls are effectively at least this long
per_minute: charge for a call of one minute
charge_interval (seconds): call length rounded up to a multiple of this
connection: charge for connecting call
"""
whole_intervals, last_seconds = divmod(maximum, charge_interval)
first_intervals, inexact = divmod(minimum, charge_interval)
if inexact != 0:
raise ValueError('minimum must be a multiple of charge interval')
middle_intervals = whole_intervals - first_intervals
per_second = per_minute / 60.0
if middle_intervals < 0:
return per_second * minimum + connection
else:
return (
per_second * (
|
kevinlee12/oppia | scripts/run_presubmit_checks.py | Python | apache-2.0 | 3,619 | 0 | # Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script runs the following tests in all cases.
- Javascript and Python Linti | ng
- Backend Python tests
Only when frontend files are changed will it run Frontend Karma unit tests.
"""
from __future__ import annotations
import argparse
import subprocess
from . import common
from . import run_backend_tests
from . import run_frontend_tests
from .linters import pre_ | commit_linter
_PARSER = argparse.ArgumentParser(
description="""
Run this script from the oppia root folder prior to opening a PR:
python -m scripts.run_presubmit_checks
Set the origin branch to compare against by adding
--branch=your_branch or -b=your_branch
By default, if the current branch tip exists on remote origin,
the current branch is compared against its tip on GitHub.
Otherwise it's compared against 'develop'.
This script runs the following tests in all cases.
- Javascript and Python Linting
- Backend Python tests
Only when frontend files are changed will it run Frontend Karma unit tests.
If any of these tests result in errors, this script will terminate.
Note: The test scripts are arranged in increasing order of time taken. This
enables a broken build to be detected as quickly as possible.
""")
_PARSER.add_argument(
'--branch', '-b',
help='optional; if specified, the origin branch to compare against.')
def main(args=None):
"""Run the presubmit checks."""
parsed_args = _PARSER.parse_args(args=args)
# Run Javascript and Python linters.
print('Linting files since the last commit')
pre_commit_linter.main(args=[])
print('Linting passed.')
print('')
current_branch = subprocess.check_output([
'git', 'rev-parse', '--abbrev-ref', 'HEAD'])
# If the current branch exists on remote origin, matched_branch_num=1
# else matched_branch_num=0.
matched_branch_num = subprocess.check_output([
'git', 'ls-remote', '--heads', 'origin', current_branch, '|', 'wc',
'-l'])
# Set the origin branch to develop if it's not specified.
if parsed_args.branch:
branch = parsed_args.branch
elif matched_branch_num == '1':
branch = 'origin/%s' % current_branch
else:
branch = 'develop'
print('Comparing the current branch with %s' % branch)
all_changed_files = subprocess.check_output([
'git', 'diff', '--cached', '--name-only', '--diff-filter=ACM', branch])
if common.FRONTEND_DIR in all_changed_files:
# Run frontend unit tests.
print('Running frontend unit tests')
run_frontend_tests.main(args=['--run_minified_tests'])
print('Frontend tests passed.')
else:
# If files in common.FRONTEND_DIR were not changed, skip the tests.
common.print_each_string_after_two_new_lines([
'No frontend files were changed.',
'Skipped frontend tests'])
# Run backend tests.
print('Running backend tests')
run_backend_tests.main(args=[])
print('Backend tests passed.')
if __name__ == '__main__':
main()
|
mdrasmus/argweaver | argweaver/deps/compbio/coal.py | Python | mit | 66,593 | 0.00039 | """
Coalescent methods
A note about popu | lation size. In this code all population sizes N or n are
uncorrected. If you need to | compute a coalescent for a diploid species
you must multiply N by 2 before passing it to any of these functions.
"""
#=============================================================================
# imports
from __future__ import division
# python imports
from itertools import chain, izip
from math import exp, log, sqrt
import random
# rasmus imports
from rasmus import treelib, stats, util, linked_list
try:
from rasmus.symbolic import assign_vars
from rasmus.symbolic import derivate
from rasmus.symbolic import simplify
except ImportError:
# only experimental functions need symbolic
pass
# compbio imports
from . import birthdeath
# import root finder
try:
from scipy.optimize import brentq
brentq
except ImportError:
def brentq(f, a, b, disp=False):
return stats.bisect_root(f, a, b)
#=============================================================================
# single coalescent PDFs, CDFs, and sampling functions
def prob_coal(t, k, n):
"""
Returns the probability density of observing the first coalesce of 'k'
individuals in a population size of 'n' at generation 't'
"""
# k choose 2
k2 = k * (k-1) / 2
k2n = k2 / n
return k2n * exp(- k2n * t)
def sample_coal(k, n):
"""
Returns a sample coalescent time for 'k' individuals in a population 'n'
"""
# k choose 2
k2 = k * (k-1) / 2
k2n = k2 / n
return random.expovariate(k2n)
def sample_coal_times(k, n):
"""
Returns a sampling of (k-1) coalescences for 'k' lineages in a
population of size 'n'.
"""
times = [0]
for j in xrange(k, 1, -1):
times.append(times[-1] + sample_coal(j, n))
return times[1:]
def prob_coal_counts(a, b, t, n):
"""
The probabiluty of going from 'a' lineages to 'b' lineages in time 't'
with population size 'n'
"""
if b <= 0:
return 0.0
C = stats.prod((b+y)*(a-y)/(a+y) for y in xrange(b))
s = exp(-b*(b-1)*t/2.0/n) * C
for k in xrange(b+1, a+1):
k1 = k - 1
C = (b+k1)*(a-k1)/(a+k1)/(b-k) * C
s += exp(-k*k1*t/2.0/n) * (2*k-1) / (k1+b) * C
return s / stats.factorial(b)
def prob_coal_counts_slow(a, b, t, n):
"""
The probability of going from 'a' lineages to 'b' lineages in time 't'
with population size 'n'
Implemented more directly, but slower. Good for testing against.
"""
s = 0.0
for k in xrange(b, a+1):
i = exp(-k*(k-1)*t/2.0/n) * \
(2*k-1)*(-1)**(k-b) / stats.factorial(b) / \
stats.factorial(k-b) / (k+b-1) * \
stats.prod((b+y)*(a-y)/(a+y) for y in xrange(k))
s += i
return s
def prob_coal_cond_counts(x, a, b, t, n):
"""
Returns the probability density of a coalescent happening at time 'x'
between 'a' lineages conditioned on there being 'b' lineages at time
't'. The population size is 'n'.
"""
lama = -a*(a-1)/2.0/n
C = stats.prod((b+y)*(a-1-y)/(a-1+y) for y in xrange(b))
s = exp(-b*(b-1)/2.0/n*(t-x) + lama*x) * C
for k in xrange(b+1, a):
k1 = k - 1
lam = -k*k1/2.0/n
C = (b+k1)*(a-1-k1)/(a-1+k1)/(b-k) * C
s += exp(lam*t + (lama-lam)*x) * (2*k-1) / (k1+b) * C
return s / stats.factorial(b) * (-lama) / prob_coal_counts(a, b, t, n)
def prob_coal_cond_counts_simple(x, a, b, t, n):
"""
Returns the probability density of a coalescent happening at time 'x'
between 'a' lineages conditioned on there being 'b' lineages at time
't'. The population size is 'n'.
"""
return (prob_coal_counts(a-1, b, t-x, n) * prob_coal(x, a, n) /
prob_coal_counts(a, b, t, n))
def cdf_coal_cond_counts(x, a, b, t, n):
"""
Returns the probability a coalescent happening *before* time 'x'
between 'a' lineages conditioned on there being 'b' lineages at time
't'. The population size is 'n'.
"""
lama = -a*(a-1)/2.0/n
C = stats.prod((b+y)*(a-1-y)/(a-1+y) for y in xrange(b))
c = -b*(b-1)/2.0/n
s = exp(c*t) * (exp((lama-c)*x)-1.0) / (lama-c) * C
for k in xrange(b+1, a):
k1 = k - 1
lam = -k*k1/2.0/n
C = (b+k1)*(a-1-k1)/(a-1+k1)/(b-k) * C
s += (exp(lam*t) * (exp((lama-lam)*x) - 1.0) / (lama - lam)
* (2*k-1) / (k1+b) * C)
return s / stats.factorial(b) * (-lama) / prob_coal_counts(a, b, t, n)
def sample_coal_cond_counts(a, b, t, n):
"""
Samples the next coalescent between 'a' lineages in a population size of
'n', conditioned on there being 'b' lineages at time 't'.
"""
# this code solves this equation for t
# cdf(t) - p = 0
# where p ~ U(0, 1)
p = random.random()
# compute constants
lama = -a*(a-1)/2.0/n
C0 = stats.prod((b+y)*(a-1-y)/(a-1+y) for y in xrange(b))
c = -b*(b-1)/2.0/n
d = 1.0/stats.factorial(b) * (-lama) / prob_coal_counts(a, b, t, n)
# CDF(t) - p
def f(x):
if x <= 0:
return x - p
if x >= t:
return 1.0 - p + (x - t)
C = C0
s = exp(c*t) * (exp((lama-c)*x)-1.0) / (lama-c) * C
for k in xrange(b+1, a):
k1 = k - 1
lam = -k*k1/2.0/n
C = (b+k1)*(a-1-k1)/(a-1+k1)/(b-k) * C
s += (exp(lam*t) * (exp((lama-lam)*x) - 1.0) / (lama - lam)
* (2*k-1) / (k1+b) * C)
return s * d - p
return brentq(f, 0.0, t, disp=False)
def prob_mrca(t, k, n):
"""
Probability density function of the age 't' of the most recent
common ancestor (MRCA) of 'k' lineages in a population size 'n'
"""
s = 0.0
for i in xrange(1, k):
lam = (i+1) * i / 2.0 / n
s += lam * exp(- lam * t) * mrca_const(i, 1, k-1)
return s
def cdf_mrca(t, k, n):
"""
Cumulative probability density of the age 't' of the most recent common
ancestor (MRCA) of 'k' lineages in a population size 'n'
"""
if k == 1:
return 1.0
s = 0.0
for i in xrange(1, k+1):
lam = i * (i-1) / (2.0 * n)
p = 1.0
for y in xrange(1, i):
p *= (y-k) / (k+y)
s += exp(-lam * t) * (2*i - 1) * p
return s
def mrca_const(i, a, b):
"""A constant used in calculating MRCA"""
# i+1 choose 2
y = (i+1) * i / 2.0
prod = 1.0
for j in xrange(a, b+1):
if j == i:
continue
# j+1 choose 2
x = (j+1) * j / 2.0
prod *= x / (x - y)
return prod
def prob_bounded_coal(t, k, n, T):
"""
Probability density function of seeing a coalescence at 't' from
'k' lineages in a population of size 'n' with bounding time 'T'
"""
if t > T:
return 0.0
if k == 2:
prob_coal(t, k, n)
return (prob_coal(t, k, n) * cdf_mrca(T-t, k-1, n) /
cdf_mrca(T, k, n))
def cdf_bounded_coal(t, k, n, T):
"""
Cumalative density function of seeing a coalescence at 't' from
'k' lineages in a population of size 'n' with bounding time 'T'
"""
i = k - 1
lam_i = (i+1)*i/2.0 / n
C = [mrca_const(j, 1, i-1) for j in xrange(1, i)]
#A = lam_i / n / cdf_mrca(T, k, n)
B = sum(C) / lam_i
F = [C[j-1] * exp(-(j+1)*j/2.0/n * T) / ((j+1)*j/2.0/n - lam_i)
for j in xrange(1, i)]
return (lam_i / cdf_mrca(T, k, n) *
(B * (1-exp(-lam_i * t))
- sum(F[j-1] * (exp(((j+1)*j/2.0/n - lam_i)*t)-1)
for j in xrange(1, i))))
def sample_bounded_coal(k, n, T):
"""
Sample a coalescent time 't' for 'k' lineages and population 'n'
on the condition that the MRCA is before 'T'
"""
# special case
if k == 2:
return sample_bounded_coal2(n, T)
# this code solves this equation for t
# cdf(t) - p = 0
# where p ~ U(0, 1)
i = k - 1
p = random.random()
# compute constants
lam_i = (i+1)*i/2.0 / n
C = [mrca_const(j, 1, i-1) for j in xrange(1, i)]
A = lam_i / cdf_mrca(T, k, n)
B = sum(C) / lam_i
F = [C[j-1] * exp(-(j+1)*j/2.0 |
ajfranke/subscrape | processtitles.py | Python | gpl-2.0 | 1,317 | 0.006834 | #!/usr/bin/python
import pandas as pd
import numpy as np
import time, datetime
# script to load data as raw csv, and add columns to data frame
infilename = "Homebrewing_titles_1214239837_1424289414.csv"
columns = ['row_id', 'post_id', 'score', 'time','title']
# row is an integer
# post_id is a alphanumeric integer.
# score is an integer
# time is a POSIX tim | estamp, in decimal
# Word is a string consisting of | a single word or symbol
frame = pd.read_csv(infilename, names=columns, sep='\t', header=1)
dayofweek = [datetime.datetime.fromtimestamp(x).isocalendar()[2] for x in frame['time']]
isoweek = [datetime.datetime.fromtimestamp(x).isocalendar()[1] for x in frame['time']]
dayofyear = [datetime.datetime.fromtimestamp(x).utctimetuple().tm_yday for x in frame['time']]
month = [datetime.datetime.fromtimestamp(x).utctimetuple().tm_mon for x in frame['time']]
year = [datetime.datetime.fromtimestamp(x).utctimetuple().tm_year for x in frame['time']]
frame['dayofweek'] = dayofweek
frame['isoweek'] = isoweek
frame['dayofyear'] = dayofyear
frame['month'] = month
frame['year'] = year
has_first = ['first' in str(x).lower() for x in frame['title']]
has_question = [('?' in str(x) or 'question' in str(x).lower()) for x in frame['title']]
frame['has_first'] = has_first
frame['has_question'] = has_question
|
cedrick-f/pySequence | src/lien.py | Python | gpl-3.0 | 36,961 | 0.016217 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##This file is part of pySequence
#############################################################################
#############################################################################
## ##
## pysequence ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2014 Cédrick FAURY - Jean-Claude FRICOU
##
## pyS�quence : aide � la construction
## de S�quences et Progressions p�dagogiques
## et � la validation de Projets
# pySequence is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# pySequence is distributed in the ho | pe that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pySequence; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
module lien
********** | *
"""
import os, sys, subprocess
import wx
import re
from util_path import toFileEncoding, toSystemEncoding, SYSTEM_ENCODING
from widgets import messageErreur, scaleImage, Grammaire, img2str, str2img
import images
from drag_file import *
from util_path import *
from file2bmp import *
# from dpi_aware import *
SSCALE = 1.0
if sys.platform == 'darwin':
def openFolder(path):
subprocess.check_call(['open', '--', path])
elif sys.platform == 'linux2':
def openFolder(path):
subprocess.check_call(['xdg-open', '--', path])
elif sys.platform == 'win32':
def openFolder(path):
# subprocess.Popen(["explorer", path], shell=True)
subprocess.call(['explorer', path.encode(sys.getfilesystemencoding())], shell=True)
####################################################################################
#
# Objet lien vers un fichier, un dossier ou bien un site web
#
####################################################################################
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
class Lien():
def __init__(self, path = "", typ = ""):
self.path = path # Impérativement toujours encod� en FILE_ENCODING !!
self.type = typ # Type de lien ('d' = dossier, 'f' = fichier, 'u' = url)
self.ok = False # Etat du lien (False = lien rompu)
self.abs = False # Lien absolu (défaut = lien relatif)
######################################################################################
def __repr__(self):
return self.type + " : " + toSystemEncoding(self.path)
######################################################################################
def reset(self):
self.path = ""
self.type = ""
self.ok = False
self.abs = False
######################################################################################
def setPath(self, path):
self.path = path
######################################################################################
def __neq__(self, l):
if self.type != l.type:
return True
elif self.path != l.path:
return True
return False
######################################################################################
def __eq__(self, lien):
return self.path == lien.path
######################################################################################
def DialogCreer(self, pathref):
dlg = URLDialog(None, self, pathref)
dlg.ShowModal()
dlg.Destroy()
######################################################################################
def Afficher(self, pathref, fenSeq = None):
""" Lance l'affichage du contenu du lien
<pathref> = chemin de l'application pour déterminer le chemin absolu
"""
t = self.getTexte()
print("Afficher", self.type, self.path)
path = self.GetAbsPath(pathref)
# print " ", path
# print " ", path.decode("unicode-escape")
# print " ", path.encode(sys.getfilesystemencoding())
if self.type == "f":
if os.path.exists(path):
try:
os.startfile(path)
except:
messageErreur(None, "Ouverture impossible",
"Impossible d'ouvrir le fichier\n\n%s\n" %toSystemEncoding(path))
else:
messageErreur(None, "Chemin non trouvé",
"Le fichiern'a pas été trouvé\n\n%s" %toSystemEncoding(path))
elif self.type == 'd':
if os.path.isdir(path):
openFolder(path)
# try:
# # subprocess.Popen(["explorer", path])
#
# except:
# messageErreur(None, u"Ouverture impossible",
# u"Impossible d'acc�der au dossier\n\n%s\n" %toSystemEncoding(path))
else:
messageErreur(None, "Chemin non trouvé",
"Le dossiern'a pas été trouvé\n\n%s" %toSystemEncoding(path))
elif self.type == 'u':
try:
webbrowser.open(self.path)
except:
messageErreur(None, "Ouverture impossible",
"Impossible d'ouvrir l'url\n\n%s\n" %toSystemEncoding(self.path))
elif self.type == 's':
if os.path.isfile(path):
# self.Show(False)
child = fenSeq.commandeNouveau()
child.ouvrir(path)
######################################################################################
def isOk(self):
self.EvalTypeLien()
return self.ok
######################################################################################
def EvalTypeLien(self, pathref = ""):
""" Evaluation du de self.lien.path
par rapport à pathref
et attribue un type
"""
# print("EvalTypeLien\n ", self.path, "\n ", pathref)
abspath = self.GetAbsPath(pathref)
if os.path.exists(abspath):
if os.path.isfile(abspath):
self.type = 'f'
elif os.path.isdir(abspath):
self.type = 'd'
# if not self.abs:
# self.path = relpath
# else:
# self.path = abspath
self.ok = True
elif re.match(regex, self.path):
self.type = 'u'
self.ok = True
else:
self.type = ''
self.ok = False
return
######################################################################################
def EvalLien(self, path, pathref):
""" Teste la validité du chemin <path> (SYSTEM_ENCODING)
par rapport au dossier de référence <pathref> (FILE_ENCODING)
et change self.path (FILE_ENCODIN |
tgquintela/pySpatialTools | pySpatialTools/tests/test_api.py | Python | mit | 96 | 0 |
"""
test api
--------
test for api functions to improve usability.
| """
def test(): |
pass
|
OCA/account-invoicing | account_move_exception/wizard/account_exception_confirm.py | Python | agpl-3.0 | 724 | 0 | # Copyright 2021 ForgeFlow (http://www.forgeflow.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class AccountExceptionConfirm(models.TransientModel):
    """Wizard letting a user confirm or ignore exception rules raised on a journal entry."""

    _name = "account.exception.confirm"
    _description = "Account exception wizard"
    _inherit = ["exception.rule.confirm"]

    # The journal entry (account.move) whose exceptions are being reviewed.
    related_model_id = fields.Many2one(
        comodel_name="account.move", string="Journal Entry"
    )

    def action_confirm(self):
        """Confirm the wizard; when 'ignore' is checked, repost the move
        with its exception checks bypassed."""
        self.ensure_one()
        if self.ignore:
            # Posting requires a draft move, so reset it first, flag it to
            # skip exception checks, then post it again.
            self.related_model_id.button_draft()
            self.related_model_id.ignore_exception = True
            self.related_model_id.action_post()
        return super().action_confirm()
|
Bideau/SmartForrest | RaspberryPi/dataBase/oldmysql/testmultitarg.py | Python | mit | 651 | 0.012289 |
def test(*args):
    """Print each positional argument with its ordinal label (French).

    Accepts exactly 2 or 3 arguments; any other count prints "bad parameter".
    """
    if len(args) == 2:
        print("le premier argument est : " + str(args[0]))
        print("le second arguement est : " + str(args[1]))
    elif len(args) == 3:
        print("le premier argument est : " + str(args[0]))
        print("le second arguement est : " + str(args[1]))
        # Bug fix: the third argument was mislabelled "second" (copy-paste error).
        print("le troisieme argument est : " + str(args[2]))
    else:
        print("bad parameter")
# Demo driver: exercise test() with 0 through 4 arguments.
# Prints use call syntax so the script runs under both Python 2 and 3.
arg1 = "toto"
arg2 = "titi"
arg3 = "tata"
arg4 = "rien"
print("debit des test")
print("test 1")
test()
print("test 2")
test(arg1)
print("test 3")
test(arg1, arg2)
print("test 4")
test(arg1, arg2, arg3)
print("test 5")
test(arg1, arg2, arg3, arg4)
UTSA-ICS/keystone-kerberos | keystone/tests/unit/test_ldap_tls_livetest.py | Python | apache-2.0 | 4,402 | 0.000227 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ldap
import ldap.modlist
from oslo_config import cfg
from keystone import exception
from keystone import identity
from keystone.tests import unit as tests
from keystone.tests.unit import test_ldap_livetest
CONF = cfg.CONF
def create_object(dn, attrs):
    """Bind to the configured LDAP server and add a single entry at *dn*
    built from the *attrs* attribute dict."""
    connection = ldap.initialize(CONF.ldap.url)
    connection.simple_bind_s(CONF.ldap.user, CONF.ldap.password)
    modlist = ldap.modlist.addModlist(attrs)
    connection.add_s(dn, modlist)
    connection.unbind_s()
class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity):
    """Live LDAP identity tests exercising the TLS connection options:
    'demand' certificate verification with a CA file/dir, and failure
    behaviour when the configured CA paths do not exist."""

    def _ldap_skip_live(self):
        # These tests require a live TLS-enabled LDAP server; only run when
        # the environment explicitly opts in.
        self.skip_if_env_not_set('ENABLE_TLS_LDAP_LIVE_TEST')

    def config_files(self):
        config_files = super(LiveTLSLDAPIdentity, self).config_files()
        config_files.append(tests.dirs.tests_conf('backend_tls_liveldap.conf'))
        return config_files

    def config_overrides(self):
        super(LiveTLSLDAPIdentity, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')

    def test_tls_certfile_demand_option(self):
        """User CRUD works over TLS with tls_req_cert='demand'."""
        self.config_fixture.config(group='ldap',
                                   use_tls=True,
                                   tls_cacertdir=None,
                                   tls_req_cert='demand')
        self.identity_api = identity.backends.ldap.Identity()

        user = {'name': 'fake1',
                'password': 'fakepass1',
                'tenants': ['bar']}
        # Bug fix: pass the user dict, not the literal string 'user'; the
        # dict built above was previously constructed and never used.
        user = self.identity_api.create_user(user)
        user_ref = self.identity_api.get_user(user['id'])
        self.assertEqual(user['id'], user_ref['id'])

        user['password'] = 'fakepass2'
        self.identity_api.update_user(user['id'], user)

        self.identity_api.delete_user(user['id'])
        self.assertRaises(exception.UserNotFound, self.identity_api.get_user,
                          user['id'])

    def test_tls_certdir_demand_option(self):
        """User CRUD works over TLS with tls_req_cert='demand' (certdir variant)."""
        # NOTE(review): this test still uses the older two-argument
        # create_user('fake1', user) API, unlike its certfile sibling --
        # confirm against the current identity driver signature.
        self.config_fixture.config(group='ldap',
                                   use_tls=True,
                                   tls_cacertdir=None,
                                   tls_req_cert='demand')
        self.identity_api = identity.backends.ldap.Identity()

        user = {'id': 'fake1',
                'name': 'fake1',
                'password': 'fakepass1',
                'tenants': ['bar']}
        self.identity_api.create_user('fake1', user)
        user_ref = self.identity_api.get_user('fake1')
        self.assertEqual('fake1', user_ref['id'])

        user['password'] = 'fakepass2'
        self.identity_api.update_user('fake1', user)

        self.identity_api.delete_user('fake1')
        self.assertRaises(exception.UserNotFound, self.identity_api.get_user,
                          'fake1')

    def test_tls_bad_certfile(self):
        """A nonexistent CA certificate file must raise IOError on connect."""
        self.config_fixture.config(
            group='ldap',
            use_tls=True,
            tls_req_cert='demand',
            tls_cacertfile='/etc/keystone/ssl/certs/mythicalcert.pem',
            tls_cacertdir=None)
        self.identity_api = identity.backends.ldap.Identity()

        user = {'name': 'fake1',
                'password': 'fakepass1',
                'tenants': ['bar']}
        self.assertRaises(IOError, self.identity_api.create_user, user)

    def test_tls_bad_certdir(self):
        """A nonexistent CA certificate directory must raise IOError on connect."""
        self.config_fixture.config(
            group='ldap',
            use_tls=True,
            tls_cacertfile=None,
            tls_req_cert='demand',
            tls_cacertdir='/etc/keystone/ssl/mythicalcertdir')
        self.identity_api = identity.backends.ldap.Identity()

        user = {'name': 'fake1',
                'password': 'fakepass1',
                'tenants': ['bar']}
        self.assertRaises(IOError, self.identity_api.create_user, user)
|
devilry/devilry-django | devilry/devilry_import_v2database/tests/test_modelimporters/test_deliveryimporter.py | Python | bsd-3-clause | 9,808 | 0.002243 | import unittest
from django import test
from django.conf import settings
from model_bakery import baker
from devilry.devilry_group.models import FeedbackSet, GroupComment
from devilry.devilry_import_v2database.modelimporters. | delivery_feedback_importers import DeliveryImporter
from .importer_testcase_mixin import ImporterTest | CaseMixin
@unittest.skip('Not relevant anymore, keep for history.')
class TestDeliveryImporterImporter(ImporterTestCaseMixin, test.TestCase):
def _create_model_meta(self):
return {
'model_class_name': 'Delivery',
'max_id': 143,
'app_label': 'core'
}
def _create_delivery_dict(self, feedback_set, candidate_id=None):
return {
'pk': 3,
'model': 'core.delivery',
'fields': {
'delivery_type': 0,
'alias_delivery': None,
'successful': True,
'number': 1,
'delivered_by': candidate_id,
'last_feedback': 3,
'deadline': feedback_set.id,
'copy_of': None,
'time_of_delivery': '2016-04-10T07:04:00'
},
}
    def test_importer(self):
        """Importing one v2 delivery creates one FeedbackSet and one GroupComment."""
        test_student_user = baker.make(settings.AUTH_USER_MODEL)
        test_group = baker.make('core.AssignmentGroup')
        candidate = baker.make('core.Candidate',
                               assignment_group=test_group,
                               relatedstudent__user=test_student_user,
                               relatedstudent__period=test_group.parentnode.parentnode)
        test_feedbackset = baker.make('devilry_group.FeedbackSet', group=test_group)
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=test_feedbackset,
                candidate_id=candidate.id),
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        self.assertEqual(FeedbackSet.objects.count(), 1)
        self.assertEqual(GroupComment.objects.count(), 1)
    def test_importer_pk(self):
        """The imported GroupComment keeps the v2 delivery's primary key (3)."""
        test_student_user = baker.make(settings.AUTH_USER_MODEL)
        test_group = baker.make('core.AssignmentGroup')
        candidate = baker.make('core.Candidate',
                               assignment_group=test_group,
                               relatedstudent__user=test_student_user,
                               relatedstudent__period=test_group.parentnode.parentnode)
        test_feedbackset = baker.make('devilry_group.FeedbackSet', group=test_group)
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=test_feedbackset,
                candidate_id=candidate.id)
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        comment = GroupComment.objects.first()
        self.assertEqual(comment.pk, 3)
        self.assertEqual(comment.id, 3)
    def test_importer_feedback_set(self):
        """The imported comment is attached to the deadline's FeedbackSet."""
        test_student_user = baker.make(settings.AUTH_USER_MODEL)
        test_group = baker.make('core.AssignmentGroup')
        candidate = baker.make('core.Candidate',
                               assignment_group=test_group,
                               relatedstudent__user=test_student_user,
                               relatedstudent__period=test_group.parentnode.parentnode)
        test_feedbackset = baker.make('devilry_group.FeedbackSet', group=test_group)
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=test_feedbackset,
                candidate_id=candidate.id)
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        comment = GroupComment.objects.first()
        self.assertEqual(comment.feedback_set, test_feedbackset)
    def test_importer_text(self):
        """The imported comment gets the placeholder text 'Delivery'."""
        test_student_user = baker.make(settings.AUTH_USER_MODEL)
        test_group = baker.make('core.AssignmentGroup')
        candidate = baker.make('core.Candidate',
                               assignment_group=test_group,
                               relatedstudent__user=test_student_user,
                               relatedstudent__period=test_group.parentnode.parentnode)
        test_feedbackset = baker.make('devilry_group.FeedbackSet', group=test_group)
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=test_feedbackset,
                candidate_id=candidate.id)
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        comment = GroupComment.objects.first()
        self.assertEqual(comment.text, 'Delivery')
    def test_importer_comment_type(self):
        """The imported comment is a regular group comment (not an imageannotation)."""
        test_student_user = baker.make(settings.AUTH_USER_MODEL)
        test_group = baker.make('core.AssignmentGroup')
        candidate = baker.make('core.Candidate',
                               assignment_group=test_group,
                               relatedstudent__user=test_student_user,
                               relatedstudent__period=test_group.parentnode.parentnode)
        test_feedbackset = baker.make('devilry_group.FeedbackSet', group=test_group)
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=test_feedbackset,
                candidate_id=candidate.id)
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        comment = GroupComment.objects.first()
        self.assertEqual(comment.comment_type, GroupComment.COMMENT_TYPE_GROUPCOMMENT)
    def test_importer_comment_part_of_grading_false(self):
        """A delivery import is not marked as part of grading."""
        test_student_user = baker.make(settings.AUTH_USER_MODEL)
        test_group = baker.make('core.AssignmentGroup')
        candidate = baker.make('core.Candidate',
                               assignment_group=test_group,
                               relatedstudent__user=test_student_user,
                               relatedstudent__period=test_group.parentnode.parentnode)
        test_feedbackset = baker.make('devilry_group.FeedbackSet', group=test_group)
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=test_feedbackset,
                candidate_id=candidate.id)
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        comment = GroupComment.objects.first()
        self.assertFalse(comment.part_of_grading)
    def test_importer_user(self):
        """The comment is attributed to the delivering candidate's user."""
        test_student_user = baker.make(settings.AUTH_USER_MODEL)
        test_group = baker.make('core.AssignmentGroup')
        candidate = baker.make('core.Candidate',
                               assignment_group=test_group,
                               relatedstudent__user=test_student_user,
                               relatedstudent__period=test_group.parentnode.parentnode)
        test_feedbackset = baker.make('devilry_group.FeedbackSet', group=test_group)
        self.create_v2dump(
            model_name='core.delivery',
            data=self._create_delivery_dict(
                feedback_set=test_feedbackset,
                candidate_id=candidate.id)
        )
        DeliveryImporter(input_root=self.temp_root_dir).import_models()
        comment = GroupComment.objects.first()
        self.assertEqual(comment.user, test_student_user)
def test_importer_user_role(self):
test_student_user = baker.make(settings.AUTH_USER_MODEL)
test_group = baker.make('core.AssignmentGroup')
candidate = baker.make('core.Candidate',
assignment_group=test_group,
relatedstudent__user=test_student_user,
relatedstudent__period=test_group.parentnode.parentnode)
test_feedbackset = baker.make('devilry_group.FeedbackSet', group=test_group)
self.create_v2dump(
model_name='core.delivery',
data=self._cr |
mor1/pyrt | ospf.py | Python | gpl-2.0 | 26,560 | 0.012387 | #! /usr/bin/env python2.5
## PyRT: Python Routeing Toolkit
## OSPF module: provides the OSPF listener and OSPF PDU parsers
## Copyright (C) 2010 Richard Mortier <mort@cantab.net>
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
## 02111-1307 USA
# RFC 1584 -- MOSPF
# RFC 2328 -- OSPF v2
# RFC 2370 -- Opaque LSAs (updated by RFC 3670)
# [ This is such a mess compared with IS-IS! Opaque LSAs have a
# different LSA header format due to the need to encode an Opaque
# LSA type ]
# RFC 2676 -- QoS routing mechanisms
# RFC 3101 -- Not-so-stubby-area (NSSA) option
# RFC 3137 -- Stu | b routers (where metric == 0xffffff > LSInfinity, 0xffff)
# RFC 3623 -- Graceful restart
# RFC 3630 -- Traffic engineering extensions
## LSUPD/LSA notes:
# router id:
# the IP address of the router that generated the packet
# advrtr:
# the IP address of the advertising router
# src:
# the IP address of th | e interface from which the LSUPD came
# link state id (lsid):
# identifier for this link (interface) dependent on type of LSA:
# 1 (router) ID of router generating LSA
# 2 (network) IP address of DR for LAN
# 3 (summary IP) IP address of link reported as dst
# 4 (summary ASBR) IP address of reachable ASBR
# 5 (external AS) IP address of link reported as dst
# link id:
# what is connected to this router by this link, dependent on type
# 1 (p2p) ID of neighbour router
# 2 (transit) IP address of DR for LAN
# 3 (stub) IP address of LAN (no DR since a stub network)
# 4 (virtual) ID of neighbour router
# link data:
# subnet mask if lsid==3; else IP address of the router that
# generated the LSA on the advertised link (~= advrtr?)
# summary LSA:
# created by ASBR and flooded into area; type 3 report cost to
# prefix outside area, type 4 report cost to ASBR
import struct, socket, sys, math, getopt, string, os.path, time, select, traceback
from mutils import *
#-------------------------------------------------------------------------------
INDENT = " "
VERSION = "2.9"
RECV_BUF_SZ = 8192
OSPF_LISTEN_PORT = 89
LS_INFINITY = 0xffff
LS_STUB_RTR = 0xffffff
IP_HDR = "> BBH HH BBH LL"
IP_HDR_LEN = struct.calcsize(IP_HDR)
OSPF_HDR = "> BBH L L HH L L"
OSPF_HDR_LEN = struct.calcsize(OSPF_HDR)
OSPF_HELLO = "> L HBB L L L"
OSPF_HELLO_LEN = struct.calcsize(OSPF_HELLO)
OSPF_DESC = "> HBB L "
OSPF_DESC_LEN = struct.calcsize(OSPF_DESC)
OSPF_LSREQ = "> L L L"
OSPF_LSREQ_LEN = struct.calcsize(OSPF_LSREQ)
OSPF_LSUPD = "> L"
OSPF_LSUPD_LEN = struct.calcsize(OSPF_LSUPD)
OSPF_LSAHDR = "> HBB L L L HH"
OSPF_LSAHDR_LEN = struct.calcsize(OSPF_LSAHDR)
OSPF_LSARTR = "> BBH"
OSPF_LSARTR_LEN = struct.calcsize(OSPF_LSARTR)
OSPF_LSANET = "> L"
OSPF_LSANET_LEN = struct.calcsize(OSPF_LSANET)
OSPF_LINK = "> L L BBH"
OSPF_LINK_LEN = struct.calcsize(OSPF_LINK)
OSPF_METRIC = "> BBH"
OSPF_METRIC_LEN = struct.calcsize(OSPF_METRIC)
OSPF_LSASUMMARY = "> L"
OSPF_LSASUMMARY_LEN = struct.calcsize(OSPF_LSASUMMARY)
OSPF_LSAEXT = "> L"
OSPF_LSAEXT_LEN = struct.calcsize(OSPF_LSAEXT)
OSPF_LSAEXT_METRIC = "> BBH L L"
OSPF_LSAEXT_METRIC_LEN = struct.calcsize(OSPF_LSAEXT_METRIC)
################################################################################
DLIST = []
ADDRS = { str2id("224.0.0.5"): "AllSPFRouters",
str2id("224.0.0.6"): "AllDRouters",
}
DLIST += [ADDRS]
AFI_TYPES = { 1L: "IP",
2L: "IP6",
}
DLIST += [AFI_TYPES]
MSG_TYPES = { 1L: "HELLO",
2L: "DBDESC",
3L: "LSREQ",
4L: "LSUPD",
5L: "LSACK",
}
DLIST += [MSG_TYPES]
AU_TYPES = { 0L: "NULL",
1L: "PASSWD",
2L: "CRYPTO",
}
DLIST += [AU_TYPES]
LSA_TYPES = { 1L: "ROUTER", # links between routers in the area
2L: "NETWORK", # links between "networks" in the area
3L: "SUMMARY (IP)", # networks rechable outside area; gen. by ASBR
4L: "SUMMARY (ASBR)", # ASBRs reachable outside area; gen. by (local) ASBR
5L: "EXTERNAL AS", # prefixes reachable outside the AS; gen. by (local) ASBR
6L: "MOSPF",
7L: "NSSA",
9L: "OPAQUE LINK LOCAL",
10L: "OPAQUE AREA LOCAL",
11L: "OPAQUE AS LOCAL",
}
DLIST += [LSA_TYPES]
OPAQUE_TYPES = { 1L: "TRAFFIC ENGINEERING",
3L: "GRACEFUL RESTART",
}
DLIST += [OPAQUE_TYPES]
TE_TLV_TS = { 1L: "ROUTER ADDRESS",
2L: "LINK",
}
DLIST += [TE_TLV_TS]
TE_TLV_LS = { 1L: 4,
2L: 0, ## variable
}
TE_LINK_SUBTYPES = { 1L: "TYPE",
2L: "ID",
3L: "LOCAL IF",
4L: "REMOTE IF",
5L: "TE METRIC",
6L: "MAX BW",
7L: "MAX RSVBL BW",
8L: "UNRSVD BW",
9L: "ADMIN GROUP",
}
DLIST += [TE_LINK_SUBTYPES]
TE_LINK_SUBTYPE_LS = { 1L: 1,
2L: 4,
3L: 4,
4L: 4,
5L: 4,
6L: 4,
7L: 4,
8L: 32,
9L: 4,
}
GRACE_TLV_TS = { 1L: "PERIOD",
2L: "REASON",
3L: "IP ADDR",
}
DLIST += [GRACE_TLV_TS]
GRACE_REASONS = { 0L: "UNKNOWN",
1L: "SW RESTART",
2L: "SW RELOAD/UPGRADE",
3L: "SWITCH REDUNDANT RCP",
}
DLIST += [GRACE_REASONS]
GRACE_TLV_LS = { 1L: 4,
2L: 1,
3L: 4,
}
RTR_LINK_TYPE = { 1L: "P2P",
2L: "TRANSIT",
3L: "STUB",
4L: "VIRTUAL",
}
DLIST += [RTR_LINK_TYPE]
# Make every lookup table in DLIST bidirectional: for each code -> name
# entry, add the matching name -> code entry to the same dict.
# (Python 2: d.keys() returns a list, so mutating the dict in the loop is safe.)
for d in DLIST:
    for k in d.keys():
        d[ d[k] ] = k
################################################################################
def parseIpHdr(msg, verbose=1, level=0):
    """Unpack an IPv4 header from `msg` and return its fields as a dict.

    verbose > 1 additionally dumps the raw header bytes; verbose > 0 prints
    a human-readable summary; `level` controls print indentation depth.
    """

    if verbose > 1: print prtbin(level*INDENT, msg[:IP_HDR_LEN])
    (verhlen, tos, iplen, ipid, frag, ttl, proto, cksum, src, dst) =\
       struct.unpack(IP_HDR, msg)

    # Version is the high nibble of the first byte; header length is stored
    # in 32-bit words, so multiply by 4 to get bytes.
    ver = (verhlen & 0xf0) >> 4
    hlen = (verhlen & 0x0f) * 4

    if verbose > 0:
        print level*INDENT +\
              "IP (len=%d)" % len(msg)
        print (level+1)*INDENT +\
              "ver:%s, hlen:%s, tos:%s, len:%s, id:%s, frag:%s, ttl:%s, prot:%s, cksm:%x" %\
              (ver, hlen, int2bin(tos), iplen, ipid, frag, ttl, proto, cksum)
        print (level+1)*INDENT +\
              "src:%s, dst:%s" % (id2str(src), id2str(dst))

    return { "VER"   : ver,
             "HLEN"  : hlen,
             "TOS"   : tos,
             "IPLEN" : iplen,
             "IPID"  : ipid,
             "FRAG"  : frag,
             "TTL"   : ttl,
             "PROTO" : proto,
             "CKSUM" : cksum,
             "SRC"   : src,
             "DST"   : dst
             }
def parseOspfHdr(msg, verbose=1, level=0):
if verbose > 1: print prtbin(level*INDENT, msg[:OSPF_HDR_LEN])
(ver, typ, len, rid, aid, cksum, autype, auth1, auth2) = struct.unpack(OSPF_HDR, msg)
if verbose > 0 |
tanmoy7989/idp | runmd.py | Python | gpl-3.0 | 5,497 | 0.019465 | #!/usr/bin/env python
import os, sys
import numpy as np
import pickleTraj
import sim
Verbose = False
DelTempFiles = True  # remove lammps input/scratch files after the run (see runMD)
useREMD = True       # run replica-exchange MD instead of plain MD

# Basic params
TempSet = 300.0
FORCEFIELD_FILE = None
Prefix = 'modtrj'
BoxL = None

# MD settings
StepsMin = 1000
StepsEquil = 500000
StepsProd = 5000000
StepFreq = 100
StepsSwap = 500
TimeStep = 1.0 # timestep in femtoseconds

# REMD settings
Temps = None
HighTemp = 600
NCores = 8
# NOTE(review): StepsSwap was already assigned 500 in the MD settings above;
# this reassignment makes 1000 the effective value and the earlier one dead.
StepsSwap = 1000

# Lammps settings
LammpsExec = os.path.expanduser('~/mysoftware/tanmoy_lammps/lammps-15May15/src/lmp_ZIN')
def ReorderTraj(Sys, TrajFile, LogFile, TempInd = 0, MultiTrajFn = None, MultiEneFn = None):
    """Demultiplex REMD output for one temperature.

    Follows the replica<->temperature swap record in `LogFile` to stitch
    the single-temperature trajectory for temperature index `TempInd`
    out of the per-replica trajectory files `TrajFile + '.<i>'`.
    Returns (MultiTraj, MultiEne, MultiTrajFn, MultiEneFn).
    """
    # For each swap interval, the log row's columns 1.. give the temperature
    # index held by each replica; find which replica holds TempInd.
    RepInds = [np.where(x[1:] == TempInd)[0][0] for x in np.loadtxt(LogFile, skiprows = 3)]
    TrajList = []
    MultiEne = []
    this_Traj = {}; this_Ene = {}  # lazily-opened per-replica trajectories / energies
    if StepFreq <= StepsSwap:
        # Frames are written at least as often as swaps: take the slice of
        # frames belonging to each swap interval from the owning replica.
        for ii, i in enumerate(RepInds):
            if not i in this_Traj.keys():
                this_Traj[i] = sim.traj.Lammps(TrajFile + '.%d' % i, LogFile = LogFile + '.%d' % i, LogFileToken = '#run production')
                tmpInit = this_Traj[i][0]  # touch first frame to force the lazy parse
                this_Ene[i] = this_Traj[i].ThermoDict['PEnergy']
            start = ii * StepsSwap / StepFreq # assume mod(StepsSwap, StepFreq) = 0
            stop = (ii + 1) * StepsSwap / StepFreq
            TrajList.append(this_Traj[i][start:stop])
            MultiEne.extend(this_Ene[i][start:stop])
    else:
        # Frames are written less often than swaps: only every NSkip-th swap
        # record coincides with a written frame.
        NSkip = StepFreq / StepsSwap # assume mod(StepFreq, StepsSwap) = 0
        for ii, i in enumerate(RepInds[0::NSkip]):
            if not i in this_Traj.keys():
                this_Traj[i] = sim.traj.Lammps(TrajFile + '.%d' % i, LogFile = LogFile + '.%d' % i, LogFileToken = '#run production')
                tmpInit = this_Traj[i][0]
                this_Ene[i] = this_Traj[i].ThermoDict['PEnergy']
            TrajList.append(this_Traj[i][ii:ii+1])
            MultiEne.append(this_Ene[i][ii])

    # Concatenate the pieces and write them out as a single trajectory.
    MultiTraj = sim.traj.Multi(TrajList, Sys)
    if MultiTrajFn is None: MultiTrajFn = Prefix + '_remd.lammpstrj.gz'
    if MultiEneFn is None: MultiEneFn = Prefix + '_remd.ene.dat'
    sim.traj.base.Convert(InTraj = MultiTraj, OutTrajClass = sim.traj.LammpsWrite, FileName = MultiTrajFn)
    np.savetxt(MultiEneFn, MultiEne, fmt = '%11.4e')
    return MultiTraj, MultiEne, MultiTrajFn, MultiEneFn
def ReorderAllTraj(Sys, TrajFile, LogFile):
    """Demultiplex every temperature of a REMD run.

    Returns (TrajFnList, EneFnList): the per-temperature trajectory and
    energy file names produced by ReorderTraj.
    """
    TrajFnList = []
    EneFnList = []
    for i, t in enumerate(Temps):
        print 'Reordering replica at temperature ', t
        this_Traj, this_Ene, this_TrajFn, this_EneFn = ReorderTraj(Sys = Sys, TempInd = i, TrajFile = TrajFile, LogFile = LogFile,
                                                                   MultiTrajFn = Prefix + '.lammpstrj.gz.%3.2f' % t,
                                                                   MultiEneFn = Prefix + '.ene.dat.%3.2f' % t)
        TrajFnList.append(this_TrajFn)
        EneFnList.append(this_EneFn)
    return TrajFnList, EneFnList
def runMD(Sys):
    """Run (RE)MD for `Sys` with lammps, driven by the module-level settings.

    With useREMD True returns (TrajFiles, EneFiles, LogFile) after
    demultiplexing the per-temperature trajectories; otherwise returns
    (TrajFile, LogFile).
    """
    global Prefix, INITPARAMFILE
    global TempSet, Temps, HighTemp, NCores
    global StepsMin, StepsEquil, StepsProd, StepsSwap, Verbose

    # system params
    Sys.TempSet = TempSet
    if not BoxL is None: Sys.BoxL = BoxL
    if Temps is None:
        # Geometric temperature ladder from TempSet up to HighTemp.
        Temps = np.logspace(np.log10(TempSet), np.log10(HighTemp), NCores)
    np.savetxt(os.path.join(os.getcwd(), 'temps.txt'), Temps)

    # load in the forcefield
    if not FORCEFIELD_FILE is None:
        Sys.ForceField.SetParamString(file(FORCEFIELD_FILE).read())
    else: raise IOError('ForceField not supplied')

    # Lammps settings
    sim.export.lammps.LammpsExec = LammpsExec
    sim.srel.base.DiffEneFracTol = 0.1 # relaxed tolerance to prevent crash due to lammps-sim mismatch

    #REMD settings
    if useREMD:
        sim.export.lammps_REMD.TEMPS = Temps
        sim.export.lammps_REMD.HighTemp = HighTemp
        sim.export.lammps_REMD.NCores = NCores
        sim.export.lammps_REMD.NStepsSwap = StepsSwap
        makeFunc = sim.export.lammps_REMD.MakeLammpsReplicaMD
        runFunc = sim.export.lammps_REMD.RunLammpsReplica
    else:
        makeFunc = sim.export.lammps.MakeLammpsMD
        runFunc = sim.export.lammps.RunLammps

    # run
    LammpsFiles, TrajFile = makeFunc(Sys, Prefix = Prefix, TrajFile = '.lammpstrj.gz',
                                     NStepsMin = StepsMin, NStepsEquil = StepsEquil,
                                     NStepsProd = StepsProd, WriteFreq = StepFreq)
    InFile, DataFile, TableFile, DihedralFile = LammpsFiles
    if useREMD:
        LogFile, ScreenFile, returncode = runFunc(InFile, Prefix = Prefix, Verbose = Verbose)
        #LogFile = Prefix + 'lammps.log' ; ScreenFile = Prefix + 'screen'
        TrajFiles, EneFiles = ReorderAllTraj(Sys, TrajFile, LogFile)
        ret = (TrajFiles, EneFiles, LogFile)
    else:
        LogFile, returncode = runFunc(InFile, Prefix = Prefix, Verbose = Verbose)
        ret = (TrajFile, LogFile)

    if DelTempFiles:
        # Clean up lammps scratch files.
        # NOTE(review): ScreenFile is only bound in the REMD branch above;
        # with useREMD False this loop would raise NameError -- confirm.
        for i in InFile, DataFile, TableFile, DihedralFile, ScreenFile:
            if os.path.isfile(i): os.remove(i)
        for i in range(len(Temps)):
            this_screenfile = Prefix + 'screen.%d' % i
            this_logfile = Prefix + 'lammps.log.%d' % i
            this_trajfile = Prefix + 'lammps.trj.%d' % i
            for i in this_screenfile, this_logfile, this_trajfile:
                if os.path.isfile(i): os.remove(i)
    return ret
|
elba7r/system | erpnext/stock/doctype/landed_cost_voucher/landed_cost_voucher.py | Python | gpl-3.0 | 4,474 | 0.023692 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from frappe.model.document import Document
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
class LandedCostVoucher(Document):
	"""Distributes landed costs (taxes and charges) across the stock items of
	one or more receipt documents, then reposts their stock and GL entries."""

	def get_items_from_purchase_receipts(self):
		# Rebuild the items table from the stock items of the selected
		# receipt documents (Purchase Receipt / Purchase Invoice rows).
		self.set("items", [])
		for pr in self.get("purchase_receipts"):
			if pr.receipt_document_type and pr.receipt_document:
				pr_items = frappe.db.sql("""select pr_item.item_code, pr_item.description,
					pr_item.qty, pr_item.base_rate, pr_item.base_amount, pr_item.name
					from `tab{doctype} Item` pr_item where parent = %s
					and exists(select name from tabItem where name = pr_item.item_code and is_stock_item = 1)
					""".format(doctype=pr.receipt_document_type), pr.receipt_document, as_dict=True)

				for d in pr_items:
					item = self.append("items")
					item.item_code = d.item_code
					item.description = d.description
					item.qty = d.qty
					item.rate = d.base_rate
					item.amount = d.base_amount
					item.receipt_document_type = pr.receipt_document_type
					item.receipt_document = pr.receipt_document
					item.purchase_receipt_item = d.name

		if self.get("taxes"):
			self.set_applicable_charges_for_item()

	def validate(self):
		self.check_mandatory()
		self.validate_purchase_receipts()
		self.set_total_taxes_and_charges()
		if not self.get("items"):
			self.get_items_from_purchase_receipts()
		else:
			self.set_applicable_charges_for_item()

	def check_mandatory(self):
		# Both receipt documents and charges are required before allocation.
		if not self.get("purchase_receipts"):
			frappe.throw(_("Please enter Receipt Document"))

		if not self.get("taxes"):
			frappe.throw(_("Please enter Taxes and Charges"))

	def validate_purchase_receipts(self):
		# Every referenced receipt must be submitted, and every item row must
		# point at one of the receipts listed in the purchase_receipts table.
		receipt_documents = []

		for d in self.get("purchase_receipts"):
			if frappe.db.get_value(d.receipt_document_type, d.receipt_document, "docstatus") != 1:
				frappe.throw(_("Receipt document must be submitted"))
			else:
				receipt_documents.append(d.receipt_document)

		for item in self.get("items"):
			if not item.receipt_document:
				frappe.throw(_("Item must be added using 'Get Items from Purchase Receipts' button"))
			elif item.receipt_document not in receipt_documents:
				frappe.throw(_("Item Row {idx}: {doctype} {docname} does not exist in above '{doctype}' table")
					.format(idx=item.idx, doctype=item.receipt_document_type, docname=item.receipt_document))

	def set_total_taxes_and_charges(self):
		self.total_taxes_and_charges = sum([flt(d.amount) for d in self.get("taxes")])

	def set_applicable_charges_for_item(self):
		# Allocate the total charges to each item pro-rata by the field
		# chosen in distribute_charges_based_on (e.g. qty or amount).
		based_on = self.distribute_charges_based_on.lower()
		total = sum([flt(d.get(based_on)) for d in self.get("items")])
		if not total:
			frappe.throw(_("Total {0} for all items is zero, may you should change 'Distribute Charges Based On'").format(based_on))

		for item in self.get("items"):
			item.applicable_charges = flt(item.get(based_on)) * flt(self.total_taxes_and_charges) / flt(total)

	def on_submit(self):
		self.update_landed_cost()

	def on_cancel(self):
		# Cancelling re-runs the same update so valuations are recomputed
		# without this voucher's (now-removed) charges.
		self.update_landed_cost()

	def update_landed_cost(self):
		for d in self.get("purchase_receipts"):
			doc = frappe.get_doc(d.receipt_document_type, d.receipt_document)

			# set landed cost voucher amount in pr item
			doc.set_landed_cost_voucher_amount()

			# set valuation amount in pr item
			doc.update_valuation_rate("items")

			# save will update landed_cost_voucher_amount and voucher_amount in PR,
			# as those fields are allowed to edit after submit
			doc.save()

			# update latest valuation rate in serial no
			self.update_rate_in_serial_no(doc)

			# update stock & gl entries for cancelled state of PR
			doc.docstatus = 2
			doc.update_stock_ledger(allow_negative_stock=True, via_landed_cost_voucher=True)
			doc.make_gl_entries_on_cancel(repost_future_gle=False)

			# update stock & gl entries for submit state of PR
			doc.docstatus = 1
			doc.update_stock_ledger(via_landed_cost_voucher=True)
			doc.make_gl_entries()

	def update_rate_in_serial_no(self, receipt_document):
		# Push the recomputed valuation rate down to every serial number of
		# each serialized item on the receipt.
		for item in receipt_document.get("items"):
			if item.serial_no:
				serial_nos = get_serial_nos(item.serial_no)
				if serial_nos:
					frappe.db.sql("update `tabSerial No` set purchase_rate=%s where name in ({0})"
						.format(", ".join(["%s"]*len(serial_nos))), tuple([item.valuation_rate] + serial_nos))
|
datacratic/Diamond-old | src/collectors/openstackswift/openstackswift.py | Python | mit | 3,925 | 0.00051 | # coding=utf-8
"""
Openstack swift collector.
#### Dependencies
* swift-dispersion-report commandline tool (for dispersion report)
if using this, make sure swift.conf and dispersion.conf are reable by diamond
also get an idea of the runtime of a swift-dispersion-report call and make
sure the collect interval is high enough to avoid contention.
* swift commandline tool (for container_metrics)
both of these should come installed with swift
"""
import diamond.collector
from subprocess import Popen, PIPE
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
class OpenstackSwiftCollector(diamond.collector.Collector):
    """Collects OpenStack Swift metrics: the optional swift-dispersion-report
    JSON output, and per-container stats obtained via the `swift stat` CLI."""

    def get_default_config_help(self):
        config_help = super(OpenstackSwiftCollector,
                            self).get_default_config_help()
        config_help.update({
            'enable_dispersion_report': 'gather swift-dispersion-report metrics'
            + ' (default False)',
            'enable_container_metrics': 'gather containers metrics'
            + '(# objects, bytes used, x_timestamp. default True)',
            'auth_url': 'authentication url (for enable_container_metrics)',
            'account': 'swift auth account (for enable_container_metrics)',
            'user': 'swift auth user (for enable_container_metrics)',
            'password': 'swift auth password (for enable_container_metrics)',
            'containers': 'containers on which to count number of objects, '
            + 'space separated list (for enable_container_metrics)'
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(OpenstackSwiftCollector, self).get_default_config()
        config.update({
            'path': 'openstackswift',
            'enable_dispersion_report': False,
            'enable_container_metrics': True,
            # don't use the threaded model with this one.
            # for some reason it crashes.
            'interval': 1200,  # by default, every 20 minutes
        })
        return config

    def collect(self):
        # dispersion report. this can take easily >60s. beware!
        if (self.config['enable_dispersion_report']):
            p = Popen(
                ['swift-dispersion-report', '-j'],
                stdout=PIPE,
                stderr=PIPE)
            stdout, stderr = p.communicate()
            # Each line on stderr (minus the trailing empty split) is counted
            # as one dispersion error.
            self.publish('dispersion.errors', len(stderr.split('\n')) - 1)
            data = json.loads(stdout)
            for t in ('object', 'container'):
                for (k, v) in data[t].items():
                    self.publish('dispersion.%s.%s' % (t, k), v)

        # container metrics returned by stat <container>
        if(self.config['enable_container_metrics']):
            account = '%s:%s' % (self.config['account'], self.config['user'])
            for container in self.config['containers'].split(','):
                cmd = ['swift', '-A', self.config['auth_url'],
                       '-U', account,
                       '-K', self.config['password'],
                       'stat', container]
                p = Popen(cmd, stdout=PIPE, stderr=PIPE)
                stdout, stderr = p.communicate()
                stats = {}
                # stdout is some lines in 'key : val' format
                for line in stdout.split('\n'):
                    if line:
                        line = line.split(':', 2)
                        stats[line[0].strip()] = line[1].strip()
                key = 'container_metrics.%s.%s' % (self.config['account'],
                                                   container)
                self.publish('%s.objects' % key, stats['Objects'])
                self.publish('%s.bytes' % key, stats['Bytes'])
                self.publish('%s.x_timestamp' % key, stats['X-Timestamp'])
|
ericholscher/django | django/conf/urls/__init__.py | Python | bsd-3-clause | 2,589 | 0.003476 | from importlib import import_module
from django.core.urlresolvers import (RegexURLPattern,
RegexURLResolver, LocaleRegexURLResolver)
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
__all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include', 'patterns', 'url']
handler400 = 'django.views.defaults.bad_request'
handler403 = 'django.views.defaults.permission_denied'
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
def include(arg, namespace=None, app_name=None):
    """Resolve *arg* into a (urlconf_module, app_name, namespace) 3-tuple
    suitable for use as the view of a url() entry.

    *arg* may be a dotted module path, a module/patterns object, or a
    3-tuple that already carries its own app_name/namespace hints.
    """
    if isinstance(arg, tuple):
        # callable returning a namespace hint
        if namespace:
            raise ImproperlyConfigured('Cannot override the namespace for a dynamic module that provides a namespace')
        urlconf_module, app_name, namespace = arg
    else:
        # No namespace hint - use manually provided namespace
        urlconf_module = arg

    if isinstance(urlconf_module, six.string_types):
        # Dotted path: import the module so its urlpatterns can be read.
        urlconf_module = import_module(urlconf_module)
    patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)

    # Make sure we can iterate through the patterns (without this, some
    # testcases will break).
    if isinstance(patterns, (list, tuple)):
        for url_pattern in patterns:
            # Test if the LocaleRegexURLResolver is used within the include;
            # this should throw an error since this is not allowed!
            if isinstance(url_pattern, LocaleRegexURLResolver):
                raise ImproperlyConfigured(
                    'Using i18n_patterns in an included URLconf is not allowed.')

    return (urlconf_module, app_name, namespace)
def patterns(prefix, *args):
    """Apply *prefix* to each entry and return the list of URL patterns.

    Tuples/lists are expanded through url(); existing RegexURLPattern
    instances just get the prefix added; anything else passes through.
    """
    resolved = []
    for entry in args:
        if isinstance(entry, (list, tuple)):
            entry = url(prefix=prefix, *entry)
        elif isinstance(entry, RegexURLPattern):
            entry.add_prefix(prefix)
        resolved.append(entry)
    return resolved
def url(regex, view, kwargs=None, name=None, prefix=''):
    """Build a RegexURLResolver for include() tuples, otherwise a RegexURLPattern."""
    if isinstance(view, (list, tuple)):
        # The view is the 3-tuple produced by include(...).
        urlconf_module, app_name, namespace = view
        return RegexURLResolver(regex, urlconf_module, kwargs,
                                app_name=app_name, namespace=namespace)
    if isinstance(view, six.string_types):
        # Dotted-path view name: must be non-empty, and gets the prefix applied.
        if not view:
            raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
        if prefix:
            view = prefix + '.' + view
    return RegexURLPattern(regex, view, kwargs, name)
|
sffjunkie/hiss | src/hiss/handler/kodi/jsonrpc/__init__.py | Python | apache-2.0 | 145 | 0.013793 | # Copyright (c) 2014 Simon Kennedy <sffjunkie+code@gmail.com>.
class RPCError(Exception):
    """Base exception for RPC failures.

    (Original bodies were garbled ``pa | ss`` extraction artifacts;
    docstring-only bodies restore valid syntax.)
    """


class RPCResponseError(RPCError):
    """Error subtype tied to an RPC response."""
|
iniverno/RnR-LLC | simics-3.0-install/simics-3.0.31/amd64-linux/lib/python/mod_ppc440gx_turbo_commands.py | Python | gpl-2.0 | 293 | 0.006826 | import ppc_commands
# Register CLI command support for the ppc440gx model with the generic
# PowerPC command infrastructure. (Garbled ' | ' artifacts on the
# assignment and the enable_generic call have been reconstructed.)
ppc_model = 'ppc440gx'
funcs = {}
ppc_commands.setup_local_functions(ppc_model, funcs)
# NOTE(review): class_funcs appears unused in this file — possibly
# consumed elsewhere by the simulator framework; confirm before removing.
class_funcs = { ppc_model: funcs }
ppc_commands.enable_generic_ppc_commands(ppc_model)
ppc_commands.enable_4xx_tlb_commands(ppc_model)
ppc_commands.enable_440_tlb_commands(ppc_model)
|
lgrech/MapperPy | mapperpy/test/conversions/test_datetime_conversion.py | Python | bsd-3-clause | 6,813 | 0.003816 | import | unittest
from assertpy import assert_that
from mapperpy.test.common_test_classes import *
| from mapperpy import OneWayMapper, ObjectMapper
from datetime import datetime
__author__ = 'lgrech'
class DateTimeConversionTest(unittest.TestCase):
    """Tests for automatic datetime <-> ISO-8601 string conversion in
    OneWayMapper / ObjectMapper.

    NOTE(review): ``unicode`` and ``exception.message`` are used below,
    so this suite targets Python 2.
    """
    def test_map_from_datetime_it_target_type_not_known(self):
        # given
        mapper = OneWayMapper.for_target_class(TestClassSomePropertyEmptyInit2)
        test_datetime = datetime.now()
        # when
        mapped_object = mapper.map(TestClassSomePropertyEmptyInit1(
            some_property="some_value",
            some_property_02=test_datetime))
        # then: with no target prototype the datetime passes through unchanged
        assert_that(mapped_object).is_instance_of(TestClassSomePropertyEmptyInit2)
        assert_that(mapped_object.some_property).is_equal_to("some_value")
        assert_that(mapped_object.some_property_02).is_equal_to(test_datetime)
    def test_map_from_datetime_to_string(self):
        # given: prototype declares the target attribute as a string
        mapper = OneWayMapper.for_target_prototype(TestClassSomePropertyEmptyInit2(some_property_02="string"))
        test_datetime = datetime.now()
        # when
        mapped_object = mapper.map(TestClassSomePropertyEmptyInit1(
            some_property="some_value",
            some_property_02=test_datetime))
        # then: datetime is serialized to its ISO-8601 form
        assert_that(mapped_object).is_instance_of(TestClassSomePropertyEmptyInit2)
        assert_that(mapped_object.some_property).is_equal_to("some_value")
        assert_that(mapped_object.some_property_02).is_equal_to(test_datetime.isoformat())
    def test_map_from_string_to_datetime_wrong_format_should_raise_exception(self):
        # given
        mapper = OneWayMapper.for_target_prototype(TestClassSomePropertyEmptyInit2(some_property_02=datetime.now()))
        # when
        with self.assertRaises(ValueError) as context:
            mapper.map(TestClassSomePropertyEmptyInit1(some_property_02="wrong_date_format"))
        # then: the offending value is reported in the error message
        assert_that(context.exception.message).contains("wrong_date_format")
    def test_map_from_string_to_datetime(self):
        # given
        mapper = OneWayMapper.for_target_prototype(TestClassSomePropertyEmptyInit2(some_property_02=datetime.now()))
        test_datetime = datetime.now()
        # when
        mapped_object = mapper.map(TestClassSomePropertyEmptyInit1(
            some_property="some_value",
            some_property_02=test_datetime.isoformat()))
        # then
        assert_that(mapped_object).is_instance_of(TestClassSomePropertyEmptyInit2)
        assert_that(mapped_object.some_property).is_equal_to("some_value")
        assert_that(mapped_object.some_property_02).is_instance_of(datetime)
        assert_that(mapped_object.some_property_02).is_equal_to(test_datetime)
    def test_map_both_ways(self):
        # given: bidirectional mapper between a string-typed and a
        # datetime-typed attribute
        mapper = ObjectMapper.from_prototype(TestClassSomePropertyEmptyInit1(some_property_02="str"),
                                             TestClassSomePropertyEmptyInit2(some_property_02=datetime.now()))
        test_datetime = datetime.now()
        # when
        mapped_object = mapper.map(TestClassSomePropertyEmptyInit1(
            some_property="some_value",
            some_property_02=test_datetime.isoformat()))
        # then
        assert_that(mapped_object).is_instance_of(TestClassSomePropertyEmptyInit2)
        assert_that(mapped_object.some_property).is_equal_to("some_value")
        assert_that(mapped_object.some_property_02).is_instance_of(datetime)
        assert_that(mapped_object.some_property_02).is_equal_to(test_datetime)
        # when: mapping in the reverse direction
        mapped_object_rev = mapper.map(TestClassSomePropertyEmptyInit2(
            some_property="some_value",
            some_property_02=test_datetime))
        # then
        assert_that(mapped_object_rev).is_instance_of(TestClassSomePropertyEmptyInit1)
        assert_that(mapped_object_rev.some_property).is_equal_to("some_value")
        assert_that(mapped_object_rev.some_property_02).is_instance_of(str)
        assert_that(mapped_object_rev.some_property_02).is_equal_to(test_datetime.isoformat())
    def test_map_from_unicode_string(self):
        # given
        mapper = ObjectMapper.from_prototype(TestClassSomePropertyEmptyInit1(),
                                             TestClassSomePropertyEmptyInit2(some_property_02=datetime.now()))
        test_datetime = datetime.now()
        # when: source value is a unicode (py2) ISO string
        mapped_object = mapper.map(TestClassSomePropertyEmptyInit1(
            some_property="some_value",
            some_property_02=unicode(test_datetime.isoformat())))
        # then
        assert_that(mapped_object).is_instance_of(TestClassSomePropertyEmptyInit2)
        assert_that(mapped_object.some_property).is_equal_to("some_value")
        assert_that(mapped_object.some_property_02).is_instance_of(datetime)
        assert_that(mapped_object.some_property_02).is_equal_to(test_datetime)
    def test_map_string_without_millis_to_datetime(self):
        # given
        mapper = OneWayMapper.for_target_prototype(TestClassSomePropertyEmptyInit2(some_property=datetime.now()))
        # when: ISO string with no fractional seconds
        mapped_object = mapper.map(TestClassSomePropertyEmptyInit1(
            some_property="2015-11-02T18:14:42"))
        # then
        assert_that(mapped_object).is_instance_of(TestClassSomePropertyEmptyInit2)
        assert_that(mapped_object.some_property).is_instance_of(datetime)
        assert_that(mapped_object.some_property).is_equal_to(datetime(2015, 11, 2, 18, 14, 42, 0))
    def test_map_string_with_millis_to_datetime(self):
        # given
        mapper = OneWayMapper.for_target_prototype(TestClassSomePropertyEmptyInit2(some_property=datetime.now()))
        # when: ISO string with microseconds
        mapped_object = mapper.map(TestClassSomePropertyEmptyInit1(
            some_property="2015-11-02T18:14:42.000123"))
        # then
        assert_that(mapped_object).is_instance_of(TestClassSomePropertyEmptyInit2)
        assert_that(mapped_object.some_property).is_instance_of(datetime)
        assert_that(mapped_object.some_property).is_equal_to(datetime(2015, 11, 2, 18, 14, 42, 123))
    def test_map_attr_value_with_string_to_datetime_conversion(self):
        # given
        mapper = OneWayMapper.for_target_prototype(TestClassSomePropertyEmptyInit1(some_property_02=datetime.now()))
        # then: single-attribute conversion API parses the ISO string
        assert_that(mapper.map_attr_value("some_property_02", "2015-11-02T18:14:42")).is_equal_to(
            datetime(2015, 11, 2, 18, 14, 42, 0))
    def test_map_attr_value_with_datetime_to_string_conversion(self):
        # given
        mapper = OneWayMapper.for_target_prototype(TestClassSomePropertyEmptyInit1(some_property_02=""))
        # then: single-attribute conversion API serializes the datetime
        assert_that(mapper.map_attr_value("some_property_02", datetime(2015, 11, 2, 18, 14, 42, 123))).is_equal_to(
            "2015-11-02T18:14:42.000123")
|
diefans/debellator | src/implant/master.py | Python | apache-2.0 | 5,656 | 0.001768 | """Controlles a bunch of remotes."""
import asyncio
import functools
import logging
import os
import pathlib
import signal
import sys
import traceback
from implant import commands, connect, core, testing
# Module-level logger for the master process.
log = logging.getLogger(__name__)
# setuptools entry-point group under which implant plugins register.
PLUGINS_ENTRY_POINT_GROUP = 'implant.plugins'
def parse_command(line):
    """Parse a command from line.

    The first space-separated token is the command name. Remaining tokens
    of the form ``key=value`` become keyword arguments; all other tokens
    become positional arguments.

    :param line: command line string, e.g. ``"run x k=v"``
    :returns: ``(command, args, kwargs)`` tuple
    """
    args = []
    kwargs = {}
    command, *parts = line.split(' ')
    for part in parts:
        if '=' in part:
            # Split on the first '=' only, so values may themselves contain
            # '=' (the original unbounded split raised ValueError there).
            key, value = part.split('=', 1)
            kwargs[key] = value
        else:
            args.append(part)
    return command, args, kwargs
async def _execute_command(io_queues, line):
    """Translate *line* into a remote command and execute it via *io_queues*.

    A few single-character shortcut lines expand to canned commands.
    Returns the command result, or ``None`` when execution raised (the
    error is logged rather than propagated).
    """
    shortcuts = {
        b'e\n': (b'implant.commands:Echo data=bar\n', {}),
        b'i\n': (b'implant.core:InvokeImport fullname=implant.commands\n', {}),
        b'\n': (b'implant.commands:SystemLoad data=bar\n', {}),
    }
    try:
        line, _ = shortcuts[line]
    except KeyError:
        pass
    command_name, _, params = parse_command(line[:-1].decode())
    log.info("sending: %s %s", command_name, params)
    try:
        return await io_queues.execute(command_name, **params)
    except Exception:  # noqa
        log.error("Error:\n%s", traceback.format_exc())
        return None
async def log_remote_stderr(remote):
    """Forward a remote process's stderr stream to the debug log."""
    # await remote.launched()
    if not remote.stderr:
        return
    log.info("Logging remote stderr: %s", remote)
    async for raw_line in remote.stderr:
        log.debug("\tRemote #%s: %s", remote.pid, raw_line[:-1].decode())
class Console:
    """Interactive driver: launches remotes and feeds stdin lines to them.

    ``connectors`` maps connector instances to the default keyword
    arguments used when launching each remote.

    (Two lines in ``run()`` were garbled by ' | ' extraction artifacts
    — ``asyncio | .ensure_future`` and ``self.l | oop`` — and have been
    reconstructed.)
    """

    def __init__(self, connectors, *, loop=None, **options):
        self.loop = loop if loop is not None else asyncio.get_event_loop()
        self.options = options
        self.connectors = connectors

    async def feed_stdin_to_remotes(self, remotes):
        """Read stdin line by line and broadcast each line to all remotes.

        On EOF, cancellation or error, every remote's tasks are cancelled
        and awaited so nothing is left running.
        """
        try:
            async with core.Incomming(pipe=sys.stdin, loop=self.loop) as reader:
                while True:
                    line = await reader.readline()
                    if line == b'':
                        break
                    result = await asyncio.gather(
                        *(_execute_command(remote, line) for remote, *_ in remotes.values()),
                        loop=self.loop
                    )
                    print("< {}\n >".format(result), end="")
        except asyncio.CancelledError:
            log.info("Terminating...")
        except Exception as ex:
            # Best effort: log and fall through to the teardown below.
            log.info(ex)
        # Tear down each remote: cancel and await both its communication
        # task and its stderr-logging task.
        for remote, fut_remote, error_log in remotes.values():
            fut_remote.cancel()
            await fut_remote
            error_log.cancel()
            await error_log

    async def connect(self):
        """Launch one remote per configured connector.

        Returns a dict mapping connector -> (remote, communicate task,
        stderr-logging task).
        """
        remotes = {}
        for connector, default_args in self.connectors.items():
            if remotes.get(connector, None) is not None:
                log.warning('Process for %s already launched! Skipping...', connector)
                continue
            remote = await connector.launch(
                options=self.options, **default_args, loop=self.loop
            )
            fut_remote = asyncio.ensure_future(remote.communicate(), loop=self.loop)
            error_log = asyncio.ensure_future(log_remote_stderr(remote), loop=self.loop)
            remotes[connector] = (remote, fut_remote, error_log)
        return remotes

    async def run(self):
        """Run until SIGINT: connect the remotes, then pump stdin to them."""
        never_ending = asyncio.Future(loop=self.loop)
        remotes = await self.connect()
        feeder = asyncio.ensure_future(self.feed_stdin_to_remotes(remotes), loop=self.loop)

        def _sigint_handler():
            log.info('SIGINT...')
            never_ending.cancel()

        self.loop.add_signal_handler(signal.SIGINT, _sigint_handler)
        try:
            await never_ending
        except asyncio.CancelledError:
            log.debug('Cancelled')
        # Stop the stdin feeder; its own cleanup cancels the remotes.
        feeder.cancel()
        await feeder
def main(debug=False, log_config=None):
    """Entry point: build a Console over the configured connectors and run
    it on the default event loop until interrupted.

    :param debug: passed through to remotes via options
    :param log_config: passed through to remotes via options
    """
    log.info('deballator master process: %s', os.getpid())
    loop = asyncio.get_event_loop()
    # replace existing signal handler with noop as long as our remotes are not fully running
    # otherwise cancellation of process startup will lead to orphaned remote processes
    def noop():
        log.error('Noop on signal SIGINT')
    loop.add_signal_handler(signal.SIGINT, noop)
    options = {
        'debug': debug,
        'log_config': log_config,
        # 'venv': False,
        # 'venv': True,
        # 'venv': '~/.implant',
    }
    # if debug:
    #     log.setLevel(logging.DEBUG)
    # Connectors below are alternates; only the local connector is active.
    console = Console({
        # testing.PipeConnector(loop=loop): {},
        connect.Local(): {
            'python_bin': pathlib.Path('~/.pyenv/versions/3.5.2/bin/python').expanduser(),
        },
        # connect.Ssh(hostname='localhost'): {
        #     'python_bin': pathlib.Path('~/.pyenv/versions/3.5.2/bin/python').expanduser(),
        # },
        # connect.Lxd(
        #     container='zesty',
        #     hostname='localhost',
        # ): {
        #     'python_bin': pathlib.Path('/usr/bin/python3').expanduser()
        # },
    }, loop=loop, **options)
    task = asyncio.ensure_future(console.run())
    try:
        loop.run_until_complete(task)
    except KeyboardInterrupt:
        # Cancel the console task and wait for its cleanup to finish.
        log.error('Keyboard interrupt...')
        task.cancel()
        loop.run_until_complete(task)
    except BaseException as ex:
        core.log.error("Error %s:\n%s", type(ex), traceback.format_exc())
    finally:
        # Report any tasks still pending before shutting the loop down.
        for task in asyncio.Task.all_tasks():
            if not task.done():
                log.error("pending: %s", task)
        log.info(' - '.join(["this is the end"] * 3))
        loop.stop()
        loop.close()
|
tombstone/models | research/delf/delf/python/training/model/export_global_model.py | Python | apache-2.0 | 5,972 | 0.004856 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIN | D, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export g | lobal feature tensorflow inference model.
This model includes image pyramids for multi-scale processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import delf_model
from delf.python.training.model import export_model_utils
# Command-line flags controlling checkpoint location, export destination,
# input scales and how multi-scale descriptors are pooled/normalized.
FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt_path', '/tmp/delf-logdir/delf-weights',
                    'Path to saved checkpoint.')
flags.DEFINE_string('export_path', None, 'Path where model will be exported.')
flags.DEFINE_list(
    'input_scales_list', None,
    'Optional input image scales to use. If None (default), an input end-point '
    '"input_scales" is added for the exported model. If not None, the '
    'specified list of floats will be hard-coded as the desired input scales.')
flags.DEFINE_enum(
    'multi_scale_pool_type', 'None', ['None', 'average', 'sum'],
    "If 'None' (default), the model is exported with an output end-point "
    "'global_descriptors', where the global descriptor for each scale is "
    "returned separately. If not 'None', the global descriptor of each scale is"
    ' pooled and a 1D global descriptor is returned, with output end-point '
    "'global_descriptor'.")
flags.DEFINE_boolean('normalize_global_descriptor', False,
                     'If True, L2-normalizes global descriptor.')
class _ExtractModule(tf.Module):
  """Helper module to build and save global feature model."""

  def __init__(self,
               multi_scale_pool_type='None',
               normalize_global_descriptor=False,
               input_scales_tensor=None):
    """Initialization of global feature model.

    Args:
      multi_scale_pool_type: Type of multi-scale pooling to perform.
      normalize_global_descriptor: Whether to L2-normalize global descriptor.
      input_scales_tensor: If None, the exported function to be used should be
        ExtractFeatures, where an input end-point "input_scales" is added for
        the exported model. If not None, the specified 1D tensor of floats will
        be hard-coded as the desired input scales, in conjunction with
        ExtractFeaturesFixedScales.
    """
    self._multi_scale_pool_type = multi_scale_pool_type
    self._normalize_global_descriptor = normalize_global_descriptor
    if input_scales_tensor is None:
      self._input_scales_tensor = []
    else:
      self._input_scales_tensor = input_scales_tensor
    # Setup the DELF model for extraction.
    self._model = delf_model.Delf(block3_strides=False, name='DELF')

  def LoadWeights(self, checkpoint_path):
    # Restore trained weights into the underlying DELF model.
    self._model.load_weights(checkpoint_path)

  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image'),
      tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
      tf.TensorSpec(
          shape=[None], dtype=tf.int32, name='input_global_scales_ind')
  ])
  def ExtractFeatures(self, input_image, input_scales, input_global_scales_ind):
    # Delegate multi-scale extraction to the shared export utility; only the
    # backbone (not the attention head) is needed for global descriptors.
    extracted_features = export_model_utils.ExtractGlobalFeatures(
        input_image,
        input_scales,
        input_global_scales_ind,
        lambda x: self._model.backbone.build_call(x, training=False),
        multi_scale_pool_type=self._multi_scale_pool_type,
        normalize_global_descriptor=self._normalize_global_descriptor)
    # Output naming depends on whether per-scale descriptors were pooled.
    named_output_tensors = {}
    if self._multi_scale_pool_type == 'None':
      named_output_tensors['global_descriptors'] = tf.identity(
          extracted_features, name='global_descriptors')
    else:
      named_output_tensors['global_descriptor'] = tf.identity(
          extracted_features, name='global_descriptor')
    return named_output_tensors

  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image')
  ])
  def ExtractFeaturesFixedScales(self, input_image):
    # Convenience wrapper: uses the hard-coded scales supplied at init time.
    return self.ExtractFeatures(input_image, self._input_scales_tensor,
                                tf.range(tf.size(self._input_scales_tensor)))
def main(argv):
  """Build the export module, restore weights and write the SavedModel."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  export_path = FLAGS.export_path
  # Refuse to overwrite an existing export directory.
  if os.path.exists(export_path):
    raise ValueError('export_path %s already exists.' % export_path)

  if FLAGS.input_scales_list is None:
    input_scales_tensor = None
  else:
    input_scales_tensor = tf.constant(
        [float(s) for s in FLAGS.input_scales_list],
        dtype=tf.float32,
        shape=[len(FLAGS.input_scales_list)],
        name='input_scales')
  module = _ExtractModule(FLAGS.multi_scale_pool_type,
                          FLAGS.normalize_global_descriptor,
                          input_scales_tensor)

  # Load the weights.
  checkpoint_path = FLAGS.ckpt_path
  module.LoadWeights(checkpoint_path)
  print('Checkpoint loaded from ', checkpoint_path)

  # Save the module, choosing the serving signature that matches whether
  # input scales are fixed or supplied at inference time.
  if FLAGS.input_scales_list is None:
    served_function = module.ExtractFeatures
  else:
    served_function = module.ExtractFeaturesFixedScales

  tf.saved_model.save(
      module, export_path, signatures={'serving_default': served_function})


if __name__ == '__main__':
  app.run(main)
|
s390guy/SATK | asma/asmbase.py | Python | gpl-3.0 | 85,139 | 0.016127 | #!/usr/bin/python3.3
# Copyright (C) 2015-2017 Harold Grovesteen
#
# This f | ile is part of SATK.
#
# SATK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# | SATK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SATK. If not, see <http://www.gnu.org/licenses/>.
# This module provides base classes used in multiple places within ASMA
# Module name used in error locations reported via assembler.eloc().
this_module="asmbase.py"
# Python imports: None
# SATK imports:
import fsmparser # Access Finite-State-Machine-based parsing technology
import lexer # Access some objects
import pratt3 # Identify pratt literals
# ASMA imports:
import assembler
import asmtokens
import lnkbase
# This class supports expressions as lexical tokens, pratt token creation and
# evaluation.
class ASMExpr(object):
    """Base class for an assembler expression held as a list of lexical
    tokens plus, once prepared, the Pratt expression used to evaluate it.

    Subclasses supply prepare() to convert self.tokens into self.pratt and
    may enable a single-token fast path via self.quick.
    """
    def __init__(self,tokens):
        assert isinstance(tokens,list),\
            "'tokens' argument must be a list: %s" % tokens
        self.tokens=tokens # Lexical token list comprising the expression
        # Pratt expression for evaluation (see prepare_arith or prepare_binary)
        self.pratt=None
        # Set to the pratt token when a quick evaluation can be performed.
        self.quick=False
    # Returns the number of lexical token in the list of lexical tokens
    def __len__(self):
        return len(self.tokens)
    def __str__(self):
        return "%s: %s" % (self.__class__.__name__,self.tokens)
    # Print (or return, when string=True) a readable rendering of the token
    # list, recursing into embedded ASMOperand objects.
    def display(self,indent="",string=False):
        s=""
        lindent="%s " % indent # NOTE(review): appears unused in this method
        for t in self.tokens:
            if isinstance(t,ASMOperand):
                s="%s%s" % (s,t.display(indent=indent,string=True))
            else:
                s='%s, "%s"' % (s,t.string)
        # Strip the leading ", " separator introduced by the loop above
        if s[0:2]==", ":
            s=s[2:]
        if string:
            return s
        print(s)
    # Evaluate the prepared expression.  prepare() must have been called.
    def evaluate(self,external,debug=False,trace=False):
        assert self.pratt is not None,"%s pratt attribute is None" \
            % assembler.eloc(self,"evaluate",module=this_module)
        # Quick execution if the expression is a single pratt Token. No need to
        # invoke the parser with its context generation, etc.
        if self.quick:
            return self.quick.value(external,debug=debug,trace=trace)
        # Use the Pratt parser to evaluate the expression
        return self.pratt.evaluate(external,debug=debug,trace=trace)
    # Finds the first tid in an expression list. Used mainly for address expressions
    # that need to find the first tid. The user must know the TID.
    # NOTE(review): the mutable default tid=[] is only read, never mutated,
    # so the shared default is harmless here.
    def find_first_ltok(self,tid=[]):
        if not isinstance(tid,list):
            # Scan tokens for the requested TID
            for tok in self.tokens:
                if tok.tid == tid:
                    return tok
        else:
            # Scan tokens for a TID in the requested list
            for tok in self.tokens:
                if tok.tid in tid:
                    return tok
        return None # Not found
    # Locate the first prepared Pratt token that is an instance of 'cls'
    # (a class or a list of classes).  prepare() must have been called.
    def find_first_ptok(self,lineno,cls=[],debug=False):
        assert self.pratt is not None,"%s [%s] pratt attribute is None" \
            % (assembler.eloc(self,"find_first_ptok",module=this_module),lineno)
        assert len(self.pratt.toks)>0,"%s [%s] pratt.toks length: zero" \
            % (assembler.eloc(self,"find_first_ptok",module=this_module),lineno)
        if not isinstance(cls,list):
            for ptok in self.pratt.toks:
                if __debug__:
                    if debug:
                        print("%s [%s] ptok: %s" \
                            % (assembler.eloc(self,"find_first_ptok",\
                                module=this_module),lineno,ptok.__class__.__name__))
                        print("%s [%s] class: %s" \
                            % (assembler.eloc(self,"find_first_ptok",\
                                module=this_module),lineno,cls))
                if isinstance(ptok,cls):
                    return ptok
        else:
            for ptok in self.pratt.toks:
                if __debug__:
                    if debug:
                        print("%s [%s] ptok: %s" \
                            % (assembler.eloc(self,"find_first_ptok",\
                                module=this_module),lineno,ptok.__class__.__name__))
                for c in cls:
                    if __debug__:
                        if debug:
                            print("%s [%s] class: %s" \
                                % (assembler.eloc(self,"find_first_ptok",\
                                    module=this_module),lineno,c))
                    if isinstance(ptok,c):
                        return ptok
        return None # Not found
    # Subclass hook: convert self.tokens into the evaluatable Pratt
    # expression (self.pratt).
    def prepare(self,stmt,desc,debug=False):
        raise NotImplementedError("%s subclass %s must supply prepare() method" \
            % (assembler.eloc(self,"prepare",module=this_module),\
                self.__class__.__name__))
class ASMExprArith(ASMExpr):
def __init__(self,tokens):
super().__init__(tokens)
# Whether the expression references the current location counter. See
# the prepare() method.
self.loc_ctr=False
def evaluate(self,external,debug=False,trace=False):
assert self.pratt is not None,"%s pratt attribute is None" \
% assembler.eloc(self,"evaluate",module=this_module)
# Quick execution if the expression is a single pratt Token. No need to
# invoke the parser with its overhead, context generation, etc.
if self.quick:
val=self.quick.value(external,debug=debug,trace=trace)
if isinstance(val,lnkbase.SectAddr) and val.isRelative():
# Quick expressions avoid invoking the Pratt expression processing
# by returning the single Pratt token itself. When the value
# of the token is a section relative address the same object
# may end up being made absolute more than once. When that happens
# an uncaught exception is raised during post pass 1 processing.
# This can occur with an EQU when the first operand is a single
# label referencing a section relative address.
# By placing the fix here (rather than elsewhere) it addresses the
# problem with EQU and any other potential place with the same
# issue.
return val.clone()
return val
#return self.quick.value(external,debug=debug,trace=trace)
# Use the Pratt parser to evaluate the expression
return self.pratt.evaluate(external,debug=debug,trace=trace)
# Preparse the expression for execution by converting lexical tokens into
# a list of Pratt tokens within a Pratt arithmetic expression object.
# Method Arguments:
# stmt The statement in which the expression is used.
# desc A description of the expression for debugging purposes.
def prepare(self,stmt,desc,debug=False):
assert self.pratt is None,\
"%s pratt attribute not None: %s" \
% (assembler.eloc(self,"prepare",module=this_module),self.pratt)
# Create an empty Pratt arithmetic expression
pexpr=asmtokens.ArithExpr(desc,stmt.lineno,tokens=[])
if __debug__:
if debug:
print("%s: %s expr before: %s" % (desc,lineno,expr))
# Now popultate it with pratt tokens from lexical tokens usign their
# atoken() method.
for ltok in self.tokens:
if isinstance(ltok,CTerm):
# Note currently CTerm is only used by macro directives so it is
# not sensitive to location counter usage.
|
naokiur/circle-ci-demo | backend/employee/api/views.py | Python | apache-2.0 | 863 | 0 | import time
from datetime import datetime
from logging import getLogger
# from django.contrib.auth.models import User, Group
from rest_framework.views import APIView
from employee.api.models import Employee, Login
# from employee.api.serializers import UserSerializer, GroupSerializer
from employee.api.serializers import DummySerializer, LoginSerializer
from employee.common.constants import LOGGER_NAME
from rest_framework import viewsets
from rest_framework.response import Response
class LoginViewSet(APIView):
    """API view exposing Login records.

    NOTE(review): the serializer call is commented out, so GET currently
    logs the queryset and returns an empty 200 response.
    (Garbled ' | ' artifacts in ``get``'s signature and body were fixed.)
    """

    def __init__(self, *args, **kwargs):
        # Forward any view-init kwargs and run APIView's own initialization;
        # the original override silently dropped both.
        super().__init__(*args, **kwargs)
        self.logger = getLogger(LOGGER_NAME)

    def get(self, request, format=None):
        """Log all Login rows and return an empty response."""
        self.logger.info('get method')
        queryset = Login.objects.all()
        self.logger.info(queryset)
        # serializer = LoginSerializer(queryset)
        # return Response(serializer.data)
        return Response()
|
olivierkes/bible_libre | biblification_2.py | Python | unlicense | 3,439 | 0.001459 | #!/usr/bin/python
# -*- coding: utf8 -*-
import csv
import argparse
import re
if __name__ == "__main__":
# Parsing arguments
parser = argparse.ArgumentParser(description='This generates a t2t bible.')
parser.add_argument('-p', '--plan', help='plan to be used',
default="nouveau-testament-commente")
parser.add_argument('-v', help='show verses references',
action='store_const', const=True, default=False)
parser.add_argument('-m', help='show marks only',
action='store_const', const=True, default=False)
parser.add_argument('-t', help='show references in titles',
action='store_const', const=True, default=False)
args = parser.parse_args()
plan = args.plan
showVerse = args.v
showMarks = args.m
showInTitles = args.t
text = ""
def parseText(t):
"Format verse references in the chosen way."
a = re.compile('\[(\d*)\:(\d*)\]')
if showVerse:
s = | r':sup:`\1:\2` '
elif showMarks:
s = r"° "
else:
s = r""
t = a.sub(s, t)
return t
def getText(book, startChapter, startVerse, endChapter, endVerse):
"Renvoie le texte demandé."
r = ""
f = open('textes/' + book + ".txt", 'r')
text = f.read()
f | .close()
start = text.find("[{}:{}]".format(startChapter, startVerse))
end = text.find("[{}:{}]".format(endChapter, str(int(endVerse) + 1)))
if end < 0: # Chapitre suivant
end = text.find("[{}:{}]".format(str(int(endChapter) + 1), 1))
if end < 0: # Fin du livre
end = len(text)
return parseText(text[start:end])
def makeTitle(row):
"Renvoie un titre formatté comme il faut."
charSet = "#=-~_"
titre = row[6]
if showInTitles: # ajoute la référence si demandée
if row[2] == row[4]:
if row[3] == row[5]:
tt = "{},{}".format(row[2], row[3])
else:
tt = "{},{}-{}".format(row[2], row[3], row[5])
else:
tt = "{},{} – {},{}".format(row[2], row[3], row[4], row[5])
# Ajoute la référence au titre, différement suivant le niveau
if row[1] == "1":
titre = "{}: {}".format(tt, titre)
else:
titre = "{} ({})".format(titre, tt)
t = "\n\n" + titre + "\n" + charSet[int(row[1])] * len(titre) + "\n"
return t
with open('plans/' + plan + ".csv", 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
r = 0
struct = []
for row in reader:
if r != 0:
struct.append(row)
r += 1
for i in range(len(struct)):
# Row: 0 = Livre 1 = Niveau 2 = chapitre début
# 3 = verset debut 4 = chapitre fin 5 = verset fin
# 6 = Titre
row = struct[i]
nextRow = -1
text += makeTitle(row)
if i != len(struct) - 1:
nextRow = struct[i + 1]
if nextRow != -1 and nextRow[2] == row[2] and nextRow[3] == row[3]:
pass
else:
text += getText(row[0], row[2], row[3], row[4], row[5])
print text
|
huggingface/pytorch-transformers | src/transformers/models/bart/modeling_tf_bart.py | Python | apache-2.0 | 70,462 | 0.003917 | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Bart model. """
import random
from typing import Dict, Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
# Public API
from ...modeling_tf_utils import (
DUMMY_INPUTS,
TFCausalLanguageModelingLoss,
TFPreTrained | Model,
TFSharedEmbeddings,
TFWrappedEmbeddings,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_bart import BartConfig
logger = logging.get_logger(__name__)
# Identifiers referenced by the auto-generated docstring decorators below.
_CHECKPOINT_FOR_DOC = "facebook/bart-large"
_CONFIG_FOR_DOC = "BartConfig"
_TOKENIZER_FOR_DOC = "BartTokenizer"
# Additive attention-mask bias applied to masked positions.
LARGE_NEGATIVE = -1e8
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """Shift `input_ids` one token to the right and prepend `decoder_start_token_id`.

    Any -100 values (the loss-ignore marker in labels) are replaced with
    `pad_token_id`, so the result is usable as decoder input ids.
    (The original def line was garbled by a ' | ' extraction artifact.)
    """
    shifted_input_ids = tf.roll(input_ids, 1, axis=-1)
    start_tokens = tf.fill((shape_list(shifted_input_ids)[0], 1), decoder_start_token_id)
    shifted_input_ids = tf.concat([start_tokens, shifted_input_ids[:, 1:]], -1)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = tf.where(
        shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
    )

    if tf.executing_eagerly():
        # "Verify that `labels` has only positive values and -100"
        assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0))

        # Make sure the assertion op is called by wrapping the result in an identity no-op
        with tf.control_dependencies([assert_gte0]):
            shifted_input_ids = tf.identity(shifted_input_ids)

    return shifted_input_ids
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make causal mask used for uni-directional (decoder) self-attention:
    position i may attend only to positions <= i.
    """
    bsz, tgt_len = input_ids_shape
    # Start fully masked (large negative bias) ...
    mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
    mask_cond = tf.range(shape_list(mask)[-1])
    # ... then zero the lower triangle (j <= i) so those positions are visible.
    mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)

    if past_key_values_length > 0:
        # Cached (past) positions are always attendable: prepend zero columns.
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)

    return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
    """
    Broadcast a 2D attention mask `[bsz, seq_len]` into the additive 4D form
    `[bsz, 1, tgt_seq_len, src_seq_len]` used by the attention layers.
    """
    src_len = shape_list(mask)[1]
    if tgt_len is None:
        tgt_len = src_len
    one = tf.constant(1.0)
    float_mask = tf.cast(mask, dtype=one.dtype)
    tiled = tf.tile(float_mask[:, None, None, :], (1, 1, tgt_len, 1))
    # Attended positions become 0.0; masked positions get a large negative bias.
    return (one - tiled) * LARGE_NEGATIVE
class TFBartLearnedPositionalEmbedding(TFSharedEmbeddings):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
        # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)

    def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input_shape[:2]
        # Positions continue from the cache length so incremental decoding
        # steps look up the correct embedding row.
        positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
        return super().call(positions + self.offset)
class TFBartAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training=False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `p |
clalancette/pycdlib | pycdlib/facade.py | Python | lgpl-2.1 | 36,359 | 0.001403 | # Copyright (C) 2019 Chris Lalancette <clalancette@gmail.com>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Facade classes to make the main PyCdlib object easier to use."""
from __future_ | _ import absolute_import
from pycdlib import dr
from pycdlib import pycdlibexception
from pycdlib import udf as udfmod
from pycdlib import utils
# For mypy annotations
if False: # pylint: disable=using-constant-test
from typing import BinaryIO, Generator, Optional, Tuple # NOQA pylint: disable=unused-import
# NOTE: these imports have to be here to avoid circular deps
from pycdlib import pycdlib # NOQA p | ylint: disable=unused-import
from pycdlib import pycdlibio # NOQA pylint: disable=unused-import
def iso_path_to_rr_name(iso_path, interchange_level, is_dir):
    # type: (str, int, bool) -> str
    """
    Take an absolute ISO path and generate a corresponding Rock Ridge basename.

    Parameters:
     iso_path - The absolute iso_path to generate a Rock Ridge name from.
     interchange_level - The interchange level at which to operate.
     is_dir - Whether this will be a directory or not.
    Returns:
     The Rock Ridge name as a string.
    """
    # startswith handles the empty string too: previously iso_path[0] raised
    # an IndexError for '' instead of the intended PyCdlibInvalidInput.
    if not iso_path.startswith('/'):
        raise pycdlibexception.PyCdlibInvalidInput("iso_path must start with '/'")
    namesplit = utils.split_path(utils.normpath(iso_path))
    iso_name = namesplit.pop()
    if is_dir:
        rr_name = utils.mangle_dir_for_iso9660(iso_name.decode('utf-8'),
                                               interchange_level)
    else:
        # Files get a mangled basename plus extension, joined with a dot.
        basename, ext = utils.mangle_file_for_iso9660(iso_name.decode('utf-8'),
                                                      interchange_level)
        rr_name = '.'.join([basename, ext])
    return rr_name
class PyCdlibISO9660(object):
"""The class representing the PyCdlib ISO9660 facade."""
__slots__ = ('pycdlib_obj',)
    def __init__(self, pycdlib_obj):
        # type: (pycdlib.PyCdlib) -> None
        # Facade over an already-constructed PyCdlib object: every method
        # delegates to it, restricted to the plain ISO9660 context.
        self.pycdlib_obj = pycdlib_obj
    def get_file_from_iso(self, local_path, iso_path):
        # type: (str, str) -> None
        """
        Fetch a single file from the ISO via an absolute ISO path and write it
        out to a local file.
        Parameters:
         local_path - The local file to write to.
         iso_path - The absolute ISO9660 path to lookup on the ISO.
        Returns:
         Nothing.
        """
        # Pure delegation; lookup is restricted to the ISO9660 namespace.
        self.pycdlib_obj.get_file_from_iso(local_path, iso_path=iso_path)
    def get_file_from_iso_fp(self, outfp, iso_path):
        # type: (BinaryIO, str) -> None
        """
        Fetch a single file from the ISO via an absolute ISO path and write it
        out to the file object.
        Parameters:
         outfp - The file object to write data to.
         iso_path - The absolute ISO9660 path to lookup on the ISO.
        Returns:
         Nothing.
        """
        # Pure delegation; the caller owns (and must close) outfp.
        self.pycdlib_obj.get_file_from_iso_fp(outfp, iso_path=iso_path)
def add_fp(self, fp, length, iso_path):
# type: (BinaryIO, int, str) -> None
"""
Add a file to the ISO. While using this facade, a file will only be
added to the ISO9660 context (and by extension, the Rock Ridge
context). If the ISO is a Rock Ridge one, then a Rock Ridge name will
be generated from the ISO path. For more control over which contexts
a file shows up in, use the 'add_hard_link' API and/or use the regular
PyCdlib object (not this facade). Note that the caller must ensure
that 'fp' remains open for the lifetime of the PyCdlib object, as the
PyCdlib class uses the file descriptor internally when writing
(mastering) the ISO. To have PyCdlib manage this automatically, use
'add_file' instead.
Parameters:
fp - The file object to use for the contents of the new file.
length - The length of the data for the new file.
iso_path - The ISO9660 absolute path to the file destination on the ISO.
Returns:
Nothing.
"""
rr_name = None
if self.pycdlib_obj.has_rock_ridge():
rr_name = iso_path_to_rr_name(iso_path, self.pycdlib_obj.interchange_level, False)
self.pycdlib_obj.add_fp(fp, length, iso_path=iso_path, rr_name=rr_name)
def add_file(self, filename, iso_path):
# type: (str, str) -> None
"""
Add a file to the ISO. While using this facade, a file will only be
added to the ISO9660 context (and by extension, the Rock Ridge
context). If the ISO is a Rock Ridge one, then a Rock Ridge name will
be generated from the ISO path. For more control over which contexts
in which a file shows up, use the 'add_hard_link' API and/or use the
regular PyCdlib object (not this facade).
Parameters:
filename - The filename to use for the data contents for the new file.
iso_path - The ISO9660 absolute path to the file destination on the ISO.
Returns:
Nothing.
"""
rr_name = None
if self.pycdlib_obj.has_rock_ridge():
rr_name = iso_path_to_rr_name(iso_path, self.pycdlib_obj.interchange_level, False)
self.pycdlib_obj.add_file(filename, iso_path=iso_path, rr_name=rr_name)
def add_directory(self, iso_path):
# type: (str) -> None
"""
Add a directory to the ISO9660 context (and by extension, the Rock
Ridge context). If the ISO is a Rock Ridge one, then a Rock Ridge name
will be generated from the ISO path. For more control over which
contexts in which a directory shows up, use the regular PyCdlib object
(not this facade).
Parameters:
iso_path - The ISO9660 absolute path to use for the directory.
Returns:
Nothing.
"""
rr_name = None
if self.pycdlib_obj.has_rock_ridge():
rr_name = iso_path_to_rr_name(iso_path, self.pycdlib_obj.interchange_level, True)
self.pycdlib_obj.add_directory(iso_path=iso_path, rr_name=rr_name)
    def rm_file(self, iso_path):
        # type: (str) -> None
        """
        Remove a file from the ISO. This removes the data and the listing of
        the file from all contexts. Due to some complexities of the ISO format,
        removal of zero-byte files from all contexts does not automatically
        happen, so this method may need to be called on more than one facade for
        zero-byte files.
        Parameters:
         iso_path - The path to the file to remove.
        Returns:
         Nothing.
        """
        # Pure delegation to the full PyCdlib object.
        self.pycdlib_obj.rm_file(iso_path=iso_path)
    def rm_directory(self, iso_path):
        # type: (str) -> None
        """
        Remove a directory from the ISO. This removes the directory from
        just the ISO9660 context (and by extension, the Rock Ridge context).
        The directory must be empty.
        Parameters:
         iso_path - The path to the directory to remove.
        Returns:
         Nothing.
        """
        # Pure delegation to the full PyCdlib object.
        self.pycdlib_obj.rm_directory(iso_path=iso_path)
def list_children(self, iso_path):
# type: (str) -> Generator
"""
Generate a list of all of the file/directory objects in the
specified location on the ISO.
Parameters:
iso_path - The absolute path on the ISO to list the children for.
Yields:
Children of this path.
Returns:
Nothing.
"""
return self.pycdlib_obj. |
ismail-s/warehouse | tests/unit/utils/test_paginate.py | Python | apache-2.0 | 2,849 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from webob.multidict import MultiDict
from warehouse.utils import paginate
class FakeResult:
    """Stand-in for an Elasticsearch response: iterable data plus a total.

    ``hits.total`` mimics the attribute shape of a real ES result object.
    """
    def __init__(self, data, total):
        self.data = data
        self.total = total
    @property
    def hits(self):
        return pretend.stub(total=self.total)
    def __iter__(self):
        return iter(self.data)
class FakeQuery:
    # Minimal stand-in for an elasticsearch-dsl query: records the slice it
    # was indexed with and replays `self.fake[self.range]` on execute().
    def __init__(self, fake):
        self.fake = fake
        self.range = slice(None)  # default: the full result set
    def __getitem__(self, range):
        # NOTE(review): `range` shadows the builtin; kept byte-identical here.
        self.range = range
        return self
    @property
    def results(self):
        # Mirrors the `.results.hits.total` shape of a real query object.
        return pretend.stub(hits=pretend.stub(total=len(self.fake)))
    def execute(self):
        return FakeResult(self.fake[self.range], len(self.fake))
class TestElasticsearchWrapper:
def test_slices_and_length(self):
wrapper = paginate._ElasticsearchWrapper(FakeQuery([1, 2, 3, 4, 5, 6]))
assert wrapper[1:3] == [2, 3]
assert len(wrapper) == 6
def test_second_slice_fails(self):
wrapper = paginate._ElasticsearchWrapper(FakeQuery([1, 2, 3, 4, 5, 6]))
wrapper[1:3]
with pytest.raises(RuntimeError):
wrapper[1:3]
def test_len_before_slice_fails(self):
wrapper = paginate._ElasticsearchWrapper(FakeQuery([1, 2, 3, 4, 5, 6 | ]))
with pytest.rai | ses(RuntimeError):
len(wrapper)
def test_elasticsearch_page_has_wrapper(monkeypatch):
    # ElasticsearchPage must delegate to Page, forwarding all arguments and
    # forcing wrapper_class to the ES-specific wrapper.
    page_obj = pretend.stub()
    page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
    monkeypatch.setattr(paginate, "Page", page_cls)
    assert paginate.ElasticsearchPage("first", second="foo") is page_obj
    assert page_cls.calls == [
        pretend.call(
            "first",
            second="foo",
            wrapper_class=paginate._ElasticsearchWrapper,
        ),
    ]
def test_paginate_url(pyramid_request):
    # The URL factory must preserve existing query parameters and append the
    # requested page number when building the route path.
    pyramid_request.GET = MultiDict(pyramid_request.GET)
    pyramid_request.GET["foo"] = "bar"
    url = pretend.stub()
    pyramid_request.current_route_path = \
        pretend.call_recorder(lambda _query: url)
    url_maker = paginate.paginate_url_factory(pyramid_request)
    assert url_maker(5) is url
    assert pyramid_request.current_route_path.calls == [
        pretend.call(_query=[("foo", "bar"), ("page", 5)]),
    ]
kauralasoo/Blood_ATAC | scripts/postprocessCrossmap.py | Python | apache-2.0 | 728 | 0.026099 | import subprocess
import os
import argparse
import gzip
parser = argparse.ArgumentParser(description = "Postprocess the VCF file creat | ed by the CrossMap to make it valid again.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--vcf", help = "Path to the VCF file.")
args = parser.parse_args()
vcf_file = gzip.open(args.vcf)
contigs = dict()
for line in vcf_file:
line = line.rstrip()
if(line[0] == "#"):
print(line)
if(line[0:8] == "##contig"):
contig = line.split("##contig=<ID=")[1].split(",assembly=")[0]
contigs[contig] = 1
else:
fields = line.split("\t",1)
if(fields[0] i | n contigs):
print("\t".join(fields)) #Only keep SNPs that fall into contigs mentioned in the header
|
darfire/screp | setup.py | Python | gpl-3.0 | 1,503 | 0.005323 | #!/usr/bin/env python
# Bootstrap installation of Distribute
import distribute_setup
distribute_setup.use_setuptools()
import os
from setuptools import setup
PROJECT = u'screp'
VERSION = '0.3.2'
URL = 'https://github.com/darfire/screp'
AUTHOR = u'Doru Arfire'
AUTHOR_EMAIL = u'doruarfire@gmail.com'
DESC = u'Command-line utility for easy scraping of HTML documents'
requires = [
'pyparsing',
'lxml',
'cssselect >= 0.7.1',
]
def read_file(file_name):
    """Return the text of *file_name*, resolved relative to this script's directory.

    Uses a context manager so the handle is closed promptly; the original
    implementation leaked the open file object.
    """
    file_path = os.path.join(
        os.path.dirname(__file__),
        file_name
    )
    with open(file_path) as f:
        return f.read()
setup(
name=PROJECT,
version=VERSION,
description=DESC,
long_description=read_file('README.rst'),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license='LGPL',
namespace_packages=[],
packages=['screp'],
include_package_data=True,
zip_safe=False,
install_requires=re | quires,
entry_points = {
'console_scripts': [
'screp=screp.main:main',
],
},
classifiers=[
# -*- Classifiers -*-
'License :: OSI Approved',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
"Programming Language :: Python" | ,
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Development Status :: 4 - Beta',
'Environment :: Console',
'Topic :: Internet :: WWW/HTTP',
],
)
|
amdorra/django-kvmodel | kvmodel/models.py | Python | mit | 1,100 | 0 | """
Example
-------
class SystemSetting(KVModel):
pass
setting = SystemSetting.create(key='foo', value=100)
loaded_setting = SystemSetting.get_by_key('foo')
"""
from django.db imp | ort models
|
from .fields import SerializableField
class KVModel(models.Model):
    """
    An Abstract model that has key and value fields
    key -- Unique CharField of max_length 255
    value -- SerializableField by default could be used to store bool, int,
    float, str, list, dict and date
    """
    key = models.CharField(max_length=255, unique=True)
    value = SerializableField(blank=True, null=True)
    def __unicode__(self):
        # NOTE(review): Python 2-only protocol (`__unicode__` and the
        # `unicode` builtin); under Python 3 this raises NameError if called.
        # Confirm the supported Python versions before relying on it.
        return 'KVModel instance: ' + self.key + ' = ' + unicode(self.value)
    @classmethod
    def get_by_key(cls, key):
        """
        A class method that returns a KVModel (subclass) instance.
        key -- unique key that is used for the search.
        this method will throw a DoesNotExist exception if an object with the
        key provided is not found.
        """
        return cls.objects.get(key=key)
    class Meta:
        # Abstract: no table for KVModel itself; subclasses get the fields.
        abstract = True
|
googlei18n/fontuley | src/third_party/fontTools/Lib/fontTools/ttLib/tables/_h_h_e_a.py | Python | apache-2.0 | 2,656 | 0.030873 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
hheaFormat = """
> # big endian
tableVersion: 16.16F
ascent: h
descent: h
lineGap: h
advanceWidthMax: H
minLeftSideBearing: h
minRightSideBearing: h
xMaxExtent: h
caretSlopeRise: h
caretSlopeRun: h
caretOffset: h
reserved0: h
reserved1: h
reserved2: h
reserved3: h
metricDataFormat: h
numberOfHMetrics: H
"""
class table__h_h_e_a(DefaultTable.DefaultTable):
# Note: Keep in sync with table__v_h_e_a
dependencies = ['hmtx', 'glyf']
	def decompile(self, data, ttFont):
		# Unpack the fixed-size binary 'hhea' header directly onto self's
		# attributes, as named in hheaFormat.
		sstruct.unpack(hheaFormat, data, self)
	def compile(self, ttFont):
		if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:
			# Refresh aggregate metrics from 'hmtx'/'glyf' before packing.
			self.recalc(ttFont)
		return sstruct.pack(hheaFormat, self)
def recalc(self, ttFont):
hmtxTable = ttFont['hmtx']
if 'glyf' in ttFont:
glyfTable = ttFont['glyf']
INFINITY = 100000
advanceWidthMax = 0
minLeftSideBearing = +INFINITY # arbitrary big number
minRightSideBearing = +INFINITY # arbitrary big number
xMaxExtent = -INFINITY # arbitrary big negative number
for name in ttFont.getGlyphOrder():
width, lsb = hmtxTable[name]
advanceWidthMax = max(advanceWidthMax, width)
g = glyfTable[name]
if g.numberOfContours == 0:
continue
if g.numberOfContours < 0 and not hasattr(g, "xMax"):
# Composite glyph without extents set.
# Calculate those.
g.recalcBounds(glyfTable)
minLeftSideBearing = min(minLeftSideBearing, lsb)
rsb = width - lsb - (g.xMax - g.xMin)
minRightSideBearing = min(minRightSideBearing, rsb)
extent = lsb + (g.xMax - g.xMin)
xMaxExtent = max(xMaxExtent, extent)
if xMaxExtent == -INFINIT | Y:
# No glyph has outlines.
minLeftSideBearing = 0
| minRightSideBearing = 0
xMaxExtent = 0
self.advanceWidthMax = advanceWidthMax
self.minLeftSideBearing = minLeftSideBearing
self.minRightSideBearing = minRightSideBearing
self.xMaxExtent = xMaxExtent
else:
# XXX CFF recalc...
pass
	def toXML(self, writer, ttFont):
		# Emit one <fieldname value="..."/> element per field, in struct order.
		formatstring, names, fixes = sstruct.getformat(hheaFormat)
		for name in names:
			value = getattr(self, name)
			writer.simpletag(name, value=value)
			writer.newline()
	def fromXML(self, name, attrs, content, ttFont):
		# safeEval parses the XML attribute text back into a Python number.
		setattr(self, name, safeEval(attrs["value"]))
|
DannyLee1991/article_cosine_similarity | utils/log.py | Python | apache-2.0 | 873 | 0.00578 | import time
def progress(index, size, for_what='当前进度', step=10):
    """Print a progress line roughly every ``size / step`` items.

    Parameters:
     index - zero-based position within the run.
     size - total number of items.
     for_what - label printed with the progress line.
     step - number of progress reports over the whole run.
    """
    block_size = int(size / step)
    if block_size == 0:
        # Fewer items than reporting steps: report every item instead of
        # crashing with ZeroDivisionError on the modulo below.
        block_size = 1
    if index % block_size == 0:
        crt = int(index / block_size)
        print('%s ==> [%d / %d]' % (for_what, crt, step))
def log_time():
    """Decorator factory: wrap a function to print its wall-clock run time.

    The wrapper prints "start", invokes the function, prints the elapsed
    time tagged with the function name, and returns the function's result.
    """
    def _log_time(func):
        def wrapper(*args, **kwargs):
            print("start")
            start_time = time.time()
            # Forwarding *args/**kwargs directly also covers the zero-argument
            # case; the original special-cased it redundantly.
            result = func(*args, **kwargs)
            end_time = time.time()
            cost_time = end_time - start_time
            print("[%s] cost time -> %s" % (func.__name__, cost_time))
            return result
        return wrapper
    return _log_time
def line(log_str, style='-'):
    """Print *log_str* framed by twelve *style* characters on each side."""
    print('{0}{1}{0}'.format(style * 12, log_str))
def block(style="-", w=100, h=5):
    """Print a solid block: *h* lines, each *w* repetitions of *style*."""
    print((style * w + "\n") * h, end="")
|
safwanrahman/drf-spirit | drf_spirit/serializers.py | Python | mit | 975 | 0.001026 | from rest_framework.serializers import ModelSerializer, SerializerMethodField
from .fields import UserReadOnlyFiled
from .models import Topic, Category, Comment
from .relations import PresentableSlugRelatedField
class CategorySerializer(ModelSerializer):
    # Full representation of a Category: serializes every model field.
    class Meta:
        model = Category
        fields = '__all__'
class CategoryLiteSerializer(ModelSerializer):
    # Compact Category representation used for nested presentation
    # (see TopicSerializer's PresentableSlugRelatedField).
    class Meta:
        model = Category
        fields = ('title', 'slug', 'color')
class TopicSerializer(ModelSerializer):
user = UserReadOnlyFiled()
category = PresentableSlugRelatedField(queryset=Category.objects.all(),
presentation_serializer=CategoryLiteSerializer,
slug_field='slug')
class Meta:
model = Topic
fields = '__al | l__'
class CommentSerializer(ModelSerializer):
user = UserReadOnlyFiled()
class Meta:
model = Comment
fields = | '__all__'
|
DiplomadoACL/problemasenclase | Problema2/problema2crocha.py | Python | lgpl-3.0 | 1,040 | 0.04845 | #Hecho en python 3.5
from gutenberg.acqui | re import load_etext
from gutenberg.cleanup import strip_headers
librosCodigo = {"Francés":[13735,13808],"Español":[24925,15027],"Portugés":[14904,16384],"Inglés":[10422,1013]}
dic_idiomas={}
#hola dos
for idioma in librosCodigo.keys():
| diccionario_largo_palabras={}
for indeCo in librosCodigo[idioma]:
texto= strip_headers(load_etext(indeCo))
dic_idiomas[idioma]= diccionario_largo_palabras
for caracter_especial in ['"',"...","¿","?","=","_","[","]","(",")",",",".",":",";","!","¡","«","»","*","~","' "," '","- "," -","--"]:
texto=texto.replace(caracter_especial," ")
palabras=texto.split()
for palabra in palabras:
largo_palabra = len(palabra)
if largo_palabra in diccionario_largo_palabras:
diccionario_largo_palabras[largo_palabra] = diccionario_largo_palabras[largo_palabra]+1
else:
diccionario_largo_palabras[largo_palabra]= 1
print (dic_idiomas)
|
open-mmlab/mmdetection | configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py | Python | apache-2.0 | 853 | 0 | _base_ = './sparse_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
min_values = (480, 512, 544, 576 | , 608, 640, 672, 704, 736, 768, 800)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, value) for value in min_values],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict( | type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(policy='step', step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
mirrorcoder/paramiko | paramiko/util.py | Python | lgpl-2.1 | 8,572 | 0.00035 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Useful functions used by the rest of paramiko.
"""
from __future__ import generators
import errno
import sys
import struct
import traceback
import threading
import logging
from paramiko.common import DEBUG, zero_byte, xffffffff, max_byte
from paramiko.py3compat import PY2, long, byte_chr, byte_ord, b
from paramiko.config import SSHConfig
def inflate_long(s, always_positive=False):
    """turns a normalized byte string into a long-int
    (adapted from Crypto.Util.number)

    The input is read as a big-endian two's-complement number; unless
    always_positive is set, a high top bit marks it as negative.
    """
    out = long(0)
    negative = 0
    if not always_positive and (len(s) > 0) and (byte_ord(s[0]) >= 0x80):
        negative = 1
    if len(s) % 4:
        # Left-pad to a multiple of 4 so the loop can consume big-endian
        # 32-bit words; negative numbers sign-extend with 0xFF bytes.
        filler = zero_byte
        if negative:
            filler = max_byte
        # never convert this to ``s +=`` because this is a string, not a number
        # noinspection PyAugmentAssignment
        s = filler * (4 - len(s) % 4) + s
    for i in range(0, len(s), 4):
        out = (out << 32) + struct.unpack(">I", s[i : i + 4])[0]
    if negative:
        # Convert the unsigned reading into its two's-complement value.
        out -= long(1) << (8 * len(s))
    return out
deflate_zero = zero_byte if PY2 else 0
deflate_ff = max_byte if PY2 else 0xff
def deflate_long(n, add_sign_padding=True):
    """turns a long-int into a normalized byte string
    (adapted from Crypto.Util.number)"""
    # after much testing, this algorithm was deemed to be the fastest
    s = bytes()
    n = long(n)
    # Emit big-endian 32-bit words until only the sign remains
    # (0 for non-negative, -1 for negative, via arithmetic shift).
    while (n != 0) and (n != -1):
        s = struct.pack(">I", n & xffffffff) + s
        n >>= 32
    # strip off leading zeros, FFs
    # for/else: the loop body breaks at the first significant byte; the else
    # branch runs only when every byte was filler (n was 0 or -1 originally).
    for i in enumerate(s):
        if (n == 0) and (i[1] != deflate_zero):
            break
        if (n == -1) and (i[1] != deflate_ff):
            break
    else:
        # degenerate case, n was either 0 or -1
        i = (0,)
        if n == 0:
            s = zero_byte
        else:
            s = max_byte
    s = s[i[0] :]
    if add_sign_padding:
        # Keep the sign unambiguous: prepend 0x00 for positive values whose
        # top bit is set, 0xFF for negative values whose top bit is clear.
        if (n == 0) and (byte_ord(s[0]) >= 0x80):
            s = zero_byte + s
        if (n == -1) and (byte_ord(s[0]) < 0x80):
            s = max_byte + s
    return s
def format_binary(data, prefix=""):
    """Hex-dump *data* in rows of up to 16 bytes, prefixing each line."""
    rows = []
    offset = 0
    while offset + 16 < len(data):
        rows.append(format_binary_line(data[offset : offset + 16]))
        offset += 16
    if offset < len(data):
        rows.append(format_binary_line(data[offset:]))
    return [prefix + row for row in rows]
def format_binary_line(data):
    """Render one hex-dump row: a hex-bytes column plus a printable column."""
    left = " ".join(["{:02X}".format(byte_ord(c)) for c in data])
    # ".{:c}.."[(c + 63) // 95] indexing trick: the 4-char string is
    # '.', chr(c), '.', '.'; the computed index selects chr(c) only for
    # printable bytes 32..126 and '.' otherwise.
    right = "".join(
        [".{:c}..".format(byte_ord(c))[(byte_ord(c) + 63) // 95] for c in data]
    )
    return "{:50s} {}".format(left, right)
def safe_string(s):
    """Return *s* with every non-printable byte percent-escaped (e.g. %0A)."""
    out = b""
    for c in s:
        code = byte_ord(c)
        if 32 <= code <= 127:
            out += byte_chr(code)
        else:
            out += b("%{:02X}".format(code))
    return out
def bit_length(n):
    """Return the number of bits needed to represent *n*.

    Prefers the native ``int.bit_length``; falls back to scanning the
    deflated byte representation for objects lacking that method.
    """
    try:
        return n.bit_length()
    except AttributeError:
        packed = deflate_long(n, False)
        top = byte_ord(packed[0])
        if top == 0:
            return 1
        nbits = len(packed) * 8
        while not (top & 0x80):
            top <<= 1
            nbits -= 1
        return nbits
def tb_strings():
    """Return the currently-handled exception's traceback, split into lines."""
    formatted = traceback.format_exception(*sys.exc_info())
    return "".join(formatted).split("\n")
def generate_key_bytes(hash_alg, salt, key, nbytes):
    """
    Given a password, passphrase, or other human-source key, scramble it
    through a secure hash into some keyworthy bytes. This specific algorithm
    is used for encrypting/decrypting private key files.

    :param function hash_alg: A function which creates a new hash object, such
        as ``hashlib.sha256``.
    :param salt: data to salt the hash with (truncated to 8 bytes).
    :type salt: byte string
    :param str key: human-entered password or passphrase.
    :param int nbytes: number of bytes to generate.
    :return: Key data `str`
    """
    if len(salt) > 8:
        salt = salt[:8]
    keydata = b""
    digest = b""
    # Each round hashes (previous digest + key + salt), chaining digests
    # until enough key material has been accumulated.
    while nbytes > 0:
        hasher = hash_alg()
        if digest:
            hasher.update(digest)
        hasher.update(b(key))
        hasher.update(salt)
        digest = hasher.digest()
        take = min(nbytes, len(digest))
        keydata += digest[:take]
        nbytes -= take
    return keydata
def load_host_keys(filename):
    """
    Read a file of known SSH host keys, in the format used by openssh, and
    return a compound dict of ``hostname -> keytype ->`` `PKey
    <paramiko.pkey.PKey>`. The hostname may be an IP address or DNS name. The
    keytype will be either ``"ssh-rsa"`` or ``"ssh-dss"``.
    This type of file unfortunately doesn't exist on Windows, but on posix,
    it will usually be stored in ``os.path.expanduser("~/.ssh/known_hosts")``.
    Since 1.5.3, this is just a wrapper around `.HostKeys`.
    :param str filename: name of the file to read host keys from
    :return:
        nested dict of `.PKey` objects, indexed by hostname and then keytype
    """
    # Imported locally -- NOTE(review): presumably to avoid a circular import
    # between paramiko.util and paramiko.hostkeys; confirm before moving.
    from paramiko.hostkeys import HostKeys
    return HostKeys(filename)
def parse_ssh_config(file_obj):
    """
    Provided only as a backward-compatible wrapper around `.SSHConfig`.
    """
    parsed = SSHConfig()
    parsed.parse(file_obj)
    return parsed
def lookup_ssh_host_config(hostname, config):
    """
    Provided only as a backward-compatible wrapper around `.SSHConfig`.
    """
    # Pure delegation; see SSHConfig.lookup for the matching rules.
    return config.lookup(hostname)
def mod_inverse(x, m):
    """Return the multiplicative inverse of ``x`` modulo ``m``.

    Implemented with the extended Euclidean algorithm; assumes
    ``gcd(x, m) == 1``.
    """
    # it's crazy how small Python can make this function.
    old_r, r = m, x
    old_t, t = 0, 1
    while r > 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_t, t = t, old_t - q * t
    if old_t < 0:
        old_t += m
    return old_t
# Registry mapping id(thread object) -> small sequential id, guarded by a lock.
_g_thread_ids = {}
_g_thread_counter = 0
_g_thread_lock = threading.Lock()
def get_thread_id():
    """Return a small, stable integer id (1, 2, ...) for the calling thread.

    The first call from a given thread allocates the next counter value
    under ``_g_thread_lock``; later calls return the cached value.
    """
    global _g_thread_ids, _g_thread_counter, _g_thread_lock
    # threading.current_thread: the non-deprecated spelling of currentThread.
    tid = id(threading.current_thread())
    try:
        return _g_thread_ids[tid]
    except KeyError:
        with _g_thread_lock:
            _g_thread_counter += 1
            ret = _g_thread_ids[tid] = _g_thread_counter
        return ret
def log_to_file(filename, level=DEBUG):
    """send paramiko logs to a logfile,
    if they're not already going somewhere"""
    logger = logging.getLogger("paramiko")
    if len(logger.handlers) > 0:
        # A handler is already attached; bail out so logs aren't duplicated.
        return
    logger.setLevel(level)
    # Opened in append mode and intentionally left open for the process
    # lifetime; the StreamHandler owns the file object from here on.
    f = open(filename, "a")
    handler = logging.StreamHandler(f)
    # %(_threadid)s is injected by PFilter (attached via get_logger below).
    frm = "%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d"
    frm += " %(name)s: %(message)s"
    handler.setFormatter(logging.Formatter(frm, "%Y%m%d-%H:%M:%S"))
    logger.addHandler(handler)
# make only one filter object, so it doesn't get applied more than once
class PFilter(object):
    """Logging filter that stamps each record with paramiko's thread id."""
    def filter(self, record):
        # Adds the ``_threadid`` field consumed by log_to_file's format string.
        record._threadid = get_thread_id()
        return True
_pfilter = PFilter()
def get_logger(name):
    """Return the named logger with the shared thread-id filter attached."""
    logger = logging.getLogger(name)
    logger.addFilter(_pfilter)
    return logger
def retry_on_signal(function):
    """Invoke *function*, retrying for as long as it raises EINTR."""
    while True:
        try:
            return function()
        except EnvironmentError as exc:
            if exc.errno == errno.EINTR:
                continue
            raise
def constant_time_bytes_eq(a, b):
if len(a) != len(b):
return False
res = 0
# noinspection |
jabooth/menpodetect | menpodetect/ffld2/detect.py | Python | bsd-3-clause | 5,711 | 0.0007 | from __future__ import division
from functools import partial
from pathlib import Path
from menpo.base import MenpoMissingDependencyError
try:
from cyffld2 import (load_model, detect_objects,
get_frontal_face_mixture_model)
except ImportError:
raise MenpoMissingDependencyError('cyffld2')
from menpodetect.detect import detect
from menpodetect.compatibility import STRING_TYPES
from .conversion import pointgraph_from_rect, ensure_channel_axis
class _ffld2_detect(object):
    r"""
    Callable wrapper that caches an ffld2 detector.

    Accepts either a ``cyffld2.FFLDMixture`` instance or a path to one on
    disk, and marshals raw detections back into
    ``menpo.shape.PointDirectedGraph`` bounding boxes.

    Parameters
    ----------
    model : `Path` or `str` or `cyffld2.FFLDMixture`
        Either a path to an `cyffld2.FFLDMixture` or the detector itself.

    Raises
    ------
    ValueError
        If a path was provided and it does not exist.
    """
    def __init__(self, model):
        if isinstance(model, STRING_TYPES) or isinstance(model, Path):
            model_path = Path(model)
            if not model_path.exists():
                raise ValueError('Model {} does not exist.'.format(model_path))
            model = load_model(str(model_path))
        self._ffld2_model = model
    def __call__(self, uint8_image, padding=6, interval=5, threshold=0.5,
                 overlap=0.3):
        r"""
        Run the cached detector over *uint8_image* (greyscale or RGB).

        Parameters
        ----------
        uint8_image : `ndarray`
            A Greyscale or RGB image.
        padding : `int`, optional
            Amount of zero padding in HOG cells.
        interval : `int`, optional
            Number of levels per octave in the HOG pyramid.
        threshold : `double`, optional
            Minimum detection score; detections below it are dropped.
            Values can be negative.
        overlap : `double`, optional
            Minimum overlap in latent positive search and non-maxima
            suppression; 0.3 is the value suggested by the Face Detection
            Without Bells and Whistles paper.

        Returns
        -------
        bounding_boxes : `list` of `menpo.shape.PointDirectedGraph`
            The detected objects.
        """
        # Greyscale images need an explicit channel axis before detection.
        uint8_image = ensure_channel_axis(uint8_image)
        detections = detect_objects(self._ffld2_model, uint8_image,
                                    padding=padding, interval=interval,
                                    threshold=threshold, overlap=overlap)
        return [pointgraph_from_rect(d) for d in detections]
class FFLD2Detector(object):
    r"""
    A generic ffld2 detector.

    Wraps an ffld2 object detector inside the menpodetect framework and
    provides a clean interface to expose the ffld2 arguments.
    """
    def __init__(self, model):
        self._detector = _ffld2_detect(model)
    def __call__(self, image, greyscale=True, image_diagonal=None,
                 group_prefix='ffld2', padding=6, interval=5, threshold=0.5,
                 overlap=0.3):
        r"""
        Detect objects in *image* with the cached ffld2 detector, attaching
        each detection to the image as a landmark group.

        Parameters
        ----------
        image : `menpo.image.Image`
            The Menpo image to detect in; detected bounding boxes are
            attached to it.
        greyscale : `bool`, optional
            Whether to convert the image to greyscale before detection.
        image_diagonal : `int`, optional
            If given, the image is rescaled so its diagonal matches this
            size before detection.
        group_prefix : `str`, optional
            Prefix for the landmark groups; detections are stored as
            group_prefix_# counting from 0.
        padding : `int`, optional
            Amount of zero padding in HOG cells.
        interval : `int`, optional
            Number of levels per octave in the HOG pyramid.
        threshold : `double`, optional
            Minimum detection score; may be negative.
        overlap : `double`, optional
            Minimum overlap in latent positive search and non-maxima
            suppression (0.3 per Face Detection Without Bells and Whistles).

        Returns
        -------
        bounding_boxes : `list` of `menpo.shape.PointDirectedGraph`
            The detected objects.
        """
        ffld2_call = partial(self._detector, padding=padding,
                             interval=interval, threshold=threshold,
                             overlap=overlap)
        return detect(ffld2_call, image, greyscale=greyscale,
                      image_diagonal=image_diagonal, group_prefix=group_prefix)
def load_ffld2_frontal_face_detector():
    r"""
    Load the ffld2 frontal face detector. This detector is the DPM baseline
    provided from [1]_.
    Returns
    -------
    detector : FFLD2Detector
        The frontal face detector.
    References
    ----------
    .. [1] M. Mathias and R. Benenson and M. Pedersoli and L. Van Gool
       Face detection without bells and whistles
       ECCV 2014
    """
    model = get_frontal_face_mixture_model()
    return FFLD2Detector(model)
|
UK992/servo | tests/wpt/web-platform-tests/tools/third_party/html5lib/html5lib/tests/test_sanitizer.py | Python | mpl-2.0 | 5,540 | 0.002527 | from __future__ import absolute_import, division, unicode_literals
from html5lib import constants, parseFragment, serialize
from html5lib.filters import sanitizer
def runSanitizerTest(_, expected, input):
    """Assert that sanitizing *input* yields the canonical serialization of *expected*."""
    serializer_opts = dict(
        omit_optional_tags=False,
        use_trailing_solidus=True,
        space_before_trailing_solidus=False,
        quote_attr_values="always",
        quote_char='"',
        alphabetical_attributes=True,
    )
    reference = serialize(parseFragment(expected), **serializer_opts)
    assert reference == sanitize_html(input)
def sanitize_html(stream):
    """Parse *stream* as an HTML fragment and re-serialize it with sanitization enabled."""
    fragment = parseFragment(stream)
    return serialize(
        fragment,
        sanitize=True,
        omit_optional_tags=False,
        use_trailing_solidus=True,
        space_before_trailing_solidus=False,
        quote_attr_values="always",
        quote_char='"',
        alphabetical_attributes=True,
    )
def test_should_handle_astral_plane_characters():
    # Astral-plane characters must round-trip through the sanitizer intact.
    expected = '<p>\U0001d4b5 \U0001d538</p>'
    assert sanitize_html("<p>𝒵 𝔸</p>") == expected
def test_should_allow_relative_uris():
    # Relative URLs are legitimate href targets and must be preserved.
    markup = '<p><a href="/example.com"></a></p>'
    assert sanitize_html(markup) == markup
def test_invalid_data_uri():
    # A malformed data: URI must be stripped from the src attribute.
    result = sanitize_html('<audio controls="" src="data:foobar"></audio>')
    assert result == '<audio controls></audio>'
def test_invalid_ipv6_url():
    # An unparseable IPv6-like URL must be dropped entirely.
    result = sanitize_html('<a href="h://]">')
    assert result == "<a></a>"
def test_data_uri_disallowed_type():
    # data: URIs with a non-whitelisted media type must be removed.
    result = sanitize_html('<audio controls="" src="data:text/html,<html>"></audio>')
    assert result == "<audio controls></audio>"
def test_sanitizer():
    """Generator test: yields (runSanitizerTest, name, expected, input) cases
    covering every allowed element, attribute and protocol.

    Fixes: stray row-delimiter corruption in the second loop body, and the
    first protocol loop's test name, which duplicated the uppercase loop's
    name ("test_should_allow_uppercase_%s_uris") although it exercises the
    lowercase form (upstream html5lib uses "test_should_allow_%s_uris").
    """
    # One case per allowed HTML element; void elements need self-closing input.
    for ns, tag_name in sanitizer.allowed_elements:
        if ns != constants.namespaces["html"]:
            continue
        if tag_name in ['caption', 'col', 'colgroup', 'optgroup', 'option', 'table', 'tbody', 'td',
                        'tfoot', 'th', 'thead', 'tr', 'select']:
            continue  # TODO
        if tag_name == 'image':
            yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
                   "<img title=\"1\"/>foo <bad>bar</bad> baz",
                   "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
        elif tag_name == 'br':
            yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
                   "<br title=\"1\"/>foo <bad>bar</bad> baz<br/>",
                   "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
        elif tag_name in constants.voidElements:
            yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
                   "<%s title=\"1\"/>foo <bad>bar</bad> baz" % tag_name,
                   "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
        else:
            yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
                   "<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
                   "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
    # One case per allowed (non-namespaced, lowercase) attribute.
    for ns, attribute_name in sanitizer.allowed_attributes:
        if ns is not None:
            continue
        if attribute_name != attribute_name.lower():
            continue  # TODO
        if attribute_name == 'style':
            continue
        attribute_value = 'foo'
        if attribute_name in sanitizer.attr_val_is_uri:
            attribute_value = '%s://sub.domain.tld/path/object.ext' % sanitizer.allowed_protocols[0]
        yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name,
               "<p %s=\"%s\">foo <bad>bar</bad> baz</p>" % (attribute_name, attribute_value),
               "<p %s='%s'>foo <bad>bar</bad> baz</p>" % (attribute_name, attribute_value))
    # Allowed protocols, lowercase form.
    for protocol in sanitizer.allowed_protocols:
        rest_of_uri = '//sub.domain.tld/path/object.ext'
        if protocol == 'data':
            rest_of_uri = 'image/png;base64,aGVsbG8gd29ybGQ='
        yield (runSanitizerTest, "test_should_allow_%s_uris" % protocol,
               "<img src=\"%s:%s\">foo</a>" % (protocol, rest_of_uri),
               """<img src="%s:%s">foo</a>""" % (protocol, rest_of_uri))
    # Allowed protocols, uppercase form (scheme matching is case-insensitive).
    for protocol in sanitizer.allowed_protocols:
        rest_of_uri = '//sub.domain.tld/path/object.ext'
        if protocol == 'data':
            rest_of_uri = 'image/png;base64,aGVsbG8gd29ybGQ='
        protocol = protocol.upper()
        yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
               "<img src=\"%s:%s\">foo</a>" % (protocol, rest_of_uri),
               """<img src="%s:%s">foo</a>""" % (protocol, rest_of_uri))
def test_lowercase_color_codes_in_style():
    # Lower-case hex colour codes in an inline style survive sanitization.
    markup = '<p style="border: 1px solid #a2a2a2;"></p>'
    assert sanitize_html(markup) == markup
def test_uppercase_color_codes_in_style():
    # Upper-case hex colour codes in an inline style survive sanitization.
    markup = '<p style="border: 1px solid #A2A2A2;"></p>'
    assert sanitize_html(markup) == markup
|
plotly/python-api | packages/python/plotly/plotly/validators/bar/_y.py | Python | mit | 480 | 0 | import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``y`` data array of ``bar`` traces.

    Fixes stray row-delimiter corruption in the base-class name and in the
    ``edit_type`` default ("calc+clearAxisTypes" is the generated plotly value).
    """

    def __init__(self, plotly_name="y", parent_name="bar", **kwargs):
        super(YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Generated defaults; callers may override via kwargs.
            anim=kwargs.pop("anim", True),
            edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
|
catkin/catkin_tools | catkin_tools/execution/events.py | Python | apache-2.0 | 2,304 | 0.001302 | # Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
class ExecutionEvent(object):
    """Structure for events generated by the Executor.

    Events can be jobs starting/finishing, commands starting/failing/finishing,
    commands producing output (each line is an event), or when the executor
    quits or fails.

    Fixes stray row-delimiter corruption in the ``__init__`` signature and
    removes a leftover debug ``print`` that ran just before raising.
    """
    # TODO: Make this a map of ID -> fields
    EVENT_IDS = [
        'JOB_STATUS',  # A report of running job states
        'QUEUED_JOB',  # A job has been queued to be executed
        'STARTED_JOB',  # A job has started to be executed
        'FINISHED_JOB',  # A job has finished executing (succeeded or failed)
        'ABANDONED_JOB',  # A job has been abandoned for some reason
        'STARTED_STAGE',  # A job stage has started to be executed
        'FINISHED_STAGE',  # A job stage has finished executing (succeeded or failed)
        'STAGE_PROGRESS',  # A job stage has executed partially
        'STDOUT',  # A status message from a job
        'STDERR',  # A warning or error message from a job
        'SUBPROCESS',  # A subprocess has been created
        'MESSAGE'
    ]
    def __init__(self, event_id, **kwargs):
        """Create a new event.

        :param event_id: One of the valid EVENT_IDS
        :param **kwargs: The additional data to be passed along with this event.
        :raises ValueError: if ``event_id`` is not in ``EVENT_IDS``
        """
        # Store the time this event was generated
        self.time = time.time()
        # Make sure the event ID is valid
        if event_id not in ExecutionEvent.EVENT_IDS:
            raise ValueError("The event ID %s is not a valid executor event." % event_id)
        # Store the event data
        self.event_id = event_id
        self.data = kwargs
|
dvdmgl/django-pg-fts | pg_fts/utils.py | Python | bsd-2-clause | 1,343 | 0.000745 |
class TranslationDictionary(object):
    """
    Maps ISO 639-1 language codes to PostgreSQL full-text-search dictionaries.

    Values are ``(postgres_dictionary_name, human_readable_label)`` pairs.
    NOTE(review): relies on a gettext alias ``_`` imported at module level
    (outside this excerpt).

    Fixes stray row-delimiter corruption in the dictionary literal and the
    broken ``get_dictionaries`` method, which called the nonexistent
    ``self.get_dictionary`` and iterated ``self.dictionaries`` instead of the
    ``languages`` argument (it raised AttributeError whenever ``languages``
    was given).
    """
    def __init__(self, dictionaries=None, default=None):
        self.dictionaries = dictionaries or {
            'pt': ('portuguese', _('Portuguese')),
            'en': ('english', _('English')),
            'es': ('spanish', _('Spanish')),
            'de': ('german', _('German')),
            'da': ('danish', _('Danish')),
            'nl': ('dutch', _('Dutch')),
            'fi': ('finnish', _('Finnish')),
            'fr': ('french', _('French')),
            'hu': ('hungarian', _('Hungarian')),
            'it': ('italian', _('Italian')),
            'nn': ('norwegian', _('Norwegian')),
            'ro': ('romanian', _('Romanian')),
            'ru': ('russian', _('Russian')),
            'sv': ('swedish', _('Swedish')),
            'tr': ('turkish', _('Turkish')),
        }
        self.default = default or ('simple', _('Simple'))
    def get_dictionary_tuple(self, language):
        # 'pt-br' -> 'pt'; unknown languages fall back to the default.
        return self.dictionaries.get(language.split('-')[0], self.default)
    def get_dictionary_pg(self, language):
        # Only the PostgreSQL dictionary name, without the label.
        return self.get_dictionary_tuple(language)[0]
    def get_dictionaries(self, languages=None):
        if languages:
            # BUGFIX: resolve each requested language (was: nonexistent
            # self.get_dictionary over self.dictionaries).
            return tuple(self.get_dictionary_tuple(l) for l in languages)
        return self.dictionaries.values()
|
crcresearch/osf.io | tests/test_serializers.py | Python | apache-2.0 | 21,918 | 0.002327 | # -*- coding: utf-8 -* | -
import mock
import datetime as dt
from nose.tools import * # noqa (PEP8 asserts)
import pytest
from osf_tests.factories import (
ProjectFactory,
U | serFactory,
RegistrationFactory,
NodeFactory,
CollectionFactory,
)
from osf.models import NodeRelation
from tests.base import OsfTestCase, get_default_metaschema
from framework.auth import Auth
from website.project.views.node import _view_project, _serialize_node_search, _get_children, _get_readable_descendants
from website.views import serialize_node_summary
from website.profile import utils
from website import filters, settings
from website.util import permissions
pytestmark = pytest.mark.django_db
class TestUserSerializers(OsfTestCase):
    """Tests for website.profile.utils.serialize_user."""
    def test_serialize_user(self):
        """Minimal serialization exposes public fields but never the username."""
        master = UserFactory()
        user = UserFactory()
        master.merge_user(user)
        d = utils.serialize_user(user)
        assert_equal(d['id'], user._primary_key)
        assert_equal(d['url'], user.url)
        assert_equal(d.get('username', None), None)
        assert_equal(d['fullname'], user.fullname)
        assert_equal(d['registered'], user.is_registered)
        assert_equal(d['absolute_url'], user.absolute_url)
        assert_equal(d['date_registered'], user.date_registered.strftime('%Y-%m-%d'))
        assert_equal(d['active'], user.is_active)
    def test_serialize_user_merged(self):
        """Full serialization of a merged user links back to the merging user."""
        master = UserFactory()
        user = UserFactory()
        master.merge_user(user)
        d = utils.serialize_user(user, full=True)
        assert_true(d['is_merged'])
        assert_equal(d['merged_by']['url'], user.merged_by.url)
        assert_equal(d['merged_by']['absolute_url'], user.merged_by.absolute_url)
    def test_serialize_user_full(self):
        """Full serialization includes profile image URL and node counts
        (deleted nodes and registrations are excluded from the counts)."""
        user = UserFactory()
        ProjectFactory(creator=user, is_public=False)
        NodeFactory(creator=user)
        ProjectFactory(creator=user, is_public=True)
        CollectionFactory(creator=user)
        d = utils.serialize_user(user, full=True, include_node_counts=True)
        profile_image_url = filters.profile_image_url(settings.PROFILE_IMAGE_PROVIDER,
                                                      user,
                                                      use_ssl=True,
                                                      size=settings.PROFILE_IMAGE_LARGE)
        assert_equal(d['id'], user._primary_key)
        assert_equal(d['url'], user.url)
        assert_equal(d.get('username'), None)
        assert_equal(d['fullname'], user.fullname)
        assert_equal(d['registered'], user.is_registered)
        assert_equal(d['profile_image_url'], profile_image_url)
        assert_equal(d['absolute_url'], user.absolute_url)
        assert_equal(d['date_registered'], user.date_registered.strftime('%Y-%m-%d'))
        # Only non-deleted, non-registration project-category nodes count.
        projects = [
            node
            for node in user.contributed
            if node.category == 'project'
            and not node.is_registration
            and not node.is_deleted
        ]
        public_projects = [p for p in projects if p.is_public]
        assert_equal(d['number_projects'], len(projects))
        assert_equal(d['number_public_projects'], len(public_projects))
class TestNodeSerializers(OsfTestCase):
# Regression test for #489
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
    def test_serialize_node_summary_private_node_should_include_id_and_primary_boolean_reg_and_fork(self):
        """Even for a node the viewer cannot see, the summary exposes id,
        primary, is_registration and is_fork."""
        user = UserFactory()
        # user cannot see this node
        node = ProjectFactory(is_public=False)
        result = serialize_node_summary(
            node, auth=Auth(user),
            primary=True,
        )
        # serialized result should have id and primary
        assert_equal(result['id'], node._primary_key)
        # NOTE(review): second arg of assert_true is a message, not an
        # expected value — probably meant assert_equal or plain assert_true.
        assert_true(result['primary'], True)
        assert_equal(result['is_registration'], node.is_registration)
        assert_equal(result['is_fork'], node.is_fork)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/668
    def test_serialize_node_summary_for_registration_uses_correct_date_format(self):
        """Registration summaries format registered_date as 'YYYY-MM-DD HH:MM UTC'."""
        reg = RegistrationFactory()
        res = serialize_node_summary(reg, auth=Auth(reg.creator))
        assert_equal(res['registered_date'],
                     reg.registered_date.strftime('%Y-%m-%d %H:%M UTC'))
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/858
    def test_serialize_node_summary_private_registration_should_include_is_registration(self):
        """is_registration is visible even to non-contributors of a private registration."""
        user = UserFactory()
        # non-contributor cannot see private registration of public project
        node = ProjectFactory(is_public=True)
        reg = RegistrationFactory(project=node, user=node.creator)
        res = serialize_node_summary(reg, auth=Auth(user))
        # serialized result should have is_registration
        assert_true(res['is_registration'])
# https://openscience.atlassian.net/browse/OSF-4618
    def test_get_children_only_returns_child_nodes_with_admin_permissions(self):
        """_get_children filters out children where the user lacks admin rights."""
        user = UserFactory()
        admin_project = ProjectFactory()
        admin_project.add_contributor(user, auth=Auth(admin_project.creator),
                                      permissions=permissions.expand_permissions(permissions.ADMIN))
        admin_project.save()
        admin_component = NodeFactory(parent=admin_project)
        admin_component.add_contributor(user, auth=Auth(admin_component.creator),
                                        permissions=permissions.expand_permissions(permissions.ADMIN))
        admin_component.save()
        read_and_write = NodeFactory(parent=admin_project)
        read_and_write.add_contributor(user, auth=Auth(read_and_write.creator),
                                       permissions=permissions.expand_permissions(permissions.WRITE))
        read_and_write.save()
        read_only = NodeFactory(parent=admin_project)
        read_only.add_contributor(user, auth=Auth(read_only.creator),
                                  permissions=permissions.expand_permissions(permissions.READ))
        read_only.save()
        non_contributor = NodeFactory(parent=admin_project)
        # Only admin_component qualifies: write/read/non-contributor are excluded.
        components = _get_children(admin_project, Auth(user))
        assert_equal(len(components), 1)
    def test_serialize_node_summary_private_fork_should_include_is_fork(self):
        """is_fork is visible even to non-contributors of a private fork."""
        user = UserFactory()
        # non-contributor cannot see private fork of public project
        node = ProjectFactory(is_public=True)
        consolidated_auth = Auth(user=node.creator)
        fork = node.fork_node(consolidated_auth)
        res = serialize_node_summary(
            fork, auth=Auth(user),
            primary=True,
        )
        # serialized result should have is_fork
        assert_true(res['is_fork'])
    def test_serialize_node_summary_private_fork_private_project_should_include_is_fork(self):
        """A project contributor without access to a private fork still sees is_fork."""
        # contributor on a private project
        user = UserFactory()
        node = ProjectFactory(is_public=False)
        node.add_contributor(user)
        # contributor cannot see private fork of this project
        consolidated_auth = Auth(user=node.creator)
        fork = node.fork_node(consolidated_auth)
        res = serialize_node_summary(
            fork, auth=Auth(user),
            primary=True,
        )
        # serialized result should have is_fork
        assert_false(res['can_view'])
        assert_true(res['is_fork'])
    def test_serialize_node_summary_child_exists(self):
        """child_exists is True only for real child components, not node links."""
        user = UserFactory()
        parent_node = ProjectFactory(creator=user)
        linked_node = ProjectFactory(creator=user)
        result = _view_project(parent_node, Auth(user))
        assert_equal(result['node']['child_exists'], False)
        # A node *link* does not count as a child.
        parent_node.add_node_link(linked_node, Auth(user), save=True)
        result = _view_project(parent_node, Auth(user))
        assert_equal(result['node']['child_exists'], False)
        child_component = NodeFactory(creator=user, parent=parent_node)
        result = _view_project(parent_node, Auth(user))
        assert_equal(result['node']['child_exists'], True)
def test_serialize_node_search_returns_only_visible_contribu |
clarete/storagelib | storagelib.py | Python | agpl-3.0 | 10,480 | 0.001527 | # -*- Coding: utf-8; Mode: Python -*-
#
# storagelib.py - A simple and extensible storage library
#
# Copyright (C) 2010 Lincoln de Sousa <lincoln@comum.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""A simple storage lib
This library aims to provide a simple interface that receives a file
(or its data), store it and then return the URI to reach the stored
resource.
It is written to be simple but extensible. It should be easy to add a
new type of storage just by writting a simple plugin.
Another thing we had as goal is to provide a way for the system
administrator to manager more than one repos with priorities and
weights.
"""
import sys
import os
from datetime import datetime
from random import randint, choice
from ConfigParser import ConfigParser
_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
_STORAGES = {}
_NAME_POLICIES = {}
def register_storage_type(klass):
    """Register a storage class in the _STORAGES dictionary.

    Every `Attr` marker found on the class is recorded in
    ``klass.extra_attrs`` and replaced by its default value, so those
    parameters can later be loaded from the config file.
    """
    if not hasattr(klass, 'extra_attrs'):
        klass.extra_attrs = []
    for attr_name in dir(klass):
        marker = getattr(klass, attr_name)
        if not isinstance(marker, Attr):
            continue
        klass.extra_attrs.append(attr_name)
        setattr(klass, attr_name, marker.default)
    _STORAGES[klass.type_] = klass
def np_random(path):
    """Name policy: replace the file name with 10 random characters,
    retrying (recursively) until the name does not exist on disk.
    """
    directory = os.path.dirname(path)
    random_name = ''.join(choice(_CHARS) for _ in range(10))
    npath = os.path.join(directory, random_name)
    while os.path.exists(npath):
        npath = np_random(npath)
    return npath
_NAME_POLICIES['random'] = np_random
def np_preserve(path):
    """Name policy: keep the original name; if it already exists, append
    a '.YYYYmmdd-HHMMSS' timestamp suffix.
    """
    npath = path
    while os.path.exists(npath):
        stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
        npath = '{0}.{1}'.format(path, stamp)
    return npath
_NAME_POLICIES['preserve'] = np_preserve
def np_preserve_ext(path):
    """Name policy: random base name, but keep the original extension."""
    extension = os.path.splitext(path)[1]
    random_name = ''.join(choice(_CHARS) for _ in range(10))
    npath = os.path.join(os.path.dirname(path), random_name) + extension
    while os.path.exists(npath):
        npath = np_preserve_ext(npath)
    return npath
_NAME_POLICIES['preserve_ext'] = np_preserve_ext
class Attr(object):
    """Marker for plugin attributes.

    Instances of this class mark class attributes whose values should be
    read from the general config file; ``default`` is used when the config
    does not provide one.
    """

    def __init__(self, default=None):
        self.default = default
class StorageMeta(type):
    """Storage metaclass.

    This metaclass has two goals:
     * Register new types of storages (via register_storage_type).
     * List the Attr-marked attributes of new storage types so they can be
       filled from the main config file.
    """
    def __new__(mcs, name, bases, attrs):
        # Create the class normally, then register it as a storage type.
        klass = type.__new__(mcs, name, bases, attrs)
        register_storage_type(klass)
        return klass
class BaseStorage(object):
    """A storage representation.

    This class holds all basic (and required) attributes that a
    storage must have. Registered as the 'local' storage type through
    StorageMeta.
    """
    __metaclass__ = StorageMeta
    type_ = 'local'
    name = None
    dest = None            # destination directory on disk
    base_uri = None        # public URI prefix used to build the result URL
    name_policy = None     # key into _NAME_POLICIES
    structure = None
    priority = 0
    weight = 0
    def get_name(self, finst):
        """Gets a name for the file being stored.

        The name is not actually created/chosen by this method. It
        only calls the proper name policy giving the original name as
        argument. A plain string argument is treated as an in-memory
        buffer and named '__memory__'.
        """
        if isinstance(finst, basestring):
            fname = '__memory__'
        else:
            fname = getattr(finst, 'filename', finst.name)
        fname = os.path.basename(fname)
        fpath = os.path.join(self.dest, fname)
        npolicy = _NAME_POLICIES[self.name_policy]
        return npolicy(fpath)
    def get_content(self, finst):
        """Gets the content of the file-like object or returns the buffer as-is."""
        if hasattr(finst, 'read'):
            return finst.read()
        else:
            return finst
    def setup(self):
        """Tries to setup everything needed to ensure that this
        storage is working. Returns True if everything is ok and False
        otherwise.
        """
        if not os.access(self.dest, os.W_OK):
            return False
        return True
    def store(self, finst):
        """Actually stores the file and returns its public URI."""
        name = self.get_name(finst)
        content = self.get_content(finst)
        # BUGFIX: use a context manager so the handle is flushed and closed
        # even on error; the original `open(name, 'w').write(content)`
        # leaked the file object.
        with open(name, 'w') as output:
            output.write(content)
        # Time to say to the user where's the uploaded file
        new_name = os.path.basename(name)
        if not self.base_uri.endswith('/'):
            self.base_uri += '/'
        return self.base_uri + new_name
def cmp_storages(repo1, repo2):
    """cmp-style comparator: order repositories by priority (lower first),
    breaking ties by weight — the same ordering rules as SRV records.
    """
    if repo1.priority != repo2.priority:
        return int(repo1.priority) - int(repo2.priority)
    return int(repo1.weight) - int(repo2.weight)
class StorageContext(object):
"""Context to manage storages
This class looks for storages defined in a config file, sort them
using the same algorithm for sorting SRV records defined in the
RFC 2782.
"""
    def __init__(self, cfg):
        """Load repositories from the config file *cfg*, then order them
        by priority/weight (RFC 2782 style)."""
        self.repo_list = []
        self.parse_cfg(cfg)
        self.sort_repos()
    def parse_cfg(self, cfg_file):
        """Parses the config file looking for repositories.

        Each non-Default section describes one storage; the 'Default'
        section may list extra plugin modules to load (each must expose a
        ``Storage`` class).
        """
        cfg = ConfigParser()
        cfg.read([cfg_file])
        # Reading the Default section looking for the plugins entry
        # and loading all of them.
        if cfg.has_section('Default') and \
           cfg.has_option('Default', 'plugins'):
            plugins = cfg.get('Default', 'plugins').split(',')
            for i in plugins:
                module = __import__(i.strip(), globals(), fromlist='Storage')
                register_storage_type(module.Storage)
        for i in cfg.sections():
            # We can't handle the Default section as a storage
            if i == 'Default':
                continue
            # The storage instance
            storage = _STORAGES[cfg.get(i, 'type')]()
            # reading attrs defined in BaseStorage
            storage.name = i
            storage.dest = cfg.get(i, 'dest')
            storage.base_uri = cfg.get(i, 'base_uri')
            storage.name_policy = cfg.get(i, 'name_policy')
            storage.structure = cfg.get(i, 'structure')
            if cfg.has_option(i, 'priority'):
                storage.priority = cfg.getint(i, 'priority')
            if cfg.has_option(i, 'weight'):
                storage.weight = cfg.getint(i, 'weight')
            # reading extra attrs, defined by each storage, like ssh
            for extra_attr in storage.extra_attrs:
                if cfg.has_option(i, extra_attr):
                    setattr(storage, extra_attr, cfg.get(i, extra_attr))
            self.repo_list.append(storage)
def sort_repos(self):
"""Sorts repositories in order of precedence
This sorts repositories using their `priority' and `weight'
fields, just like RFC 2782 spec suggests to SRV targets.
"""
self.repo_list.sort(cmp_ | storages)
# let's copy the sorted list above
unordered = |
danieltellez/career | career/models.py | Python | gpl-2.0 | 2,454 | 0.007335 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from hvad.models import TranslatableModel, TranslatedFields
from hvad.manager import TranslationManager
class Quiz(TranslatableModel):
    """A quiz whose title/description are translatable (django-hvad)."""
    # Untranslated internal label, used for admin/__unicode__ display.
    short_description = models.CharField(max_length=128)
    translations = TranslatedFields(
        title = models.CharField(max_length=256, null=True, blank=True),
        description = models.TextField(null=True, blank=True)
    )
    def __unicode__(self):
        return u'%s' % self.short_description
    def get_questions(self):
        # Questions of this quiz, ordered by their ``order`` field.
        return Question.objects.get_questions_for_quiz(self.id)
    class Meta:
        verbose_name_plural = 'Quizzes'
class Answer(TranslatableModel):
    """An answer option; only its description is translated."""
    short_description = models.CharField(max_length=128)
    translations = TranslatedFields(
        description = models.TextField(null=True, blank=True)
    )
class QuestionManager(TranslationManager):
    """hvad-aware manager adding quiz-scoped query helpers for Question.

    Fixes stray row-delimiter corruption in the base-class name
    (``TranslationManager`` is imported from hvad.manager above).
    """
    def get_questions_for_quiz(self, quiz):
        """Return the questions of *quiz* ordered by their ``order`` field."""
        return self.get_query_set().filter(quiz=quiz).order_by('order')
class Question(TranslatableModel):
    """A question belonging to a Quiz; title/description/help_text are translated.

    Fixes stray row-delimiter corruption in the ``description`` field name.
    """
    short_description = models.CharField(max_length=128)
    quiz = models.ForeignKey('Quiz')
    # Position of the question inside its quiz (see QuestionManager).
    order = models.IntegerField(null=True, blank=True)
    translations = TranslatedFields(
        title = models.CharField(max_length=256, null=True, blank=True),
        description = models.TextField(null=True, blank=True),
        help_text = models.TextField(null=True, blank=True)
    )
    objects = QuestionManager()
class QuizToCareer(models.Model):
    """Through model linking a Quiz to a Career with completion state."""
    quiz = models.ForeignKey('Quiz')
    career = models.ForeignKey('Career')
    passed = models.BooleanField(default=False)
    due_date = models.DateField(null=True, blank=True)
class Career(models.Model):
    """A user's career: the set of quizzes assigned to them."""
    user = models.ForeignKey(User)
    # NOTE(review): null=True has no effect on ManyToManyField (Django warns).
    quizzes = models.ManyToManyField(Quiz, null=True, blank=True,
                                     through='QuizToCareer')
class Student(models.Model):
    """A student profile wrapping a Django auth User."""
    user = models.ForeignKey(User)
    def __unicode__(self):
        return u'%s' % self.user.username
class Teacher(models.Model):
    """A teacher profile; students are linked through StudentToTeacher."""
    user = models.ForeignKey(User)
    # NOTE(review): null=True has no effect on ManyToManyField (Django warns).
    students = models.ManyToManyField('Student', null=True, blank=True,
                                      through='StudentToTeacher')
    def __unicode__(self):
        return u'%s' % self.user.username
class StudentToTeacher(models.Model):
    """Through model linking a Student to a Teacher."""
    student = models.ForeignKey('Student')
    teacher = models.ForeignKey('Teacher')
cosmoharrigan/matrix-entropy | main.py | Python | gpl-3.0 | 1,624 | 0.000616 | """
Python implementation of the matrix information measurement examples from the
StackExchange answer written by WilliamAHuber for
"Measuring entropy/ information/ patterns of a 2d binary matrix"
http://stats.stackexchange.com/a/17556/43909
Copyright 2014 Cosmo Harrigan
This program is free software, distributed under the terms of the GNU LGPL v3.0
"""
__author__ = 'Cosmo Harrigan'

from matplotlib import pyplot
from neighborhood_functions import avg_components
# BUGFIX: the import statement was broken by stray row-delimiter corruption
# ("im | port").
from moving_window_filter import moving_window_filter
from calculate_profile import profile

# Function to apply over each neighborhood window
F = avg_components

# Define the matrices as input_matrices
from data import *

# Iterate over the input matrices
for m in range(0, len(input_matrices)):
    active_matrix = input_matrices[m]
    print("---------\nMatrix #{0}\n---------\n".format(m))
    # Produce the filtered matrices at varying scales and the associated
    # entropy "profiles"
    matrices = []
    for n in range(1, min(active_matrix.shape)):
        output_matrix = moving_window_filter(matrix=active_matrix,
                                             f=F,
                                             neighborhood_size=n)
        matrices.append(output_matrix)
        subplot = pyplot.subplot(5, 4, m * 4 + n)
        pyplot.axis('off')
        pyplot.imshow(output_matrix,
                      interpolation='nearest',
                      cmap='Greys_r',
                      vmin=0,
                      vmax=1)
        print("Neighborhood size = {0}\n{1}\n".format(n, output_matrix))
    print("Profile:\n{0}\n".format(profile(matrices)))
pyplot.show()
olivierkes/manuskript | manuskript/exporter/pandoc/__init__.py | Python | gpl-3.0 | 3,708 | 0.001888 | #!/usr/bin/env python
# --!-- coding: utf8 --!--
# Fixes stray row-delimiter corruption in the plainText import line.
import subprocess
import logging

from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import qApp, QMessageBox

from manuskript.exporter.basic import basicExporter, basicFormat
from manuskript.exporter.pandoc.HTML import HTML
from manuskript.exporter.pandoc.PDF import PDF
from manuskript.exporter.pandoc.outputFormats import ePub, OpenDocument, DocX
from manuskript.exporter.pandoc.plainText import reST, markdown, latex, OPML
from manuskript.functions import mainWindow

LOGGER = logging.getLogger(__name__)
class pandocExporter(basicExporter):
    """Exporter backed by the external ``pandoc`` command-line tool.

    Fixes stray row-delimiter corruption inside the translated description
    string ("docum | ent" -> "document").
    """
    name = "Pandoc"
    description = qApp.translate("Export", """<p>A universal document converter. Can be used to convert Markdown to a wide range of other
    formats.</p>
    <p>Website: <a href="http://www.pandoc.org">http://pandoc.org/</a></p>
    """)
    cmd = "pandoc"
    absentTip = "Install pandoc to benefit from a wide range of export formats (DocX, ePub, PDF, etc.)"
    absentURL = "http://pandoc.org/installing.html"
    def __init__(self):
        basicExporter.__init__(self)
        # Output formats offered by this exporter.
        self.exportTo = [
            markdown(self),
            latex(self),
            HTML(self),
            ePub(self),
            OpenDocument(self),
            DocX(self),
            PDF(self),
            reST(self),
            OPML(self),
        ]
    def version(self):
        """Return the first line of ``pandoc --version`` (or '' if unavailable)."""
        if self.isValid():
            r = self.run(["--version"])
            return r.split("\n")[0]
        else:
            return ""
    def convert(self, src, args, outputfile=None):
        """Run pandoc on *src* with *args*.

        Returns pandoc's stdout as text, or None on error (errors are
        logged and reported via a message box; warnings are only logged).
        """
        # isValid() == 2 means the bundled/system command works; 1 means a
        # user-configured custom path must be used.
        if self.isValid() == 2:
            run = self.cmd
        elif self.isValid() == 1:
            run = self.customPath
        else:
            LOGGER.error("No command for pandoc.")
            return None
        args = [run] + args
        if outputfile:
            args.append("--output={}".format(outputfile))
        # Map project metadata (flat-data model columns) to pandoc variables.
        for name, col, var in [
                ("Title", 0, "title"),
                ("Subtitle", 1, "subtitle"),
                ("Serie", 2, ""),
                ("Volume", 3, ""),
                ("Genre", 4, ""),
                ("License", 5, ""),
                ("Author", 6, "author"),
                ("Email", 7, ""),
                ]:
            item = mainWindow().mdlFlatData.item(0, col)
            if var and item and item.text().strip():
                args.append("--variable={}:{}".format(var, item.text().strip()))
        # Add title metadata required for pandoc >= 2.x
        title = "Untitled"
        if mainWindow().mdlFlatData.item(0, 0):
            title = mainWindow().mdlFlatData.item(0, 0).text().strip()
        args.append("--metadata=title:{}".format(title))
        qApp.setOverrideCursor(QCursor(Qt.WaitCursor))
        p = subprocess.Popen(
            args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        if not type(src) == bytes:
            src = src.encode("utf-8")  # assumes utf-8
        stdout, stderr = p.communicate(src)
        qApp.restoreOverrideCursor()
        if stderr or p.returncode != 0:
            err_type = "ERROR" if p.returncode != 0 else "WARNING"
            err = "%s on export\n" % err_type \
                  + "Return code: %d\n" % p.returncode \
                  + "Command and parameters:\n%s\n" % p.args \
                  + "Stderr content:\n" + stderr.decode("utf-8")
            if p.returncode != 0:
                LOGGER.error(err)
                QMessageBox.critical(mainWindow().dialog, qApp.translate("Export", "Error"), err)
            else:
                LOGGER.warning(err)
            return None
        return stdout.decode("utf-8")
|
selective-inference/selective-inference | selectinf/sandbox/bayesian/estimator.py | Python | bsd-3-clause | 29,896 | 0.006456 | import numpy as np
import regreg.api as rr
from selection.randomized.glm import pairs_bootstrap_glm, bootstrap_cov
from selection.randomized.query import query
from selection.randomized.randomization import split
import functools
def pairs_bootstrap_glm(glm_loss,
                        active,
                        beta_full=None,
                        inactive=None,
                        scaling=1.,
                        solve_args={'min_its':50, 'tol':1.e-10}):
    """
    pairs bootstrap of (beta_hat_active, -grad_inactive(beta_hat_active))

    Returns a (bootstrap_fn, observed) pair: bootstrap_fn(indices) recomputes
    the (scaled) statistic on the resampled rows; observed is the statistic on
    the original data, scaled the same way.

    NOTE(review): mutable default for solve_args is shared across calls;
    harmless here only if never mutated. ``restricted_Mest`` is used but not
    imported in this excerpt.
    """
    X, Y = glm_loss.data
    # If no full coefficient vector is supplied, fit the restricted
    # M-estimator on the active set and embed it into a full-size vector.
    if beta_full is None:
        beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)
        beta_full = np.zeros(glm_loss.shape)
        beta_full[active] = beta_active
    else:
        beta_active = beta_full[active]
    X_active = X[:,active]
    nactive = active.sum()
    ntotal = nactive
    if inactive is not None:
        X_inactive = X[:,inactive]
        ntotal += inactive.sum()
    # Weighted design quantities at the restricted fit:
    # _bootQ is the (active-block) Fisher-information-style matrix.
    _bootW = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
    _bootQ = X_active.T.dot(_bootW.dot(X_active))
    _bootQinv = np.linalg.inv(_bootQ)
    if inactive is not None:
        # Cross-term used to decorrelate the inactive score from the
        # active-coefficient estimate.
        _bootC = X_inactive.T.dot(_bootW.dot(X_active))
        _bootI = _bootC.dot(_bootQinv)
    else:
        _bootI = None
    nactive = active.sum()
    if inactive is not None:
        X_full = np.hstack([X_active,X_inactive])
        beta_overall = np.zeros(X_full.shape[1])
        beta_overall[:nactive] = beta_active
    else:
        X_full = X_active
        beta_overall = beta_active
    _boot_mu = lambda X_full, beta_overall: glm_loss.saturated_loss.mean_function(X_full.dot(beta_overall))
    # Observed statistic: active coefficients followed (if any) by the
    # negative inactive gradient at the restricted fit.
    if ntotal > nactive:
        observed = np.hstack([beta_active, -glm_loss.smooth_objective(beta_full, 'grad')[inactive]])
    else:
        observed = beta_active
    # scaling is a lipschitz constant for a gradient squared
    _sqrt_scaling = np.sqrt(scaling)
    def _boot_score(X_full, Y, ntotal, _bootQinv, _bootI, nactive, _sqrt_scaling, beta_overall, indices):
        # One bootstrap replicate: resample rows, recompute the score and
        # map it to the (scaled) statistic.
        X_star = X_full[indices]
        Y_star = Y[indices]
        score = X_star.T.dot(Y_star - _boot_mu(X_star, beta_overall))
        result = np.zeros(ntotal)
        result[:nactive] = _bootQinv.dot(score[:nactive])
        if ntotal > nactive:
            result[nactive:] = score[nactive:] - _bootI.dot(score[:nactive])
        result[:nactive] *= _sqrt_scaling
        result[nactive:] /= _sqrt_scaling
        return result
    observed[:nactive] *= _sqrt_scaling
    observed[nactive:] /= _sqrt_scaling
    return functools.partial(_boot_score, X_full, Y, ntotal, _bootQinv, _bootI, nactive, _sqrt_scaling, beta_overall), observed
def pairs_bootstrap_score(glm_loss,
                          active,
                          beta_active=None,
                          solve_args={'min_its':50, 'tol':1.e-10}):
    """
    pairs bootstrap of the (negative) full score at the restricted fit.

    Returns a callable taking row ``indices`` and computing
    -X*^T (Y* - mu(X*_active beta_active)) on the resampled data.

    NOTE(review): mutable default for solve_args is shared across calls.
    """
    X, Y = glm_loss.data
    # Fit the restricted M-estimator if no active coefficients were given.
    if beta_active is None:
        beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)
    X_active = X[:,active]
    # NOTE(review): _bootW is computed but never used in this function.
    _bootW = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
    _boot_mu = lambda X_active, beta_active: glm_loss.saturated_loss.mean_function(X_active.dot(beta_active))
    def _boot_score(X, Y, active, beta_active, indices):
        # One bootstrap replicate of the negative score.
        X_star = X[indices]
        Y_star = Y[indices]
        score = -X_star.T.dot(Y_star - _boot_mu(X_star[:,active], beta_active))
        return score
    return functools.partial(_boot_score, X, Y, active, beta_active)
def set_alpha_matrix(glm_loss,
                     active,
                     beta_full=None,
                     inactive=None,
                     scaling=1.,
                     solve_args={'min_its': 50, 'tol': 1.e-10}):
    """
    Form ``Q_E^{-1} X_E^T diag(residuals)``, expressing the restricted
    M-estimator as an (approximately) linear functional of the data.

    Parameters
    ----------
    glm_loss : regreg GLM loss
        Must expose ``.data``, ``.shape`` and ``.saturated_loss``.
    active : np.bool array
        Mask of active (selected) coordinates.
    beta_full : np.ndarray, optional
        Full coefficient vector; refit on ``active`` when None.
    inactive : np.bool array, optional
        Mask of inactive coordinates appended to the design for the
        residual computation.
    scaling : float
        Unused; retained for interface compatibility.  # NOTE(review)
    solve_args : dict
        Arguments passed to the regreg solver when refitting.

    Returns
    -------
    np.ndarray
        Matrix of shape ``(nactive, n)``.
    """
    X, Y = glm_loss.data

    if beta_full is None:
        beta_active = restricted_Mest(glm_loss, active, solve_args=solve_args)
        beta_full = np.zeros(glm_loss.shape)
        beta_full[active] = beta_active
    else:
        beta_active = beta_full[active]

    X_active = X[:, active]
    nactive = active.sum()

    # weighted Gram matrix of the active design at the restricted fit
    _W = np.diag(glm_loss.saturated_loss.hessian(X_active.dot(beta_active)))
    _Q = X_active.T.dot(_W.dot(X_active))
    _Qinv = np.linalg.inv(_Q)

    # residuals are computed on the (active + inactive) design with
    # zeros for the inactive coefficients
    if inactive is not None:
        X_full = np.hstack([X_active, X[:, inactive]])
        beta_overall = np.zeros(X_full.shape[1])
        beta_overall[:nactive] = beta_active
    else:
        X_full = X_active
        beta_overall = beta_active

    obs_residuals = Y - glm_loss.saturated_loss.mean_function(X_full.dot(beta_overall))

    return np.dot(np.dot(_Qinv, X_active.T), np.diag(obs_residuals))
class M_estimator(query):
    def __init__(self, loss, epsilon, penalty, randomization, solve_args={'min_its':50, 'tol':1.e-10}):
        """
        Construct a randomized M-estimator query.

        Simply stores the arguments and initializes the ``query`` base
        class with the randomization; the actual optimization happens
        later in ``solve``.

        Parameters
        ----------
        loss : regreg smooth loss
            The (unrandomized) loss function.
        epsilon : float
            Scalar passed through to ``solve``.  # presumably a ridge term -- confirm in solve()
        penalty : regreg penalty
            Group-structured penalty; its ``groups``/``weights`` are used
            by ``solve`` to determine the active set.
        randomization : randomization object
            Randomization handed to the ``query`` base class.
        solve_args : dict
            Arguments passed to the regreg solver.
        """
        query.__init__(self, randomization)

        # store all constructor arguments for later use in solve()
        (self.loss,
         self.epsilon,
         self.penalty,
         self.randomization,
         self.solve_args) = (loss,
                             epsilon,
                             penalty,
                             randomization,
                             solve_args)
# Methods needed for subclassing a query
def solve(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}):
self.randomize()
(loss,
randomized_loss,
epsilon,
penalty,
randomization,
solve_args) = (self.loss,
self.randomized_loss,
self.epsilon,
self.penalty,
self.randomization,
self.solve_args)
# initial solution
problem = rr.simple_problem(randomized_loss, penalty)
self.initial_soln = problem.solve(**solve_args)
# find the active groups and their direction vectors
# as well as unpenalized groups
groups = np.unique(penalty.groups)
active_groups = np.zeros(len(groups), np.bool)
unpenalized_groups = np.zeros(len(groups), np.bool)
active_directions = []
active = np.zeros(loss.shape, np.bool)
unpenalized = np.zeros(loss.shape, np.bool)
initial_scalings = []
for i, g in enumerate(groups):
group = penalty.groups == g
active_groups[i] = (np.linalg.norm(self.initial_soln[group]) > 1.e-6 * penalty.weights[g]) and (penalty.weights[g] > 0)
unpenalized_groups[i] = (penalty.weights[g] == 0)
if active_groups[i]:
active[group] = True
z | = np.zeros(active.shape, np.float)
z[group] = self.initial_soln[group] / np.linalg.norm(self.initial_soln[group])
active_directions.append( | z)
initial_scalings.append(np.linalg.norm(self.initial_soln[group]))
if unpenalized_groups[i]:
unpenalized[group] = True
# solve the restricted problem
self._overall = active + unpenalized
self._inactive = ~self._overall
self._unpenalized = unpenalized
self._active_directions = np.array(active_directions).T
self._active_groups = np.ar |
plotly/plotly.py | packages/python/plotly/plotly/validators/bar/marker/colorbar/title/font/_color.py | Python | mit | 455 | 0 | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``bar.marker.colorbar.title.font.color`` property.

    FIX: the original was corrupted by stray ``|`` separators inside the
    ``__init__`` signature and the ``super()`` call, which broke the syntax.
    """

    def __init__(
        self,
        plotly_name="color",
        parent_name="bar.marker.colorbar.title.font",
        **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs
        )
|
fabricehong/zim-desktop | zim/stores/xml.py | Python | gpl-2.0 | 3,065 | 0.028059 | # -*- coding: utf-8 -*-
# Copyright 2008 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module reads an XML file defining zim pages.
For now the only XML tags which are supported are 'section' and 'page'. The
'section' tag serves as a container for multiple pages. The 'page' tag serves
as a container for the page content plus any sub-pages. Each page should have
an attribute 'name' giving it's basename, so the file can look like this::
<section>
<page name="Foo">
Some text in page Foo
<page name="Bar">
This is text in page 'Foo:Bar'
</page>
</page>
</section>
We read the whole file to memory, which puts certain limits on
scalability.
'''
# FUTURE: This module does not support attachments in the xml data
import zim.stores.memory
# importing class from this module makes get_store() fail
from zim.formats import get_format, ElementTreeModule
from zim.notebook import Path
from zim.parsing import TextBuffer
class XMLStore(zim.stores.memory.MemoryStore):
    """Read-only store that loads zim pages from a single XML file.

    The whole file is parsed into the in-memory node tree of the
    MemoryStore base class, so scalability is limited by memory.
    """

    properties = {
        'read-only': True
    }

    def __init__(self, notebook, path, file=None):
        zim.stores.memory.MemoryStore.__init__(self, notebook, path)
        self.file = file
        if not self.store_has_file():
            # not using assert because it could be optimized away;
            # also valid in both python 2 and 3 (the original used the
            # py2-only "raise E, msg" form)
            raise AssertionError('XML store needs file')
        self.format = get_format('wiki') # FIXME store format in XML header
        if self.file.exists():
            self.parse(self.file.read())

    def store_page(self, page):
        # FIX: was "memory.Store.store_page(self, page)" -- "memory" is not
        # a name in this module; call the imported base class explicitly.
        zim.stores.memory.MemoryStore.store_page(self, page)
        self.file.writelines(self.dump())

    def parse(self, content):
        """Parse XML *content* (string or list of lines) into the node tree."""
        if isinstance(content, list):
            content = ''.join(content)
        target = MemoryStoreTreeBuilder(self)
        builder = ElementTreeModule.XMLTreeBuilder(target=target)
        builder.feed(content)
        builder.close()

    def dump(self):
        """Serialize the node tree back to XML; returns a list of lines."""
        text = TextBuffer([
            u'<?xml version="1.0" encoding="utf-8"?>\n',
            u'<section>\n' ])
        for node in self._nodetree:
            text += self._dump_node(node)
        text.append(u'</section>\n')
        return text.get_lines()

    def _dump_node(self, node):
        # Serialize one page node and, recursively, its children
        text = [u'<page name="%s">\n' % node.basename]
        if node.text:
            text.append(node.text)
        for n in node.children:
            text += self._dump_node(n) # recurs
        text.append('</page>\n')
        return text
class MemoryStoreTreeBuilder(object):
    """ElementTree parser target that fills a MemoryStore while parsing.

    Only 'section' (a plain container) and 'page' elements are handled;
    character data is accumulated into the text of the current page node.
    """

    def __init__(self, store):
        self.store = store
        self.path = Path(':')
        self.stack = []

    def start(self, tag, attrib):
        # 'section' is a pure container -- nothing to do
        if tag == 'section':
            return
        assert tag == 'page', 'Unknown tag'
        assert 'name' in attrib
        self.path = self.path + attrib['name']
        node = self.store.get_node(self.path, vivificate=True)
        self.stack.append(node)

    def data(self, data):
        # Accumulate character data on the innermost open page
        if not self.stack:
            return
        node = self.stack[-1]
        node.text = node.text + data if node.text else data

    def end(self, tag):
        if tag == 'section':
            return
        assert self.stack
        self.path = self.path.parent
        node = self.stack.pop()
        # Normalize page text: drop whitespace-only text, otherwise strip
        # surrounding newlines and ensure a single trailing newline.
        # (``unicode`` here: this module is Python 2 code.)
        if node.text and node.text.isspace():
            node.text = ''
        elif node.text:
            node.text = unicode(node.text.strip('\n') + '\n')

    def close(self):
        pass
|
engineeringbird/python_tests | test.py | Python | mit | 313 | 0.009585 | print("This is a test")
# NOTE: this is Python 2 code (raw_input); run it with a Python 2 interpreter.
# FIX: two prompt strings were corrupted by stray "|" separators.
answer = raw_input("Please give me an answer ")
print("Thank you " + answer)

def print_yes():
    """Print a cheer when the user answered "yes", a greeting otherwise."""
    if answer == "yes":
        print("YAS!")
    else:
        print("Hello World!")

print_yes()
kind_message = raw_input(answer + " you are a unique snowflake")
print(kind_message)
|
lawsie/guizero | examples/after_repeat.py | Python | bsd-3-clause | 629 | 0.009539 | from guizero import App, PushButt | on, TextBox
def welcome():
    """Print a greeting; scheduled once shortly after startup."""
    print("Welcome")

def hi():
    """Print "Hi"; repeated every second until cancelled."""
    print("Hi")

def message(my_message):
    """Print an arbitrary message (used as a scheduled callback)."""
    print(my_message)

def whatever():
    # say whatever using the message function, passing the text as an argument
    # (FIX: this comment line was garbled/dedented in the original)
    app.after(200, message, args=["Whatever"])

def cancel_hi():
    # stop the repeating hi() callback
    app.cancel(hi)

app = App()

# create some buttons
hi_button = PushButton(app, cancel_hi, text="Stop hi")
what_button = PushButton(app, whatever, text="Whatever")

# after a very short pause, say welcome
app.after(10, welcome)
# keep repeating hi, until it is cancelled
app.repeat(1000, hi)

app.display()
|
Aerojspark/PyFR | pyfr/readers/base.py | Python | bsd-3-clause | 7,767 | 0.000129 | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from itertools import chain
import uuid
import numpy as np
from pyfr.nputil import fuzzysort
class BaseReader(object, metaclass=ABCMeta):
    """Abstract base class for mesh readers that convert to PyFR format."""

    @abstractmethod
    def __init__(self):
        pass

    @abstractmethod
    def _to_raw_pyfrm(self):
        """Subclass hook: return the raw mesh dict for this file format."""
        pass

    def to_pyfrm(self):
        """Return the mesh dict, stamped with a fresh ``mesh_uuid``."""
        mesh = self._to_raw_pyfrm()

        # Tag the mesh with a unique identifier (bytes dtype)
        uid = str(uuid.uuid4())
        mesh['mesh_uuid'] = np.array(uid, dtype='S')

        return mesh
class NodalMeshAssembler(object):
    """Assemble a nodal mesh into PyFR connectivity and shape-point arrays.

    Parameters
    ----------
    nodepts : mapping
        Node index -> coordinates.
    elenodes : mapping
        (element type, physical entity) -> per-element node index array.
    pents : tuple
        (fluid entity, boundary-face entities, periodic-face entity pairs).
    maps : tuple
        (element type map, per-type face-node maps, node-ordering maps).

    FIX: two lines of the original (in ``get_connectivity`` and
    ``get_shape_points``) were corrupted by stray ``|`` separators and
    have been reconstructed.
    """

    # Dimensionality of each element type
    _petype_ndim = {'tri': 2, 'quad': 2,
                    'tet': 3, 'hex': 3, 'pri': 3, 'pyr': 3}

    # Face numberings for each element type
    _petype_fnums = {
        'tri': {'line': [0, 1, 2]},
        'quad': {'line': [0, 1, 2, 3]},
        'tet': {'tri': [0, 1, 2, 3]},
        'hex': {'quad': [0, 1, 2, 3, 4, 5]},
        'pri': {'quad': [2, 3, 4], 'tri': [0, 1]},
        'pyr': {'quad': [0], 'tri': [1, 2, 3, 4]}
    }

    # Number of nodes in the first-order representation of an element
    _petype_focount = {'line': 2, 'tri': 3, 'quad': 4,
                       'tet': 4, 'pyr': 5, 'pri': 6, 'hex': 8}

    def __init__(self, nodepts, elenodes, pents, maps):
        self._nodepts = nodepts
        self._elenodes = elenodes
        self._felespent, self._bfacespents, self._pfacespents = pents
        self._etype_map, self._petype_fnmap, self._nodemaps = maps

    def _check_pyr_parallelogram(self, foeles):
        # The pyramid mapping requires quad faces to be parallelograms
        nodepts = self._nodepts

        # Find PyFR node map for the quad face
        fnmap = self._petype_fnmap['pyr']['quad'][0]
        pfnmap = self._nodemaps.from_pyfr['quad', 4][fnmap]

        # Face nodes
        fpts = np.array([[nodepts[i] for i in fidx]
                         for fidx in foeles[:, pfnmap]])
        fpts = fpts.swapaxes(0, 1)

        # Check parallelogram or not
        if np.any(np.abs(fpts[0] - fpts[1] - fpts[2] + fpts[3]) > 1e-10):
            raise ValueError('Pyramids with non-parallelogram bases are '
                             'currently unsupported')

    def _to_first_order(self, elemap):
        """Truncate each element to its first-order node representation."""
        foelemap = {}
        for (etype, epent), eles in elemap.items():
            # PyFR element type ('hex', 'tri', &c)
            petype = self._etype_map[etype][0]

            # Number of nodes in the first-order representation
            focount = self._petype_focount[petype]

            foelemap[petype, epent] = eles[:, :focount]

            # Check if pyramids have a parallelogram base or not
            if petype == 'pyr':
                self._check_pyr_parallelogram(foelemap[petype, epent])

        return foelemap

    def _split_fluid(self, elemap):
        """Split elements into (fluid part, other entities by entity)."""
        selemap = defaultdict(dict)

        for (petype, epent), eles in elemap.items():
            selemap[epent][petype] = eles

        return selemap.pop(self._felespent), selemap

    def _foface_info(self, petype, pftype, foeles):
        # Face numbers of faces of this type on this element
        fnums = self._petype_fnums[petype][pftype]

        # First-order nodes associated with this face
        fnmap = self._petype_fnmap[petype][pftype]

        # Connectivity; (petype, eidx, fidx, flags)
        con = [(petype, i, j, 0) for i in range(len(foeles)) for j in fnums]

        # Nodes, sorted so a face hashes the same from both sides
        nodes = np.sort(foeles[:, fnmap]).reshape(len(con), -1)

        return con, nodes

    def _extract_faces(self, foeles):
        """Collect (connectivity, node) info for every face, by face type."""
        fofaces = defaultdict(list)
        for petype, eles in foeles.items():
            for pftype in self._petype_fnums[petype]:
                fofinf = self._foface_info(petype, pftype, eles)
                fofaces[pftype].append(fofinf)

        return fofaces

    def _pair_fluid_faces(self, ffofaces):
        """Pair interior faces; return (pairs, residual unpaired faces)."""
        pairs = defaultdict(list)
        resid = {}

        for pftype, faces in ffofaces.items():
            for f, n in chain.from_iterable(zip(f, n) for f, n in faces):
                sn = tuple(n)

                # See if the nodes are in resid
                if sn in resid:
                    pairs[pftype].append([resid.pop(sn), f])
                # Otherwise add them to the unpaired dict
                else:
                    resid[sn] = f

        return pairs, resid

    def _pair_periodic_fluid_faces(self, bpart, resid):
        """Pair faces across periodic boundaries, consuming from *resid*."""
        pfaces = defaultdict(list)
        nodepts = self._nodepts

        for lpent, rpent in self._pfacespents.values():
            for pftype in bpart[lpent]:
                lfnodes = bpart[lpent][pftype]
                rfnodes = bpart[rpent][pftype]

                lfpts = np.array([[nodepts[n] for n in fn] for fn in lfnodes])
                rfpts = np.array([[nodepts[n] for n in fn] for fn in rfnodes])

                # Match faces via the fuzzy-sorted order of their centroids
                lfidx = fuzzysort(lfpts.mean(axis=1).T, range(len(lfnodes)))
                rfidx = fuzzysort(rfpts.mean(axis=1).T, range(len(rfnodes)))

                for lfn, rfn in zip(lfnodes[lfidx], rfnodes[rfidx]):
                    lf = resid.pop(tuple(sorted(lfn)))
                    rf = resid.pop(tuple(sorted(rfn)))

                    pfaces[pftype].append([lf, rf])

        return pfaces

    def _ident_boundary_faces(self, bpart, resid):
        """Assign remaining unpaired faces to boundary-condition entities."""
        bfaces = defaultdict(list)

        bpents = set(self._bfacespents.values())

        for epent, fnodes in bpart.items():
            if epent in bpents:
                for fn in chain.from_iterable(fnodes.values()):
                    bfaces[epent].append(resid.pop(tuple(sorted(fn))))

        return bfaces

    def get_connectivity(self):
        """Return the internal and boundary connectivity arrays."""
        # For connectivity a first-order representation is sufficient
        eles = self._to_first_order(self._elenodes)

        # Split into fluid and boundary parts
        fpart, bpart = self._split_fluid(eles)

        # Extract the faces of the fluid elements
        ffaces = self._extract_faces(fpart)

        # Pair the fluid-fluid faces
        fpairs, resid = self._pair_fluid_faces(ffaces)

        # Identify periodic boundary face pairs
        pfpairs = self._pair_periodic_fluid_faces(bpart, resid)

        # Identify the fixed boundary faces
        bf = self._ident_boundary_faces(bpart, resid)

        if any(resid.values()):
            raise ValueError('Unpaired faces in mesh')

        # Flatten the face-pair dicts
        pairs = chain(chain.from_iterable(fpairs.values()),
                      chain.from_iterable(pfpairs.values()))

        # Generate the internal connectivity array
        con = list(pairs)

        # Generate boundary condition connectivity arrays
        bcon = {}
        for pbcrgn, pent in self._bfacespents.items():
            bcon[pbcrgn] = bf[pent]

        # Output (FIX: this line was corrupted in the original)
        ret = {'con_p0': np.array(con, dtype='S4,i4,i1,i1').T}

        for k, v in bcon.items():
            ret['bcon_{0}_p0'.format(k)] = np.array(v, dtype='S4,i4,i1,i1')

        return ret

    def get_shape_points(self):
        """Return the high-order shape-point arrays for the fluid elements."""
        spts = {}

        # Global node map (node index to coords)
        nodepts = self._nodepts

        for etype, pent in self._elenodes:
            # FIX: this condition was corrupted in the original
            if pent != self._felespent:
                continue

            # Elements and type information
            eles = self._elenodes[etype, pent]
            petype, nnodes = self._etype_map[etype]

            # Go from Gmsh to PyFR node ordering
            peles = eles[:, self._nodemaps.from_pyfr[petype, nnodes]]

            # Obtain the dimensionality of the element type
            ndim = self._petype_ndim[petype]

            # Build the array
            arr = np.array([[nodepts[i] for i in nn] for nn in peles])
            arr = arr.swapaxes(0, 1)
            arr = arr[..., :ndim]

            spts['spt_{0}_p0'.format(petype)] = arr

        return spts
|
jmontleon/ansible-service-broker | scripts/create_broker_secret.py | Python | apache-2.0 | 4,268 | 0.003515 | #! /usr/bin/env python
import sys
import yaml
import subprocess
# FIX: USAGE text was corrupted by stray "|" separators ("symbo | l",
# "mysecret | ansible-service-broker"); reconstructed below.  The YAML
# template indentation is restored so the rendered manifest is valid.
USAGE = """USAGE:
  {command} NAME NAMESPACE IMAGE [KEY=VALUE]* [@FILE]*

  NAME: the name of the secret to create/replace
  NAMESPACE: the target namespace of the secret. It should be the namespace of the broker for most usecases
  IMAGE: the docker image you would like to associate with the secret
  KEY: a key to create inside the secret. This cannot contain an "=" sign
  VALUE: the value for the KEY in the secret
  FILE: a yaml loadable file containing key: value pairs. A file must begin with an "@" symbol to be loaded

EXAMPLE:
  {command} mysecret ansible-service-broker docker.io/ansibleplaybookbundle/hello-world-apb key1=hello key2=world @additional_keys.yml
"""

# Joins rendered "key: value" pairs at the indentation level of stringData
DATA_SEPARATOR = "\n  "

SECRET_TEMPLATE = """---
apiVersion: v1
kind: Secret
metadata:
  name: {name}
  namespace: {namespace}
stringData:
  {data}
"""
def main():
    """Create/replace the secret from CLI args and update the broker config.

    Positional args: NAME NAMESPACE IMAGE, followed by any number of
    KEY=VALUE pairs and @FILE references (see USAGE).
    """
    name = sys.argv[1]
    namespace = sys.argv[2]
    apb = sys.argv[3]

    # FIX: scan only the arguments *after* IMAGE (argv[4:]); the original
    # scanned argv[3:], which included the image argument itself.
    keyvalues = [arg.split("=", 1) for arg in sys.argv[4:] if "=" in arg]
    files = [arg for arg in sys.argv[4:] if arg.startswith("@")]

    data = keyvalues + parse_files(files)

    runcmd('oc project {}'.format(namespace))
    try:
        runcmd('oc get dc asb')
    except Exception:
        raise Exception("Error: No broker deployment found in namespace {}".format(namespace))

    create_secret(name, namespace, data)
    changed = update_config(name, apb)
    if changed:
        # config changed, so the broker must pick up the new secret
        print("Rolling out a new broker...")
        runcmd('oc rollout latest asb')
def parse_files(files):
    """Load (key, value) pairs from each "@file" argument (YAML mappings)."""
    params = []
    for entry in files:
        path = entry[1:]  # strip the leading "@"
        with open(path, 'r') as fh:
            params.extend(yaml.load(fh.read()).items())
    return params
def create_secret(name, namespace, data):
    """Render the secret manifest, write it to /tmp, and apply it via oc."""
    rendered = [": ".join(quote(part) for part in pair) for pair in data]
    secret = SECRET_TEMPLATE.format(
        name=name,
        namespace=namespace,
        data=DATA_SEPARATOR.join(rendered)
    )

    manifest = '/tmp/{name}-secret'.format(name=name)
    with open(manifest, 'w') as f:
        f.write(secret)

    # "create" fails when the secret already exists; fall back to "replace"
    try:
        runcmd('oc create -f ' + manifest)
    except Exception:
        runcmd('oc replace -f ' + manifest)

    print('Created secret: \n\n{}'.format(secret))
def quote(string):
    """Wrap *string* in double quotes (YAML-safe scalar rendering)."""
    return '"%s"' % string
def update_config(name, apb):
    """Register the secret in the broker's config map.

    Returns True when the config was modified (the broker then needs a
    rollout), False when the entry was already present.
    """
    secret_entry = {"secret": name, "apb_name": fqname(apb), "title": name}
    config = get_broker_config()
    if secret_entry not in config['data']['broker-config'].get('secrets', []):
        # append the new entry to the (possibly absent) secrets list
        config['data']['broker-config']['secrets'] = config['data']['broker-config'].get('secrets', []) + [secret_entry]
        config_s = format_config(config)
        with open('/tmp/broker-config', 'w') as f:
            f.write(config_s)
        # FIX: dropped a pointless .format(name=name) on a string with
        # no placeholders
        runcmd('oc replace -f /tmp/broker-config')
        print('Updated broker config to \n\n{}'.format(config_s))
        return True
    else:
        # FIX: typo "becuase" -> "because"
        print("Skipping update to broker configuration because secret entry was already present")
        return False
def format_config(config):
    """Serialize the config map to YAML, dropping server-managed metadata.

    The nested broker-config mapping is re-encoded as a YAML string first,
    matching the config-map storage format.
    """
    config['data']['broker-config'] = yaml.dump(config['data']['broker-config'])
    for key in ('creationTimestamp', 'resourceVersion', 'selfLink', 'uid'):
        # FIX: use pop() instead of del so absent metadata fields do not
        # raise KeyError
        config['metadata'].pop(key, None)
    return yaml.dump(config)
def fqname(apb):
    """Map a fully-qualified image name to the broker's APB naming scheme.

    E.g. 'docker.io/org/image:tag' -> 'dh-org-image-tag'; the tag defaults
    to 'latest' when absent.
    """
    registries = {'docker.io': 'dh'}
    registry, org, end = apb.split('/')
    if ":" in end:
        image, tag = end.split(":")
    else:
        image, tag = end, 'latest'
    return '-'.join([registries[registry], org, image, tag])
def get_broker_config():
    """Fetch the broker's config map and decode its nested YAML payload."""
    raw = runcmd("oc get configmap broker-config -o yaml")
    config = yaml.load(raw)
    config['data']['broker-config'] = yaml.load(config['data']['broker-config'])
    return config
def runcmd(cmd):
    """Echo and execute *cmd*; return its stdout.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    print("Running: " + cmd)
    return subprocess.check_output(cmd.split())
if __name__ == '__main__':
    # Require NAME NAMESPACE IMAGE plus at least one KEY=VALUE/@FILE
    # (len < 5), or show usage when help is requested.
    if len(sys.argv) < 5 or sys.argv[1] in ("-h", "--help"):
        print(USAGE.format(command=sys.argv[0]))
        sys.exit()
    try:
        main()
    except Exception:
        # Print usage for context, then re-raise so the traceback is visible
        print("Invalid invocation")
        print(USAGE.format(command=sys.argv[0]))
        raise
WIPACrepo/iceprod | iceprod/server/scheduled_tasks/dataset_monitor.py | Python | mit | 4,445 | 0.008774 | """
Monitor the datasets.
Send monitoring data to graphite.
Initial delay: rand(1 minute)
Periodic delay: 5 minutes
"""
import logging
import random
import time
from tornado.ioloop import IOLoop
from iceprod.server import GlobalID
logger = logging.getLogger('dataset_monitor')
def dataset_monitor(module):
    """
    Entry point: schedule the first monitoring run on the IOLoop.

    Args:
        module (:py:class:`iceprod.server.modules.schedule`): schedule module
    """
    # stagger startup with a random 5-60s delay
    delay = random.randint(5, 60)
    IOLoop.current().call_later(delay, run, module.rest_client, module.statsd)
async def run(rest_client, statsd, debug=False):
    """
    Collect per-dataset job/task counts, push them to graphite, and
    reschedule this coroutine.

    Args:
        rest_client (:py:class:`iceprod.core.rest_client.Client`): rest client
        statsd (:py:class:`statsd.StatsClient`): statsd (graphite) client
        debug (bool): debug flag to propagate exceptions
    """
    start_time = time.time()
    try:
        # predicted hours of pending work, by resource type
        future_resources = {'gpu': 0, 'cpu': 0}
        datasets = await rest_client.request('GET', '/dataset_summaries/status')
        for status in datasets:
            for dataset_id in datasets[status]:
                dataset = await rest_client.request('GET', f'/datasets/{dataset_id}')
                dataset_num = dataset['dataset']
                dataset_status = dataset['status']

                # job counts: report 'processing' jobs of suspended/errored
                # datasets under 'suspended'
                jobs = await rest_client.request('GET', f'/datasets/{dataset_id}/job_counts/status')
                jobs2 = {}
                for job_status in jobs:
                    if dataset_status in ('suspended', 'errors') and job_status == 'processing':
                        jobs2['suspended'] = jobs[job_status]
                    else:
                        jobs2[job_status] = jobs[job_status]
                jobs = jobs2
                for job_status in ('processing', 'failed', 'suspended', 'errors', 'complete'):
                    if job_status not in jobs:
                        jobs[job_status] = 0
                    statsd.gauge(f'datasets.{dataset_num}.jobs.{job_status}', jobs[job_status])

                # task counts, remapped the same way as the job counts
                tasks = await rest_client.request('GET', f'/datasets/{dataset_id}/task_counts/name_status')
                task_stats = await rest_client.request('GET', f'/datasets/{dataset_id}/task_stats')
                for name in tasks:
                    tasks2 = {}
                    for task_status in tasks[name]:
                        if dataset_status in ('suspended', 'errors') and task_status in ('waiting', 'queued', 'processing'):
                            if 'suspended' not in tasks2:
                                tasks2['suspended'] = tasks[name][task_status]
                            else:
                                tasks2['suspended'] += tasks[name][task_status]
                            tasks2[task_status] = 0
                        else:
                            tasks2[task_status] = tasks[name][task_status]
                    for task_status in ('idle', 'waiting', 'queued', 'processing', 'reset', 'failed', 'suspended', 'complete'):
                        if task_status not in tasks2:
                            tasks2[task_status] = 0
                        statsd.gauge(f'datasets.{dataset_num}.tasks.{name}.{task_status}', tasks2[task_status])
                        # now add to future resource prediction
                        if task_status not in ('idle', 'failed', 'suspended', 'complete'):
                            if name not in task_stats:
                                continue
                            res = 'gpu' if task_stats[name]['gpu'] > 0 else 'cpu'
                            future_resources[res] += tasks2[task_status] * task_stats[name]['avg_hrs']

                # add jobs not materialized to future resource prediction
                if dataset_status not in ('suspended', 'errors'):
                    num_jobs_remaining = dataset['jobs_submitted'] - sum(jobs.values())
                    for name in task_stats:
                        res = 'gpu' if task_stats[name]['gpu'] > 0 else 'cpu'
                        future_resources[res] += num_jobs_remaining * task_stats[name]['avg_hrs']

        for res in future_resources:
            statsd.gauge(f'future_resources.{res}', int(future_resources[res]))
    except Exception:
        # FIX: this line was corrupted in the original; log and optionally
        # re-raise for tests
        logger.error('error monitoring datasets', exc_info=True)
        if debug:
            raise

    # run again after a 5 minute delay measured from the start of this run,
    # never sooner than 60s (FIX: comment previously said "60 minute").
    # NOTE(review): `debug` is not propagated to the rescheduled call --
    # confirm whether that is intentional.
    stop_time = time.time()
    delay = max(60*5 - (stop_time - start_time), 60)
    IOLoop.current().call_later(delay, run, rest_client, statsd)
|
invisiblek/python-for-android | python3-alpha/python3-src/Lib/test/test_decimal.py | Python | apache-2.0 | 86,081 | 0.003938 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour ac | cording to PEP 327.
Cowlishaw's tests can be downloaded from:
www2.hursley.ibm.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with th | e corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
from decimal import *
import numbers
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754)
from test.support import check_warnings
import random
try:
import threading
except ImportError:
threading = None
# Useful Test Constant
Signals = tuple(getcontext().flags.keys())
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = (Clamped, Rounded, Inexact, Subnormal,
Underflow, Overflow, DivisionByZero, InvalidOperation)
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
def init():
    """Install the context the test suite assumes, saving the original.

    test_main() restores ORIGINAL_CONTEXT when the suite finishes.
    """
    global ORIGINAL_CONTEXT
    ORIGINAL_CONTEXT = getcontext().copy()
    default_ctx = Context(prec=9,
                          rounding=ROUND_HALF_EVEN,
                          traps=dict.fromkeys(Signals, 0))
    setcontext(default_ctx)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# list of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
#Map the test cases' error names to the actual errors
ErrorNames = {'clamped' : Clamped,
'conversion_syntax' : InvalidOperation,
'division_by_zero' : DivisionByZero,
'division_impossible' : InvalidOperation,
'division_undefined' : InvalidOperation,
'inexact' : Inexact,
'invalid_context' : InvalidOperation,
'invalid_operation' : InvalidOperation,
'overflow' : Overflow,
'rounded' : Rounded,
'subnormal' : Subnormal,
'underflow' : Underflow}
def Nonfunction(*args):
    """Placeholder callable: ignores all arguments and returns None."""
    return None
RoundingDict = {'ceiling' : ROUND_CEILING, #Maps test-case names to roundings.
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw
nameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor',
}
# The following functions return True/False rather than a Decimal instance
LOGICAL_FUNCTIONS = (
'is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum',
)
class DecimalTest(unittest.TestCase):
"""Class which tests the Decimal class against the test cases.
Changed for unittest.
"""
    def setUp(self):
        """Create a fresh context and the directive dispatch table."""
        self.context = Context()
        self.ignore_list = ['#']
        # Basically, a # means return NaN InvalidOperation.
        # Different from a sNaN in trim

        # Maps directive names found in .decTest files to handler methods
        # (the handlers are defined further down in this class).
        self.ChangeDict = {'precision' : self.change_precision,
                      'rounding' : self.change_rounding_method,
                      'maxexponent' : self.change_max_exponent,
                      'minexponent' : self.change_min_exponent,
                      'clamp' : self.change_clamp}
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
return
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
return
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
r |
samedder/azure-cli | src/command_modules/azure-cli-appservice/azure/cli/command_modules/appservice/_help.py | Python | mit | 17,789 | 0.001349 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
# Help strings for the `az appservice` / `az webapp` command groups.
#
# Fix: the original assigned three keys twice —
#   'webapp deployment', 'webapp deployment slot' (identical duplicates), and
#   'webapp deployment slot auto-swap' (first as a group, then overwritten as
#   a command). Since the last assignment always won at runtime, the duplicates
# below are removed while keeping the winning (final) values, so the resulting
# `helps` dict is unchanged.

helps['appservice'] = """
    type: group
    short-summary: Manage App Service plans.
"""

helps['webapp'] = """
    type: group
    short-summary: Manage web apps.
"""

# --- webapp config ---------------------------------------------------------

helps['webapp config'] = """
    type: group
    short-summary: Configure a web app.
"""

helps['webapp config show'] = """
    type: command
    short-summary: Get the details of a web app's configuration.
"""

helps['webapp config set'] = """
    type: command
    short-summary: Set a web app's configuration.
"""

helps['webapp config appsettings'] = """
    type: group
    short-summary: Configure web app settings.
"""

helps['webapp config appsettings delete'] = """
    type: command
    short-summary: Delete web app settings.
"""

helps['webapp config appsettings list'] = """
    type: command
    short-summary: Get the details of a web app's settings.
"""

helps['webapp config appsettings set'] = """
    type: command
    short-summary: Set a web app's settings.
    examples:
        - name: Set the default NodeJS version to 6.9.1 for a web app.
          text: >
            az webapp config appsettings set -g MyResourceGroup -n MyUniqueApp --settings WEBSITE_NODE_DEFAULT_VERSION=6.9.1
"""

helps['webapp config connection-string'] = """
    type: group
    short-summary: Manage a web app's connection strings.
"""

helps['webapp config connection-string show'] = """
    type: command
    short-summary: Get a web app's connection strings.
"""

helps['webapp config connection-string delete'] = """
    type: command
    short-summary: Delete a web app's connection strings.
"""

helps['webapp config connection-string set'] = """
    type: command
    short-summary: Update a web app's connection strings.
    examples:
        - name: Add a mysql connection string.
          text: >
            az webapp config connection-string set -g MyResourceGroup -n MyUniqueApp -t mysql \\
            --settings mysql1='Server=myServer;Database=myDB;Uid=myUser;Pwd=myPwd;'
"""

helps['webapp config container'] = """
    type: group
    short-summary: Manage web app container settings.
"""

helps['webapp config container show'] = """
    type: command
    short-summary: Get details of a web app container's settings.
"""

helps['webapp config container set'] = """
    type: command
    short-summary: Set a web app container's settings.
"""

helps['webapp config container delete'] = """
    type: command
    short-summary: Delete a web app container's settings.
"""

helps['webapp config ssl'] = """
    type: group
    short-summary: Configure SSL certificates for web apps.
"""

helps['webapp config ssl list'] = """
    type: command
    short-summary: List SSL certificates for a web app.
"""

helps['webapp config ssl bind'] = """
    type: command
    short-summary: Bind an SSL certificate to a web app.
"""

helps['webapp config ssl unbind'] = """
    type: command
    short-summary: Unbind an SSL certificate from a web app.
"""

helps['webapp config ssl delete'] = """
    type: command
    short-summary: Delete an SSL certificate from a web app.
"""

helps['webapp config ssl upload'] = """
    type: command
    short-summary: Upload an SSL certificate to a web app.
"""

# --- webapp deployment -----------------------------------------------------

helps['webapp deployment'] = """
    type: group
    short-summary: Manage web app deployments.
"""

helps['webapp deployment slot'] = """
    type: group
    short-summary: Manage web app deployment slots.
"""

# --- webapp log ------------------------------------------------------------

helps['webapp log'] = """
    type: group
    short-summary: Manage web app logs.
"""

helps['webapp log config'] = """
    type: command
    short-summary: Configure logging for a web app.
"""

helps['webapp log show'] = """
    type: command
    short-summary: Get the details of a web app's logging configuration.
"""

helps['webapp log download'] = """
    type: command
    short-summary: Download a web app's log history as a zip file.
    long-summary: This command may not work with web apps running on Linux.
"""

helps['webapp log tail'] = """
    type: command
    short-summary: Start live log tracing for a web app.
    long-summary: This command may not work with web apps running on Linux.
"""

helps['webapp deployment list-publishing-profiles'] = """
    type: command
    short-summary: Get the details for available web app deployment profiles.
"""

helps['webapp deployment container'] = """
    type: group
    short-summary: Manage container-based continuous deployment.
"""

helps['webapp deployment container config'] = """
    type: command
    short-summary: Configure continuous deployment via containers.
"""

helps['webapp deployment container show-cd-url'] = """
    type: command
    short-summary: Get the URL which can be used to configure webhooks for continuous deployment.
"""

# This was the *second* (winning) assignment for this key in the original;
# the earlier group-typed duplicate has been removed.
helps['webapp deployment slot auto-swap'] = """
    type: command
    short-summary: Configure deployment slot auto swap.
"""

helps['webapp deployment slot create'] = """
    type: command
    short-summary: Create a deployment slot.
"""

helps['webapp deployment slot swap'] = """
    type: command
    short-summary: Change deployment slots for a web app.
    examples:
        - name: Swap a staging slot into production for the MyUniqueApp web app.
          text: >
            az webapp deployment slot swap  -g MyResourceGroup -n MyUniqueApp --slot staging \\
            --target-slot production
"""

helps['webapp deployment slot list'] = """
    type: command
    short-summary: List all deployment slots.
"""

helps['webapp deployment slot delete'] = """
    type: command
    short-summary: Delete a deployment slot.
"""

helps['webapp deployment user'] = """
    type: group
    short-summary: Manage user credentials for deployment.
"""

helps['webapp deployment user set'] = """
    type: command
    short-summary: Update deployment credentials.
    long-summary: All function and web apps in the subscription will be impacted since they share
        the same deployment credentials.
    examples:
        - name: Set FTP and git deployment credentials for all apps.
          text: >
            az webapp deployment user set --user-name MyUserName
"""

helps['webapp deployment source'] = """
    type: group
    short-summary: Manage web app deployment via source control.
"""

helps['webapp deployment source config'] = """
    type: command
    short-summary: Manage deployment from git or Mercurial repositories.
"""

helps['webapp deployment source config-local-git'] = """
    type: command
    short-summary: Get a URL for a git repository endpoint to clone and push to for web app deployment.
    examples:
        - name: Get an endpoint and add it as a git remote.
          text: >
            az webapp source-control config-local-git \\
            -g MyResourceGroup -n MyUniqueApp
            git remote add azure \\
            https://<deploy_user_name>@MyUniqueApp.scm.azurewebsites.net/MyUniqueApp.git
"""

helps['webapp deployment source delete'] = """
    type: command
    short-summary: Delete a source control deployment configuration.
"""

helps['webapp deployment source show'] = """
    type: command
    short-summary: Get the details of a source control deployment configuration.
"""

helps['webapp deployment source sync'] = """
    type: command
    short-summary: Synchronize from the repository. Only needed under manual integration mode.
"""

# --- webapp traffic-routing ------------------------------------------------

helps['webapp traffic-routing'] = """
    type: group
    short-summary: Manage traffic routing for web apps.
"""

helps['webapp traffic-routing set'] = """
    type: command
    short-summary: Configure routing traffic to deployment slots.
"""

helps['webapp traffic-routing show'] = """
    type: command
    short-summary: Display the current distribution of traffic across slots.
"""

helps['webapp traffic-routing clear'] = """
    type: command
    short-summary: Clear the routing rules and send all traffic to production.
"""
help |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.