repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ecolitan/fatics | venv/lib/python2.7/site-packages/twisted/names/test/test_cache.py | Python | agpl-3.0 | 4,915 | 0.001831 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.names.cache}.
"""
from __future__ import division, absolute_import
import time
from zope.interface.verify import verifyClass
from twisted.trial import unittest
from twisted.names import dns, cache
from twisted.internet import task, interfaces
class Caching(unittest.TestCase):
"""
Tests for L{cache.CacheResolver}.
"""
def test_interface(self):
"""
L{cache.CacheResolver} implements L{interfaces.IResolver}
"""
verifyClass(interfaces.IResolver, cache.CacheResolver)
def test_lookup(self):
c = cache.CacheResolver({
dns.Query(name=b'example.com', type=dns.MX, cls=dns.IN):
(time.time(), ([], [], []))})
return c.lookupMailExchange(b'example.com').addCallback(
self.assertEqual, ([], [], []))
def test_constructorExpires(self):
"""
Cache entries passed into L{cache.CacheResolver.__init__} get
cancelled just like entries added with cacheResult
"""
r = ([dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
dns.Record_A("127.0.0.1", 60))],
[dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
dns.Record_A("127.0.0.1", 50))],
[dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
dns.Record_A("127.0.0.1", 40))])
clock = task.Clock()
query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
c = cache.CacheResolver({ query : (clock.seconds(), r)}, reactor=clock)
# 40 seconds is enough to expire the entry because expiration is based
# on the minimum TTL.
clock.advance(40)
self.assertNotIn(query, c.cache)
return self.assertFailure(
c.lookupAddress(b"example.com"), dns.DomainError)
def test_normalLookup(self):
"""
When a cache lookup finds a cached entry from 1 second ago, it is
returned with a TTL of original TTL minus the elapsed 1 second.
"""
r = ([dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
dns.Record_A("127.0.0.1", 60))],
[dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
dns.Record_A("127.0.0.1", 50))],
[dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
dns.Record_A("127.0.0.1", 40))])
clock = task.Clock()
c = cache.CacheResolver(reactor=clock)
c.cacheResult(dns.Query(name=b"example.com", type=dns.A, cls=dns.IN), r)
clock.advance(1)
def cbLookup(result):
self.assertEquals(result[0][0].ttl, 59)
self.assertEquals(result[1][0].ttl, 49)
self.assertEquals(result[2][0].ttl, 39) |
self.assertEquals(result[0][0].name.name, b"example.com")
return c.lookupAddress(b"example.com").addCallback(cbLookup)
def test_cachedResultExpires(self):
"""
Once the TTL has been exceeded, the result is removed from the cache.
"""
| r = ([dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
dns.Record_A("127.0.0.1", 60))],
[dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
dns.Record_A("127.0.0.1", 50))],
[dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
dns.Record_A("127.0.0.1", 40))])
clock = task.Clock()
c = cache.CacheResolver(reactor=clock)
query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
c.cacheResult(query, r)
clock.advance(40)
self.assertNotIn(query, c.cache)
return self.assertFailure(
c.lookupAddress(b"example.com"), dns.DomainError)
def test_expiredTTLLookup(self):
"""
When the cache is queried exactly as the cached entry should expire but
before it has actually been cleared, the cache does not return the
expired entry.
"""
r = ([dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
dns.Record_A("127.0.0.1", 60))],
[dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
dns.Record_A("127.0.0.1", 50))],
[dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
dns.Record_A("127.0.0.1", 40))])
clock = task.Clock()
# Make sure timeouts never happen, so entries won't get cleared:
clock.callLater = lambda *args, **kwargs: None
c = cache.CacheResolver({
dns.Query(name=b"example.com", type=dns.A, cls=dns.IN) :
(clock.seconds(), r)}, reactor=clock)
clock.advance(60.1)
return self.assertFailure(
c.lookupAddress(b"example.com"), dns.DomainError)
|
ppolewicz/qBittorrent | src/searchengine/nova/engines/torrentz.py | Python | gpl-2.0 | 5,199 | 0.003078 | #VERSION: 2.14
#AUTHORS: Diego de las Heras (diegodelasheras@gmail.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from urllib import urlencode
class torrentz(object):
# mandatory properties
url = 'https://torrentz.eu'
name = 'Torrentz'
supported_categories = {'all': ''}
trackers_list = ['udp://open.demonii.com:1337/announce',
'udp://tracker.leechers-paradise.org:6969',
'udp://exodus.desync.com:6969',
'udp://tracker.coppersurfer.tk:6969',
'udp://9.rarbg.com:2710/announce']
class MyHtmlParser(HTMLParser):
def __init__(self, results, url, trackers):
HTMLParser.__init__(self)
self.results = results
self.url = url
self.trackers = trackers
self.td_counter = None
self.current_item = None
def handle_starttag(self, tag, attrs):
if tag == 'a':
params = dict(attrs)
if 'href' in params:
self.current_item = {}
self.td_counter = 0
self.current_item['link'] = 'magnet:?xt=urn:btih:' + \
params['href'].strip(' /') + self.trackers
self.current_item['desc_link'] = self.url + params['href'].strip()
elif tag == 'span':
if isinstance(self.td_counter,int):
self.td_counter += 1
if self.td_counter > 6: # safety
self.td_counter = None
def handle_data(self, data):
i | f self.td_counter == 0:
if 'name' not in self.current_item:
self.current_item['name'] = ''
self.current_item['name'] += data
elif self.td_counter == 4:
if 'size' not in self.current_item:
self.current_item['size'] = data.strip()
elif self.td_counter == 5:
if 'seeds' not in self.current_item:
self.current_it | em['seeds'] = data.strip().replace(',', '')
elif self.td_counter == 6:
if 'leech' not in self.current_item:
self.current_item['leech'] = data.strip().replace(',', '')
# display item
self.td_counter = None
self.current_item['engine_url'] = self.url
if self.current_item['name'].find(' \xc2'):
self.current_item['name'] = self.current_item['name'].split(' \xc2')[0]
self.current_item['link'] += '&' + urlencode({'dn' : self.current_item['name']})
if not self.current_item['seeds'].isdigit():
self.current_item['seeds'] = 0
if not self.current_item['leech'].isdigit():
self.current_item['leech'] = 0
prettyPrinter(self.current_item)
self.results.append('a')
def download_torrent(self, info):
print(download_file(info))
def search(self, what, cat='all'):
# initialize trackers for magnet links
trackers = '&' + '&'.join(urlencode({'tr' : tracker}) for tracker in self.trackers_list)
i = 0
while i < 6:
results_list = []
# "what" is already urlencoded
html = retrieve_url('%s/any?f=%s&p=%d' % (self.url, what, i))
parser = self.MyHtmlParser(results_list, self.url, trackers)
parser.feed(html)
parser.close()
if len(results_list) < 1:
break
i += 1
|
ctogle/dilapidator | src/dilap/topology/wiregraph.py | Python | mit | 7,123 | 0.021059 | import dilap.core.base as db
import dilap.core.plotting as dtl
import matplotlib.pyplot as plt
import pdb
###############################################################################
# wire graph class (purely topological)
#
# wire graph is a simple data structure
# for vertices and edges connecting them
# it also provides a means for ordering edges
#
class wiregraph(db.base):
###################################
### fundamental topological methods
###################################
# add a new intersection to the graph
def av(self,**vkws):
j = self.vcnt
i = (j,vkws)
self.vs.append(i)
self.rings[j] = {}
self.orings[j] = []
self.vcnt += 1
return j
# remove vertex u
def rv(self,u):
i = self.vs[u]
ur = self.rings[u]
for v in list(ur.keys()):self.re(u,v)
self.vs[u] = None
del self.rings[u]
del self.orings[u]
return i
# and a new road to the graph
def ae(self,u,v,**ekws):
ur,vr = self.rings[u],self.rings[v]
uor,vor = self.orings[u],self.orings[v]
m = self.ecnt
r = (m,ekws)
self.es.append(r)
self.elook[(u,v)] = m
self.elook[(v,u)] = m
if not v in ur:ur[v] = r
if not u in vr:vr[u] = r
urcnt = len(uor)
uor.append(v)
'''#
if urcnt < 2:uor.append(v)
else:
w = uor[0]
nea = self.ea(u,w,v)
f = False
for eax in range(1,urcnt):
if nea < self.ea(u,w,uor[eax]):
uor.insert(eax,v)
f = True
break
if not f:uor.append(v)
'''#
vrcnt = len(vor)
vor.append(u)
'''#
if vrcnt < 2:vor.append(u)
else:
w = vor[0]
nea = self.ea(v,w,u)
f = False
for eax in range(1,vrcnt):
if nea < vor[eax]:
vor.insert(eax,u)
f = True
break
if not f:vor.append(u)
'''#
self.ecnt += 1
#self.plotxy(l = 200)
#plt.show()
return m
# remove an edge between u and v
def re(self,u,v):
r = self.rings[u][v]
if r is None:return r
self.es[r[0]] = None
del self.elook[(u,v)]
del self.elook[(v,u)]
del self.rings[u][v]
del self.rings[v][u]
if v in self.orings[u]:self.orings[u].remove(v)
if u in self.orings[v]:self.orings[v].remove(u)
return r
###################################
### additional topological methods
###################################
# add a new vertex and edge to another vertex
def mev(self,ov,vkws,ekws):
nv = self.av(**vkws)
ne = self.ae(ov,nv,**ekws)
return nv,ne
# split an edge/road into two edges/roads
def se(self,u,v,w):
ruv = self.re(u,v)
ruw = self.ae(u,w,**ruv[1])
rwv = self.ae(w,v,**ruv[1])
return ruw,rwv
###################################
### edge ordering mechanisms
###################################
# provide an ordering mechanism for edges in the graph
# the default behavoir is the order of edge addition
def ea(self,u,w,v):
return 0
def cw(self,u,v):
#uor = self.orings[u]
vor = self.orings[v]
uori = vor.index(u)
ror = vor[uori+1:]+vor[:uori]
if ror:tip = ror[0]
else:tip = u
return tip
def ccw(self,u,v):
#uor = self.orings[u]
vor = self.orings[v]
uori = vor.index(u)
ror = vor[uori+1:]+vor[:uori]
if ror:tip = ror[-1]
else:tip = u
return tip
# return a list of vertex indices which form a loop
# the first edge will be from u to v, turns of direction
# d (clockwise or counterclockwise) form the loop
def loop(self,u,v,d = 'cw',usematch = False):
if not v in self.rings[u]:raise ValueError
lp = [u,v]
c = 0
while True:
c += 1
if c > self.vcnt*5:
print('LOOPWARNING',d,u,v,len(lp))
return self.loop(u,v,d,True)
if d == 'cw': tip = self.cw( lp[-2],lp[-1])
elif d == 'ccw':tip = self.ccw(lp[-2],lp[-1])
else:raise ValueError
lp.append(tip)
if lp[-1] == lp[1] and lp[-2] == lp[0]:
lp.pop(-1)
lp.pop(-1)
return lp
if usematch:
lseqmatch = seqmatch(lp)
if lseqmatch[0]:
print('LOOPWARNINGRECONCILED')
if lseqmatch[0]:
lp.pop(-1)
lp.pop(-1)
return lp
# return a list of all unique loops of the graph
def uloops(self,d = 'cw'):
loops = {}
unfn = [x for x in range(self.ecnt)]
for vx in range( | self.vcnt):
v = self.vs[vx]
if v is None:continue
for ox in self.orings[vx]:
r = self.rings[vx][ox]
rx,rkws = r
if rx in unfn:
| #lp = self.loop(vx,ox,'ccw')
lp = self.loop(vx,ox,d)
lpk = tuple(set(lp))
#if not lpk in loops:
if newloopkey(lpk,loops):
loops[lpk] = lp
unfn.remove(rx)
else:continue
if not unfn:break
lps = [loops[lpk] for lpk in loops]
return lps
###################################
def __init__(self):
self.vs = []
self.vcnt = 0
self.es = []
self.ecnt = 0
self.elook = {}
self.rings = {}
self.orings = {}
###################################
def newloopkey(key, loops):
    """Return True when ``key`` names a loop not already in ``loops``.

    Two loops are considered the same loop when they visit the same set of
    vertices, regardless of starting point or direction.

    :param key: tuple of vertex indices identifying a candidate loop.
    :param loops: mapping whose keys are previously accepted loop keys.
    :return: True if no existing key covers the same vertex set.
    """
    new_vertices = set(key)
    # A tuple-equal key is also set-equal, so the set comparison alone covers
    # both checks the original made; the unused length variables are dropped.
    return all(set(existing) != new_vertices for existing in loops)
def seqmatch(l = list(range(10))+list(range(5))):
    """Search ``l`` for a repeated cyclic sub-sequence.

    Returns a 2-tuple ``(longest, longestseq)`` where ``longest`` is the
    length of the longest window size for which two rotations of ``l``
    coincide, and ``longestseq`` is one such repeated window (both None when
    no repetition is found).

    NOTE(review): the default argument is a mutable list; it is only read,
    never mutated, so this is harmless — but a None default would be safer.
    """
    #print('seqmatch: %s' % str(l))
    longest,longestseq = None,None
    # Window of length n ending at index lx, wrapping around the list start.
    # NOTE(review): gperm closes over the loop variable n (late binding); it
    # is only called inside the loop, so each call sees the current n.
    gperm = lambda lx : l[lx-n:]+l[:lx] if lx-n < 0 else l[lx-n:lx]
    # NOTE(review): int(len(l)+1/2) parses as int(len(l) + (1/2)); under
    # Python 2 integer division 1/2 == 0, so the bound is simply len(l).
    # The author may have intended (len(l)+1)/2 — confirm before changing.
    for n in range(2,int(len(l)+1/2)):
        fnd = False
        perms = [gperm(lx) for lx in range(len(l))]
        uniq = []
        # Detect whether any length-n window occurs more than once.
        for p in perms:
            if not p in uniq:
                uniq.append(p)
            else:
                longest = n
                fnd = True
        plen,ulen = len(perms),len(uniq)
        if plen == ulen:pass
        elif plen > ulen:
            # Record one of the repeated windows as the match.
            for p in uniq:
                if perms.count(p) > 1:
                    longestseq = p
                    #print('found repeated permutation: %s' % p)
        # NOTE(review): plen < ulen is impossible (uniq is drawn from perms);
        # this pdb.set_trace() is a debugging leftover guarding that branch.
        else:pdb.set_trace()
        # Stop growing the window as soon as no repetition is found.
        if not fnd:break
    #print('seqmatch (of length %d) had longest match: %s' % (len(l),str(longest)))
    #print(n)
    return longest,longestseq
|
hkhamm/proj7-freetimes | busy_times.py | Python | artistic-2.0 | 6,156 | 0 | import arrow
from dateutil import tz
import flask
import CONFIG
START_TIME = CONFIG.START_TIME
END_TIME = CONFIG.END_TIME
def get_busy_times(events):
    """
    Gets a list of busy times calculated from the list of events.

    :param events: a list of calendar events.
    :return: a list of busy times in ascending order.
    """
    # Clamp the user's selected date range to working hours: shift the
    # stored begin date forward to START_TIME and the end date to END_TIME
    # (both come from CONFIG).
    begin_date = arrow.get(flask.session["begin_date"]).replace(
        hours=+START_TIME)
    end_date = arrow.get(flask.session['end_date']).replace(hours=+END_TIME)

    # First collect candidate busy spans keyed by start time, then merge /
    # de-duplicate them into the final ordered list.
    busy_dict = get_busy_dict(events, begin_date, end_date)
    busy = get_busy_list(busy_dict)

    return busy
def get_busy_dict(events, begin_date, end_date):
    """
    Fills a dictionary with possible busy times from the list of events.

    Only events that are not marked "available" (transparent) and that
    overlap the selected interval are kept.  All-day events are clipped to
    the interval and to working hours; timed events are clipped to their own
    day's working hours.  Keys are the (possibly adjusted) ISO start times.

    :param events: a list of calendar events.
    :param begin_date: the start of the selected time interval.
    :param end_date: the end of the selected time interval.
    :return: a dict of events representing possible busy times.
    """
    busy_dict = {}
    for event in events:
        available = is_available(event)
        event_start, event_end, is_all_day = get_start_end_datetime(event)
        # Working-hours boundaries for the days the event touches.
        day_start = event_start.replace(hour=START_TIME, minute=0)
        day_end = event_end.replace(hour=END_TIME, minute=0)

        # All-day events that either begin or end in the time interval.
        if ((begin_date <= event_start <= end_date or
                begin_date <= event_end <= end_date) and
                not available and is_all_day):
            # Clip the start to the interval start or the working-day start.
            if day_start < begin_date:
                event['start']['dateTime'] = begin_date.isoformat()
            else:
                event['start']['dateTime'] = day_start.isoformat()
            # Clip the end; stepping day_end back one day reflects the
            # exclusive end date of all-day events.
            if event_end > end_date:
                event['end']['dateTime'] = end_date.isoformat()
            else:
                event['end']['dateTime'] = day_end.replace(days=-1).isoformat()
            busy_dict[event['start']['dateTime']] = event

        # Timed events completely within individual days and the interval.
        elif (begin_date <= event_start <= end_date and
                begin_date <= event_end <= end_date and
                not available and not is_all_day):
            if event_start < day_start:
                event['start']['dateTime'] = day_start.isoformat()
            if event_end > day_end:
                event['end']['dateTime'] = day_end.isoformat()
            busy_dict[event['start']['dateTime']] = event

    return busy_dict
def get_busy_list(busy_dict):
    """
    Removes or combines the possible busy times from the busy dictionary and
    returns a sorted list.

    :param busy_dict: a dict of events representing possible busy times.
    :return: a sorted list of events representing busy times.
    """
    busy = []
    remove_list = []
    # O(n^2) pairwise comparison of events, both axes in ascending key order.
    # Events are merged in place (the 'end' of the earlier event is extended)
    # and absorbed events are queued in remove_list.
    for i in sorted(busy_dict):
        for j in sorted(busy_dict):
            event = busy_dict[i]
            event_start = arrow.get(event['start']['dateTime'])
            event_end = arrow.get(event['end']['dateTime'])
            event_end_time = event_end.format('HH:mm')
            other_event = busy_dict[j]
            other_event_start = arrow.get(other_event['start']['dateTime'])
            other_event_end = arrow.get(other_event['end']['dateTime'])
            other_event_start_time = other_event_start.format('HH:mm')
            # The other event's start shifted back one day and moved to
            # END_TIME, used to detect an overnight continuation below.
            other_event_start_mod = other_event_start.replace(days=-1,
                                                              hour=END_TIME)
            if event != other_event:
                # Other event fully contained in this one: drop it.
                if (other_event_start >= event_start and
                        other_event_end <= event_end):
                    remove_list.append(other_event)
                # Event ends at close of business and the other starts at
                # opening time the next working day: merge across the gap.
                # NOTE(review): '17:00' and '09:00' look like hard-coded
                # copies of END_TIME/START_TIME from CONFIG — confirm they
                # stay in sync if the configuration changes.
                if (event_end_time == '17:00' and
                        other_event_start_time == '09:00' and
                        event_end == other_event_start_mod):
                    event['end']['dateTime'] = other_event['end']['dateTime']
                    remove_list.append(other_event)
                # Back-to-back events: merge them.
                if event_end == other_event_start:
                    event['end']['dateTime'] = other_event['end']['dateTime']
                    remove_list.append(other_event)
    # Keep everything that was not absorbed by a merge or containment.
    for i in sorted(busy_dict):
        if busy_dict[i] not in remove_list:
            busy.append(busy_dict[i])
    return busy
def get_events(service):
    """
    Collect every event from each calendar the user has checked.

    :param service: the Google service from which calendars are retrieved.
    :return: a list of events.
    """
    collected = []
    for calendar_id in flask.session['checked_calendars']:
        listing = service.events().list(calendarId=calendar_id).execute()
        collected.extend(listing['items'])
    return collected
def is_available(event):
    """
    Report whether the event leaves the user available.

    A calendar event carrying the 'transparency' attribute does not block
    time, so its presence means "available".

    :param event: the event to check.
    :return: True if the event is transparent, False otherwise.
    """
    return 'transparency' in event
def get_start_end_datetime(event):
    """
    Gets the event's start and end as arrow objects in the local timezone.

    All-day events carry a 'date' field instead of 'dateTime'; both start
    and end are read from whichever field the event uses.

    :param event: the event to check.
    :return: a 3-tuple (start, end, is_all_day).
    """
    is_all_day = 'dateTime' not in event['start']
    field = 'date' if is_all_day else 'dateTime'
    event_start = arrow.get(event['start'][field]).replace(tzinfo=tz.tzlocal())
    event_end = arrow.get(event['end'][field]).replace(tzinfo=tz.tzlocal())
    return event_start, event_end, is_all_day
|
CMPUT-466-551-ML-Project/NILM-Project | nilm/preprocess.py | Python | gpl-2.0 | 4,748 | 0.000632 | """
A collection of data pre-processing algorithms
"""
import numpy as np
from scipy.optimize import minimize
from nilm.evaluation import mean_squared_error
def solve_constant_energy(aggregated, device_activations):
    """
    Invert the indicator matrix, solving for the constant energy of each
    device.  Returns the constant power for each device and the mean
    squared error of the reconstruction.
    """
    indicator = np.column_stack(device_activations)

    def residual(power, total, matrix):
        """Sum of squared differences between total and the reconstruction."""
        return np.sum((total - np.dot(matrix, power)) ** 2)

    n_devices = indicator.shape[1]
    initial_guess = np.zeros(n_devices)
    # Power draws cannot be negative.
    nonnegative = [(0, np.inf)] * n_devices
    solution = minimize(residual, initial_guess, args=(aggregated, indicator),
                        method='SLSQP', bounds=nonnegative)
    fit_error = mean_squared_error(np.dot(indicator, solution.x), aggregated)
    return (solution.x, fit_error)
def confidence(data):
    """
    Heuristic for how usable our current estimate of data is: the sample
    variance divided by the sample count (lower means more usable).

    :param data: 1-D array of samples.
    :return: variance / len(data), or ``np.inf`` when there are no samples
             so callers can recognise "no data at all".
    """
    if len(data) == 0:
        return np.inf
    # np.var computes mean((data - data.mean())**2) over the flattened
    # array, which is exactly what the hand-rolled version did.
    return np.var(data) / len(data)
def only_device(device_idx, time_idx, indicator_matrix):
    """
    Returns True if the device is the only device active at a certain time.
    """
    active = np.flatnonzero(indicator_matrix[time_idx, :])
    return bool(active.size == 1 and active[0] == device_idx)
def sort_data(aggregated, devices, indicator_matrix):
    """
    Generates usable samples for each device: the aggregate readings taken
    at times when that device was the only device active.

    :param aggregated: time series of aggregated power readings.
    :param devices: list of device objects, each with a ``times`` sequence.
    :param indicator_matrix: (time x device) on/off indicator matrix.
    :return: array of per-device sample arrays.
    """
    # range (not the Python-2-only xrange) iterates identically here and
    # keeps the module runnable under Python 3 as well.
    data = [[] for _ in range(len(devices))]
    for d in range(len(devices)):
        for t in range(len(devices[d].times)):
            if only_device(d, t, indicator_matrix):
                data[d].append(aggregated[t])
    return np.array([np.array(d) for d in data])
def changed_devices(devices, time_idx, indicator_matrix):
    """
    Returns the indices of all devices whose on/off state changed between
    time step ``time_idx - 1`` and time step ``time_idx``.

    :param devices: list of devices (only its length is used).
    :param time_idx: time step to compare against the previous one.
    :param indicator_matrix: (time x device) on/off indicator matrix.
    :return: list of device indices whose state changed.
    """
    # range (not the Python-2-only xrange) iterates identically and keeps
    # the module runnable under Python 3 as well.
    return [d for d in range(len(devices)) if
            (indicator_matrix[time_idx, d] != indicator_matrix[time_idx-1, d])]
def get_changed_data(aggregated, devices, indicator_matrix):
    """
    Generates data for each device by the step inference method: whenever
    exactly one device changed state between consecutive time steps, the
    absolute change in aggregate power is attributed to that device.

    :param aggregated: time series of aggregated power readings.
    :param devices: list of device objects, each with a ``times`` sequence.
    :param indicator_matrix: (time x device) on/off indicator matrix.
    :return: array of per-device sample arrays.
    """
    # Guard the devices[0] access below; no devices means no samples.
    if not devices:
        return np.array([])
    # range (not the Python-2-only xrange) iterates identically here.
    data = [[] for _ in range(len(devices))]
    for t in range(1, len(devices[0].times)):
        changed = changed_devices(devices, t, indicator_matrix)
        if len(changed) == 1:
            power_diff = abs(aggregated[t] - aggregated[t-1])
            data[changed[0]].append(power_diff)
    return np.array([np.array(d) for d in data])
def confidence_estimator(aggregated, devices, data_sorter,
                         threshold=np.float32(0.0)):
    """
    Given a time series of aggregated data, time series of devices, and a matrix
    of on/off indicators, computes the best power estimators by the confidence
    interval subtraction method. Data obtained for the confidence interval
    measure is obtained via the data_sorter function, which is currently
    implemented to either take samples from immediate changes in power from a
    single device switching on (get_changed_data) or taking samples from when a
    single device is on (sort_data). The program assumes that the time data
    between the series and indicators is the same, but does not assume how it
    is distributed. This function assumes that every device will be able to be
    calculated at some point. If not, this function is not able to estimate the
    programs accurately.

    :param aggregated: time series of aggregated power readings.
    :param devices: list of device objects with ``indicators`` and ``name``.
    :param data_sorter: callable(aggregated, devices, indicator_matrix)
        producing per-device sample arrays.
    :param threshold: passed to each device's ``indicators`` method.
    :return: dict mapping device name to its estimated constant power.
    """
    # Base case of the recursion: no devices left to estimate.
    if len(devices) == 0:
        return {}
    indicator_matrix = np.column_stack([d.indicators(threshold) for
                                        d in devices])
    data = data_sorter(aggregated, devices, indicator_matrix)
    # Pick the device whose samples we trust most (lowest confidence score);
    # confidence() returns np.inf for devices with no samples at all.
    heuristic = lambda x: confidence(data[x])
    choice = min(range(len(devices)), key=heuristic)
    if heuristic(choice) == np.inf:
        # Need to pick a better approach, try
        # generating more data using level technique.
        mean_choice = np.float32(0.0)
    else:
        mean_choice = data[choice].mean(axis=None)
    # Subtract the chosen device's estimated draw wherever it was on, clamp
    # at zero, and recurse on the remaining devices.
    new_aggregated = aggregated - indicator_matrix[:, choice] * mean_choice
    new_aggregated = np.clip(new_aggregated, np.float32(0.0), np.inf)
    new_devices = devices[:choice] + devices[choice+1:]
    # NOTE(review): threshold is not forwarded to the recursive call, so
    # deeper levels always use the default 0.0 — confirm this is intended.
    calculated_means = confidence_estimator(new_aggregated, new_devices,
                                            data_sorter)
    calculated_means[devices[choice].name] = mean_choice
    return calculated_means
|
takeshineshiro/nova | nova/tests/functional/v3/test_keypairs.py | Python | apache-2.0 | 9,503 | 0.000105 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from nova.objects import keypair as keypair_obj
from nova.tests.functional.v3 import api_sample_base
from nova.tests.unit import fake_crypto
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
request_api_version = None
sample_dir = "keypairs"
expected_delete_status_code = 202
expected_post_status_code = 200
_api_version = 'v2'
def _get_flags(self):
f = super(KeyPairsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
return f
def generalize_subs(self, subs, vanilla_regexes):
subs['keypair_name'] = 'keypair-[0-9a-f-]+'
return subs
def test_keypairs_post(self):
return self._check_keypairs_post()
def _check_keypairs_post(self, **kwargs):
"""Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, **kwargs)
response = self._do_post('os-keypairs', 'keypairs-post-req', subs,
api_version=self.request_api_version)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-post-resp', subs, response,
self.expected_post_status_code)
# NOTE(maurosr): return the key_name is necessary cause the
# verification returns the label of the last compared information in
# the response, | not necessarily the key name.
return key_name
def test_keypairs_import_key_post(self):
public_key = fake_crypto. | get_ssh_public_key()
self._check_keypairs_import_key_post(public_key)
def _check_keypairs_import_key_post(self, public_key, **kwargs):
# Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid.uuid4())
subs = {
'keypair_name': key_name,
'public_key': public_key
}
subs.update(**kwargs)
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
subs, api_version=self.request_api_version)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-import-post-resp', subs, response,
self.expected_post_status_code)
def test_keypairs_list(self):
# Get api sample of key pairs list request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs',
api_version=self.request_api_version)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-list-resp', subs, response, 200)
def test_keypairs_get(self):
# Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs/%s' % key_name,
api_version=self.request_api_version)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-get-resp', subs, response, 200)
def test_keypairs_delete(self):
# Get api sample of key pairs delete request.
key_name = self.test_keypairs_post()
response = self._do_delete('os-keypairs/%s' % key_name,
api_version=self.request_api_version)
self.assertEqual(self.expected_delete_status_code,
response.status_code)
class KeyPairsV22SampleJsonTest(KeyPairsSampleJsonTest):
request_api_version = '2.2'
expected_post_status_code = 201
expected_delete_status_code = 204
# NOTE(gmann): microversion tests do not need to run for v2 API
# so defining scenarios only for v2.2 which will run the original tests
# by appending '(v2_2)' in test_id.
scenarios = [('v2_2', {})]
_api_version = 'v2'
def test_keypairs_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, as it is used by other tests.
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH)
def test_keypairs_post_x509(self):
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
def test_keypairs_post_invalid(self):
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, keypair_type='fakey_type')
response = self._do_post('os-keypairs', 'keypairs-post-req', subs,
api_version=self.request_api_version)
self.assertEqual(400, response.status_code)
def test_keypairs_import_key_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, since the API sample expects a keypair_type.
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH)
def test_keypairs_import_key_post_x509(self):
public_key = fake_crypto.get_x509_cert_and_fingerprint()[0]
public_key = public_key.replace('\n', '\\n')
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
def _check_keypairs_import_key_post_invalid(self, keypair_type):
key_name = 'keypair-' + str(uuid.uuid4())
subs = {
'keypair_name': key_name,
'keypair_type': keypair_type,
'public_key': fake_crypto.get_ssh_public_key()
}
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
subs, api_version=self.request_api_version)
self.assertEqual(400, response.status_code)
def test_keypairs_import_key_post_invalid_type(self):
self._check_keypairs_import_key_post_invalid(
keypair_type='fakey_type')
def test_keypairs_import_key_post_invalid_combination(self):
self._check_keypairs_import_key_post_invalid(
keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
class KeyPairsV210SampleJsonTest(KeyPairsSampleJsonTest):
ADMIN_API = True
request_api_version = '2.10'
expected_post_status_code = 201
expected_delete_status_code = 204
scenarios = [('v2_10', {})]
_api_version = 'v2'
def test_keypair_create_for_user(self):
subs = {
'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
'public_key': fake_crypto.get_ssh_public_key(),
'user_id': "fake"
}
self._check_keypairs_post(**subs)
def test_keypairs_post(self):
return self._check_keypairs_post(
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id="admin")
def test_keypairs_import_key_post(self):
# NOTE(claudiub): overrides the method with the same name in
# KeypairsSampleJsonTest, since the API sample expects a keypair_type.
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(
public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
u |
nirmeshk/oh-mainline | mysite/bugsets/tests.py | Python | agpl-3.0 | 19,396 | 0.000155 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
# vim: set ai et ts=4 sw=4:
# This file is part of OpenHatch.
# Copyright (C) 2014 Elana Hashman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Imports {{{
import mysite.bugsets.views
import mysite.bugsets.models
from mysite.base.tests import TwillTests
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.utils.http import urlencode
from django.utils.unittest import expectedFailure
# }}}
class BasicBugsetMainViewTests(TwillTests):
fixtures = ['user-paulproteus', 'person-paulproteus']
def test_bugset_names_load(self):
mysite.bugsets.models.BugSet.objects.create(name="best event")
mysite.bugsets.models.BugSet.objects.create(name="bestest event")
url = reverse(mysite.bugsets.views.main_index)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertContains(response, "best event")
self.assertContains(response, "bestest event")
def test_bugset_view_link(self):
s = mysite.bugsets.models.BugSet.objects.create(name="best event")
url = reverse(mysite.bugsets.views.main_index)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertContains(response, s.get_absolute_url())
def test_user_not_logged_in(self):
s = mysite.bugsets.models.BugSet.objects.create(name="best event")
url = reverse(mysite.bugsets.views.main_index)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertNotContains(response, s.get_edit_url())
self.assertContains(response,
'if you want to create or edit a bug set.')
def test_user_logged_in(self):
client = self.login_with_client()
s = mysite.bugsets. | models.BugSet.objects.create(name="best event")
url = reverse(mysite.bugsets.views.main_index)
response = client.get(url)
self.assertEqual(200, response.status_code)
| self.assertContains(response, s.get_edit_url())
self.assertNotContains(response,
'if you want to create or edit a bug set.')
class BasicBugsetListViewTests(TwillTests):
def test_bugset_listview_load(self):
s = mysite.bugsets.models.BugSet.objects.create(name="test event")
b = mysite.bugsets.models.AnnotatedBug.objects.create(
url="http://openhatch.org/bugs/issue995",
)
s.bugs.add(b)
url = reverse(mysite.bugsets.views.list_index, kwargs={
'pk': s.pk,
'slug': '',
})
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertContains(response, "test event")
self.assertContains(response, "http://openhatch.org/bugs/issue995")
def test_bugset_listview_load_with_custom_name(self):
s = mysite.bugsets.models.BugSet.objects.create(name="test event")
b = mysite.bugsets.models.AnnotatedBug.objects.create(
url="http://openhatch.org/bugs/issue995",
)
s.bugs.add(b)
url = reverse(mysite.bugsets.views.list_index, kwargs={
'pk': s.pk,
'slug': 'best-event', # this can be anything!
})
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertContains(response, "test event")
self.assertContains(response, "http://openhatch.org/bugs/issue995")
@expectedFailure
def test_bugset_listview_load_empty(self):
# Create set with no bugs
mysite.bugsets.models.BugSet.objects.create(name="test event")
url = reverse(mysite.bugsets.views.list_index, kwargs={
'pk': 1,
'slug': '',
})
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertContains(response, "test event")
self.assertContains(response, "No bugs!")
@expectedFailure
def test_bugset_listview_load_no_project(self):
# Create set with no bugs
s = mysite.bugsets.models.BugSet.objects.create(name="test event")
b = mysite.bugsets.models.AnnotatedBug.objects.create(
url="http://openhatch.org/bugs/issue995",
)
s.bugs.add(b)
url = reverse(mysite.bugsets.views.list_index, kwargs={
'pk': 1,
'slug': '',
})
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertContains(response, "test event")
self.assertContains(response, "http://openhatch.org/bugs/issue995")
self.assertContains(response, "�") # the no project character
@expectedFailure
def test_bugset_listview_load_with_annotated_bug(self):
# Create set and a bug for it
s = mysite.bugsets.models.BugSet.objects.create(name="test event")
b = mysite.bugsets.models.AnnotatedBug.objects.create(
url="http://openhatch.org/bugs/issue995",
)
# Annotate the bug
b.title = "Add bug set view screen"
b.description = ("Use your Django and HTML/CSS skills to make a nice "
"UI for the bug sets")
b.assigned_to = "Jesse"
b.mentor = "Elana"
b.time_estimate = "2 hours"
b.status = "c"
b.skill_list = "python, html"
# Make a project
p = mysite.search.models.Project.objects.create(
name='openhatch',
display_name='OpenHatch DisplayName',
homepage='http://openhatch.org',
language='Python',
)
p.save()
b.project = p
# Save and associate with bugset
b.save()
s.bugs.add(b)
url = reverse(mysite.bugsets.views.list_index, kwargs={
'pk': 1,
'slug': '',
})
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertContains(response, "test event")
self.assertContains(response, "http://openhatch.org/bugs/issue995")
self.assertContains(response, "OpenHatch DisplayName")
self.assertContains(response, "Add bug set view screen")
self.assertContains(response, ("Use your Django and HTML/CSS skills "
"to make a nice UI for the bug sets"))
self.assertContains(response, "Jesse")
self.assertContains(response, "Elana")
self.assertContains(response, "2 hours")
self.assertContains(response, "claimed")
self.assertContains(response, "python, html")
class SecurityBugsetListViewTests(TwillTests):
def test_will_inplaceedit_allow_us_to_pwn_ourselves(self):
# Asheesh: "total cost of pwnership: 1 test"
# note: user paulproteus has poor password hygiene
u = User.objects.create(username='paulproteus', password='password')
u.save()
self.client.post(
'/inplaceeditform/save/',
{
"app_label": "auth", # the django app
"module_name": "user", # the django table
"field_name": "username", # the field name
"obj_id": u.pk, # the pk
"value": '"LOLPWNED"' # new value
})
self.assertEqual(User.objects.get(pk=u.pk).username, u'paulprot |
4dn-dcic/tibanna | awsf/aws_decode_run_json.py | Python | mit | 12,965 | 0.004165 | #!/usr/bin/python
import json
import sys
import re
downloadlist_filename = "download_command_list.txt"
mountlist_filename = "mount_command_list.txt"
input_yml_filename = "inputs.yml"
env_filename = "env_command_list.txt"
INPUT_DIR = "/data1/input" # data are downloaded to this directory
INPUT_MOUNT_DIR_PREFIX = "/data1/input-mounted-" # data are mounted to this directory + bucket name
def main():
"""reads a run json file and creates three text files:
download command list file (commands to download input files from s3)
input yml file (for cwl/wdl/snakemake run)
env list file (environment variables to be sourced)
"""
# read json file
with open(sys.argv[1], 'r') as json_file:
d = json.load(json_file)
d_input = d["Job"]["Input"]
language = d["Job"]["App"]["language"]
# create a download command list file from the information in json
create_download_command_list(downloadlist_filename, d_input, language)
# create a bucket-mounting command list file
create_mount_command_list(mountlist_filename, d_input)
# create an input yml file to be used on awsem
if language == 'wdl': # wdl
create_input_for_wdl(input_yml_filename, d_input)
elif language == 'snakemake': # snakemake
create_input_for_snakemake(input_yml_filename, d_input)
else: # cwl
create_input_for_cwl(input_yml_filename, d_input)
# create a file that defines environmental variables
create_env_def_file(env_filename, d, language)
def create_mount_command_list(mountlist_filename, d_input):
buckets_to_be_mounted = dict()
for category in ["Input_files_data", "Secondary_files_data"]:
for inkey, v in d_input[category].iteritems():
if v.get("mount", False):
buckets_to_be_mounted[v['dir']] = 1
with open(mountlist_filename, 'w') as f:
for b in buckets_to_be_mounted:
f.write("mkdir -p %s\n" % (INPUT_MOUNT_DIR_PREFIX + b))
f.write("$GOOFYS_COMMAND %s %s\n" % (b, INPUT_MOUNT_DIR_PREFIX + b))
def create_download_command_list(downloadlist_filename, d_input, language):
"""create a download command list file from the information in json"""
with open(downloadlist_filename, 'w') as f:
for category in ["Input_files_data", "Secondary_files_data"]:
for inkey, v in d_input[category].iteritems():
if v.get("mount", False): # do not download if it will be mounted
continue
if inkey.startswith('file://'):
if language not in ['shell', 'snakemake']:
raise Exception('input file has to be defined with argument name for CWL and WDL')
target = inkey.replace('file://', '')
if not target.startswith('/data1/'):
raise Exception('input target directory must be in /data1/')
if not target.startswith('/data1/' + language) and \
not target.startswith('/data1/input') and \
not target.startswith('/data1/out'):
raise Exception('input target directory must be in /data1/input, /data1/out or /data1/%s' % language)
else:
target = ''
target_template = I | NPUT_DIR + "/%s"
data_bucket = v["dir"]
profile = v.get("profile", '')
profile_flag = "--profile " + profile if profile else ''
path1 = v["path"]
rename1 = v.get("rename", None)
unzip = v.get("unzip", None)
if not rename1:
rename1 = pa | th1
if isinstance(path1, list):
for path2, rename2 in zip(path1, rename1):
if isinstance(path2, list):
for path3, rename3 in zip(path2, rename2):
if isinstance(path3, list):
for data_file, rename4 in zip(path3, rename3):
target = target_template % rename4
add_download_cmd(data_bucket, data_file, target, profile_flag, f, unzip)
else:
data_file = path3
target = target_template % rename3
add_download_cmd(data_bucket, data_file, target, profile_flag, f, unzip)
else:
data_file = path2
target = target_template % rename2
add_download_cmd(data_bucket, data_file, target, profile_flag, f, unzip)
else:
data_file = path1
if not target:
target = target_template % rename1
add_download_cmd(data_bucket, data_file, target, profile_flag, f, unzip)
def add_download_cmd(data_bucket, data_file, target, profile_flag, f, unzip):
if data_file:
if data_file.endswith('/'):
data_file = data_file.rstrip('/')
cmd_template = "if [[ -z $(aws s3 ls s3://{0}/{1}/ {3}) ]]; then aws s3 cp s3://{0}/{1} {2} {3}; %s" + \
" else aws s3 cp --recursive s3://{0}/{1} {2} {3}; %s fi\n"
cmd4 = ''
cmd5 = ''
if unzip == 'gz':
cmd4 = "gunzip {2};"
cmd5 = "for f in `find {2} -type f`; do if [[ $f =~ \.gz$ ]]; then gunzip $f; fi; done;"
elif unzip == 'bz2':
cmd4 = "bzip2 -d {2};"
cmd5 = "for f in `find {2} -type f`; do if [[ $f =~ \.bz2$ ]]; then bzip2 -d $f; fi; done;"
cmd = cmd_template % (cmd4, cmd5)
f.write(cmd.format(data_bucket, data_file, target, profile_flag))
def file2cwlfile(filename, dir, unzip):
if unzip:
filename = re.match('(.+)\.{0}$'.format(unzip), filename).group(1)
return {"class": 'File', "path": dir + '/' + filename}
def file2wdlfile(filename, dir, unzip):
if unzip:
filename = re.match('(.+)\.{0}$'.format(unzip), filename).group(1)
return dir + '/' + filename
# create an input yml file for cwl-runner
def create_input_for_cwl(input_yml_filename, d_input):
inputs = d_input.copy()
yml = {}
for category in ["Input_parameters"]:
for item, value in inputs[category].iteritems():
yml[item] = value
for category in ["Input_files_data"]:
for item in inputs[category].keys():
v = inputs[category][item]
if v.get('mount', False):
input_dir = INPUT_MOUNT_DIR_PREFIX + v['dir']
else:
input_dir = INPUT_DIR
if 'mount' in v:
del v['mount']
del v['dir']
if 'profile' in v:
del v['profile']
if 'rename' in v and v['rename']:
if isinstance(v['rename'], list):
v['path'] = v['rename'][:]
else:
v['path'] = v['rename']
del v['rename']
if 'unzip' in v:
unzip = v['unzip']
del v['unzip']
else:
unzip = ''
if isinstance(v['path'], list):
v2 = []
for pi in v['path']:
if isinstance(pi, list):
nested = []
for ppi in pi:
if isinstance(ppi, list):
nested.append([file2cwlfile(pppi, input_dir, unzip) for pppi in ppi])
else:
nested.append(file2cwlfile(ppi, input_dir, unzip))
v2.append(nested)
else:
v2.append(file2cwlfile(pi, input_dir, unzip))
v = v2
yml[item] = v
else:
if unzip:
v['path'] = re.match('(.+)\.{0}$'.format(unzip), v['path']).group(1)
v['path'] = input_dir + '/' + |
DemocracyClub/EveryElection | every_election/apps/elections/migrations/0019_auto_20170110_1329.py | Python | bsd-3-clause | 468 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-10 13:29
from __future__ import unicode_literals
from django.db import mi | gration | s, models
class Migration(migrations.Migration):
dependencies = [("elections", "0018_election_group_type")]
operations = [
migrations.AlterField(
model_name="election",
name="group_type",
field=models.CharField(blank=True, max_length=100, null=True),
)
]
|
google/ml-fairness-gym | spaces/multinomial_test.py | Python | apache-2.0 | 3,523 | 0.002838 | # coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for multinomial_spaces.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from spaces import multinomial
import numpy as np
from six.moves import range
class MultinomialTest(absltest.TestCase):
def setUp(self):
self.n = 15 # number of trials
self.k = 6 # number of categories
self.multinomial_space = multinomial.Multinomial(self.k, self.n)
self.multinomial_space.seed(0)
super(MultinomialTest, self).setUp()
def test_sample_sum(self):
n_trials = 100
samples = [self.multinomial_space.sample() for _ in range(n_trials)]
sums_to_n = [np.sum(sample) == self.n for sample in samples]
self.assertTrue(np.all(sums_to_n))
def test_sample_distribution(self):
n_trials = 100
samples = [self.multinomial_space.sample() for _ in range(n_trials)]
# check roughly uniform distribution by checking means for each category
# are within 3*std dev of the expected mean
expected_mean = float(self.n) / self.k
means = np.mean(samples, axis=0)
std = np.std(means)
near_mean = np.asarray(
[np.abs(mean - expected_mean) < 3.0 * std for mean in means])
self.assertTrue(np.all(near_mean))
def test_contains_correct_n_in_vector(self):
# check a vector is contained even if it has n as one of its values.
n = 1 # number of trials
k = 2 # number of categories
multinomial_space = multinomial.Multinomial(k, n)
is_contained_vector = np.asarray([1, 0], dtype=np.uint32)
self.assertTrue(multinomial_space.contains(is_contained_vector))
def test_contains_correct(self):
is_contained_vector = np.asarray([2, 3, 2, 3, 3, 2], dtype=np.uint32)
self.assertTrue(self.multinomial_space.contains(is_contained_vector))
def test_contains_incorrect_length(self):
# check vector with incorrect length is not contained
not_contained_vector = np.asarray([3, 3, 3, 3, 3], dtype=np.uint32)
self.assertFalse(self.multinomial_space.contains(not_contained_ | vector))
def test_contains_incorrect_sum( | self):
# check vector with incorrect sum is not contained
not_contained_vector = np.asarray([3, 3, 3, 3, 3, 3], dtype=np.uint32)
self.assertFalse(self.multinomial_space.contains(not_contained_vector))
def test_contains_incorrect_dtype(self):
# check vector with wrong dtype is not contained
not_contained_vector = np.asarray([2.0, 3.0, 2.0, 3.0, 3.5, 1.5])
self.assertFalse(self.multinomial_space.contains(not_contained_vector))
def test_contains_samples(self):
n_trials = 100
samples = [self.multinomial_space.sample() for _ in range(n_trials)]
contains_samples = [
self.multinomial_space.contains(sample) for sample in samples
]
self.assertTrue(np.all(contains_samples))
if __name__ == '__main__':
absltest.main()
|
IgorWang/LM4paper | mixlm/visualize.py | Python | gpl-3.0 | 5,856 | 0.001195 | # -*- coding: utf-8 -*-
# Project : LM4paper
# Created by igor on 17-3-14
import os
import sys
import time
import json
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from mixlm.lm_train import *
from mixlm.clstmdnn import CLSTMDNN
from bmlm.common import CheckpointLoader
def load_from_checkpoint(saver, logdir):
sess = tf.get_default_session()
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt and ckpt.model_checkpoint_path:
if os.path.isabs(ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
else:
saver.restore(sess, os.path.join(logdir, ckpt.model_checkpoint_path))
return True
return False
class Model():
def __init__(self, logdir):
hps = CLSTMDNN.get_default_hparams().parse(FLAGS.hpconfig)
hps.num_gpus = FLAGS.num_gpus
hps.batch_size = 1
self.word_vocab = Vocabulary.from_file(os.path.join(FLAGS.vocabdir, "1b_word_vocab.txt"))
self.char_vocab = Vocabulary.from_file(os.path.join(FLAGS.vocabdir, "1b_char_vocab.txt"))
with tf.variable_scope("model"):
hps.num_sampled = 0
hps.keep_prob = 1.0
self.model = CLSTMDNN(hps, "eval", "/cpu:0")
if hps.average_params:
print("Averaging parameters for evaluation.")
self.saver = tf.train.Saver(self.model.avg_dict)
else:
self.saver = tf.train.Saver()
# Use only 4 threads for the evaluation
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=20,
inter_op_parallelism_threads=1)
self.sess = tf.Session(config=config)
with self.sess.as_default():
if load_from_checkpoint(self.saver, logdir):
global_step = self.model.global_step.eval()
print("Successfully loaded model at step=%s." % global_step)
else:
print("Can't restore model from %s" % logdir)
self.hps = hps
def get_char_embedding(self, char):
id = self.char_vocab.get_id(char)
x = np.zeros(shape=(4, 20, 16))
x[:, :, :] = id
vector = self.sess.run([self.model.char_embedding.outputs],
| feed_dict={self.mo | del.char_x: x})
# print(self.model.char_embedding)
return vector[0][0][0]
def get_word_embedding(self, word):
id = self.word_vocab.get_id(word)
x = np.zeros(shape=(4, 20))
x[:, :] = id
vector = self.sess.run([self.model.word_embedding.outputs],
feed_dict={self.model.word_x: x})
return vector[0][0][0]
def visualize_char(model, path="/home/aegis/igor/LM4paper/tests/textchar.txt", ):
chars = open(path, 'r').read().splitlines()
embedding = np.empty(shape=(len(chars), model.hps.emb_char_size), dtype=np.float32)
for i, char in enumerate(chars):
embedding[i] = model.get_char_embedding(char)
print(embedding)
print(embedding.shape)
logdir = "/data/visualog/char/"
metadata = os.path.join(logdir, "metadata.tsv")
with open(metadata, "w") as metadata_file:
for c in chars:
metadata_file.write("%s\n" % c)
tf.reset_default_graph()
with tf.Session() as sess:
X = tf.Variable([0.0], name='embedding')
place = tf.placeholder(tf.float32, shape=embedding.shape)
set_x = tf.assign(X, place, validate_shape=False)
sess.run(tf.global_variables_initializer())
sess.run(set_x, feed_dict={place: embedding})
saver = tf.train.Saver([X])
saver.save(sess, os.path.join(logdir, 'char.ckpt'))
config = projector.ProjectorConfig()
# One can add multiple embeddings.
embedding = config.embeddings.add()
embedding.tensor_name = X.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = metadata
# Saves a config file that TensorBoard will read during startup.
projector.visualize_embeddings(tf.summary.FileWriter(logdir), config)
def visualize_word(model, path="/home/aegis/igor/LM4paper/tests/testdata.txt"):
words = open(path, 'r').read().splitlines()
embedding = np.empty(shape=(len(words), model.hps.emb_word_size), dtype=np.float32)
for i, w in enumerate(words):
embedding[i] = model.get_word_embedding(w)
print(embedding)
print(embedding.shape)
logdir = "/data/visualog/word/"
metadata = os.path.join(logdir, "metadata.tsv")
with open(metadata, "w") as metadata_file:
for w in words:
metadata_file.write("%s\n" % w)
tf.reset_default_graph()
with tf.Session() as sess:
X = tf.Variable([0.0], name='embedding')
place = tf.placeholder(tf.float32, shape=embedding.shape)
set_x = tf.assign(X, place, validate_shape=False)
sess.run(tf.global_variables_initializer())
sess.run(set_x, feed_dict={place: embedding})
saver = tf.train.Saver([X])
saver.save(sess, os.path.join(logdir, 'word.ckpt'))
config = projector.ProjectorConfig()
# One can add multiple embeddings.
embedding = config.embeddings.add()
embedding.tensor_name = X.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = metadata
# Saves a config file that TensorBoard will read during startup.
projector.visualize_embeddings(tf.summary.FileWriter(logdir), config)
if __name__ == '__main__':
model = Model(logdir="/data/lmlog/train")
# vector = model.get_word_embedding("hello")
# print(vector)
visualize_word(model, path="/home/aegis/igor/LM4paper/tests/testword.txt")
|
Nicola17/term2048-AI | tests/test_ui.py | Python | mit | 3,717 | 0.001614 | # -*- coding: UTF-8 -*-
try:
import unittest2 as unittest
except ImportError:
import unittest
import sys
import os
import helpers
from term2048 import ui
try:
import argparse as _argparse
except ImportError:
_argparse = None
_argv = sys.argv
_os_system = os.system
class TestUI(unittest.TestCase):
def setUp(self):
self.exit_status = | None
def fake_exit(s):
self.exit_status = s
raise helpers.FakeExit()
self.exit = sys.exit
sys.exit = fake_exit
sys.argv = _argv
self.stdout = sys.stdout
self.output = {}
sys.stdout = helpers.DevNull(self.output)
def tearDown(self):
sys.exit = self.exit
sys.stdout = self.stdout
def test_print_version(self):
try:
ui.print_version_and_exit()
except helpers.FakeExit:
| pass
else:
self.assertFalse(True, "should exit after printing the version")
self.assertEqual(self.exit_status, 0)
def test_parse_args_no_args(self):
sys.argv = ['term2048']
args = ui.parse_cli_args()
self.assertEqual(args, {
'version': False,
'azmode': False,
'mode': None,
})
def test_parse_args_version(self):
sys.argv = ['term2048', '--version']
args = ui.parse_cli_args()
self.assertTrue(args['version'])
def test_parse_args_azmode(self):
sys.argv = ['term2048', '--az']
args = ui.parse_cli_args()
self.assertTrue(args['azmode'])
def test_parse_args_azmode_version(self):
sys.argv = ['term2048', '--az', '--version']
args = ui.parse_cli_args()
self.assertTrue(args['azmode'])
self.assertTrue(args['version'])
def test_parse_args_dark_mode(self):
m = 'dark'
sys.argv = ['term2048', '--mode', m]
args = ui.parse_cli_args()
self.assertEqual(args['mode'], m)
def test_parse_args_light_mode(self):
m = 'light'
sys.argv = ['term2048', '--mode', m]
args = ui.parse_cli_args()
self.assertEqual(args['mode'], m)
def test_argparse_warning(self):
getattr(ui, '__print_argparse_warning')()
self.assertIn('output', self.output)
self.assertRegexpMatches(self.output['output'], r'^WARNING')
def test_start_game_print_version(self):
sys.argv = ['term2048', '--version']
try:
ui.start_game()
except helpers.FakeExit:
pass
else:
self.assertFalse(True, "should exit after printing the version")
self.assertEqual(self.exit_status, 0)
self.assertRegexpMatches(self.output['output'],
r'^term2048 v\d+\.\d+\.\d+$')
class TestUIPy26(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
self.output = {}
sys.stdout = helpers.DevNull(self.output)
sys.modules['argparse'] = None
helpers.reload(ui)
ui.debug = True
def system_interrupt(*args):
raise KeyboardInterrupt()
os.system = system_interrupt
def tearDown(self):
sys.stdout = self.stdout
sys.modules['argparse'] = _argparse
ui.debug = False
os.system = _os_system
def test_no_has_argparse(self):
self.assertFalse(getattr(ui, '__has_argparse'))
def test_start_game_print_argparse_warning(self):
ui.start_game()
self.assertIn('output', self.output)
self.assertRegexpMatches(self.output['output'], r'^WARNING')
def test_start_game_loop(self):
ui.debug = False
self.assertEqual(ui.start_game(), None) # interrupted
|
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_urllib2net.py | Python | gpl-2.0 | 12,710 | 0.002754 | import unittest
from test import support
from test.test_urllib2 import sanepathname2url
import os
import socket
import urllib.error
import urllib.request
import sys
support.requires("network")
TIMEOUT = 60 # seconds
def _retry_thrice(func, exc, *args, **kwargs):
for i in range(3):
try:
return func(*args, **kwargs)
except exc as e:
last_exc = e
continue
raise last_exc
def _wrap_with_retry_thrice(func, exc):
def wrapped(*args, **kwargs):
return _retry_thrice(func, exc, *args, **kwargs)
return wrapped
# bpo-35411: FTP tests of test_urllib2net randomly fail
# with "425 Security: Bad IP connecting" on Travis CI
skip_ftp_test_on_travis = unittest.skipIf('TRAVIS' in os.environ,
'bpo-35411: skip FTP test '
'on Travis CI')
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib.request.urlopen,
urllib.error.URLError)
class AuthTests(unittest.TestCase):
"""Tests urllib2 authentication features."""
## Disabled at the moment since there is no page under python.org which
## could be used to HTTP authentication.
#
# def test_basic_auth(self):
# import http.client
#
# test_url = "http://www.python.org/test/test_urllib2/basic_auth"
# test_hostport = "www.python.org"
# test_realm = 'Test Realm'
# test_user = 'test.test_urllib2net'
# test_password = 'blah'
#
# # failure
# try:
# _urlopen_with_retry(test_url)
# except urllib2.HTTPError, exc:
# self.assertEqual(exc.code, 401)
# else:
# self.fail("urlopen() should have failed with 401")
#
# # success
# auth_handler = urllib2.HTTPBasicAuthHandler()
# auth_handler.add_password(test_realm, test_hostport,
# test_user, test_password)
# opener = urllib2.build_opener(auth_handler)
# f = opener.open('http://localhost/')
# response = _urlopen_with_retry("http://www.python.org/")
#
# # The 'userinfo' URL component is deprecated by RFC 3986 for security
# # reasons, let's not implement it! (it's already implemented for proxy
# # specification strings (that is, URLs or authorities specifying a
# # proxy), so we must keep that)
# self.assertRaises(http.client.InvalidURL,
# urllib2.urlopen, "http://evil:thing@example.com")
class CloseSocketTest(unittest.TestCase):
def test_close(self):
# calling .close() on urllib2's response objects should close the
# underlying socket
url = "http://www.example.com/"
with support.transient_internet(url):
response = _urlopen_with_retry(url)
sock = response.fp
self.assertFalse(sock.closed)
response.close()
self.assertTrue(sock.closed)
class OtherNetworkTests(unittest.TestCase):
def setUp(self):
if 0: # for debugging
import logging
logger = logging.getLogger("test_urllib2net")
logger.addHandler(logging.StreamHandler())
# XXX The rest of these tests aren't very good -- they don't check much.
# They do sometimes catch some major disasters, though.
@skip_ftp_test_on_travis
def test_ftp(self):
urls = [
'ftp://www.pythontest.net/README',
('ftp://www.pythontest.net/non-existent-file',
None, urllib.error.URLError),
]
self._test_urls(urls, self._extra_handlers())
def test_file(self):
TESTFN = support.TESTFN
f = open(TESTFN, 'w')
try:
f.write('hi there\n')
f.close()
urls = [
'file:' + sanepathname2url(os.path.abspath(TESTFN)),
('file:///nonsensename/etc/passwd', None,
urllib.error.URLError),
]
self._test_urls(urls, self._extra_handlers(), retry=True)
finally:
os.remove(TESTFN)
self.assertRaises(ValueError, urllib.request.urlopen,'./relative_path/to/file')
# XXX Following test depends on machine configurations that are internal
# to CNRI. Need to set up a public server with the right authentication
# configuration for test purposes.
## def test_cnri(self):
## if socket.gethostname() == 'bitdiddle':
## localhost = 'bitdiddle.cnri.reston.va.us'
## elif socket.gethostname() == 'bitdiddle.concentric.net':
## localhost = 'localhost'
## else:
## localhost = None
## if localhost is not None:
## urls = [
## 'file://%s/etc/passwd' % localhost,
## 'http://%s/simple/' % localhost,
## 'http://%s/digest/' % localhost,
## 'http://%s/not/found.h' % localhost,
## ]
## bauth = HTTPBasicAuthHandler()
## bauth.add_password('basic_test_realm', localhost, 'jhylton',
## 'password')
## dauth = HTTPDigestAuthHandler()
## dauth.add_password('digest_test_realm', localhost, 'jhylton',
## 'password')
## self._test_urls(urls, self._extra_handlers()+[bauth, dauth])
def test_urlwithfrag(self):
urlwith_frag = "http://www.pythontest.net/index.html#frag"
with support.transient_internet(urlwith_frag):
req = urllib.request.Request(urlwith_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res.geturl(),
"http://www.pythontest.net/index.html#frag")
def test_redirect_url_withfrag(self):
redirect_url_with_frag = "http://www.pythontest.net/redir/with_frag/"
with support.transient_internet(redirect_url_with_frag):
req = urllib.request.Request(redirect_url_with_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res. | geturl(),
"http://www.pythontest.net/elsewhere/#frag")
def test_custom_headers(self):
url = "http://www.example.com"
with support.transient_internet(url) | :
opener = urllib.request.build_opener()
request = urllib.request.Request(url)
self.assertFalse(request.header_items())
opener.open(request)
self.assertTrue(request.header_items())
self.assertTrue(request.has_header('User-agent'))
request.add_header('User-Agent','Test-Agent')
opener.open(request)
self.assertEqual(request.get_header('User-agent'),'Test-Agent')
@unittest.skip('XXX: http://www.imdb.com is gone')
def test_sites_no_connection_close(self):
# Some sites do not send Connection: close header.
# Verify that those work properly. (#issue12576)
URL = 'http://www.imdb.com' # mangles Connection:close
with support.transient_internet(URL):
try:
with urllib.request.urlopen(URL) as res:
pass
except ValueError as e:
self.fail("urlopen failed for site not sending \
Connection:close")
else:
self.assertTrue(res)
req = urllib.request.urlopen(URL)
res = req.read()
self.assertTrue(res)
def _test_urls(self, urls, handlers, retry=True):
import time
import logging
debug = logging.getLogger("test_urllib2").debug
urlopen = urllib.request.build_opener(*handlers).open
if retry:
urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError)
for url in urls:
with self.subTest(url=url):
if isinstance(url, tuple):
url, req, expected_err = url
else:
req = expected_err = None
wit |
overxfl0w/Inference-Engine | BaseRules.py | Python | gpl-2.0 | 876 | 0.045662 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from BaseReality import BaseReality
import re
class BaseRules(BaseReality):
def _get_rule_precond(self,i):
try:
return re.match("^(.*)\s?->.*$",self._get_data(i)).groups()[0]
except TypeError as te:
print "Error con el parametro a la expresion regular de extraccion de precondicion de una regla"
except:
print "Error con la expresion regular"
def _get_rul | e_effect(self,i):
try:
return re.match(".*->\s?(.*)$",self._get_data(i)).groups()[0]
except TypeError as te:
print "Error con el parametro a la expresion regular de la accion de precondicion de una regla"
except:
print "Error con la expresion regular"
def _extract_set_of_preconditions(self,i):
return self._get_rule_precond(i).split("^")
def _extract_set_of_pos | tconditions(self,i):
return self._get_rule_effect(i).split("^")
|
Mec-iS/semantic-data-chronos | appengine_config.py | Python | apache-2.0 | 441 | 0.004535 | __author__ = 'lorenzo'
#
# http://stackoverflow.com/a/29681061/2536357
#
from google.appengine.ext import vendor
# Add any libraries installed in the "lib" folder.
vendor.add('lib')
# run from the project root:
# pip inst | all -t lib -r requirements.txt
# Uncomment if appstat is on
#def webapp_add_wsgi_middleware(app):
# from go | ogle.appengine.ext.appstats import recording
# app = recording.appstats_wsgi_middleware(app)
# return app
|
knuu/nlp100 | chap06/56.py | Python | mit | 765 | 0.005229 | """
import xml.etree.ElementTree as ET
tree = ET.parse('nlp.xml')
root = tree.getroot()
sentences = []
for sentence in root.iter('sentence'):
s = [word.text for word in sentence.iter('word')]
if s: sentences.append(s)
for mention in root.iter('mention'):
mention = {m.tag: m.text for m in mention.findall('*')}
sid, start, end, text = mention['sentense']-1, mention['start'], mention['end'], mention['text']
sentence = sentences[sid]
sentences[sid] = sentence[:start] + text.split()
print(*sentences, sep='\n')
"""
from bs4 import BeautifulSoup
with open('nlp.xml') as f:
soup = BeautifulSoup(f.read(), "xml")
persons = [token.w | ord.text for token in soup.find_all('to | ken') if token.NER.text == 'PERSON']
print(*persons, sep='\n')
|
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/mercurial/fileset.py | Python | gpl-3.0 | 13,728 | 0.004589 | # fileset.py - file set queries for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import parser, error, util, merge, re
from i18n import _
# Pratt-parser token table consumed by parser.parser() below: each token
# maps to (binding strength, prefix action, infix action).
elements = {
    "(": (20, ("group", 1, ")"), ("func", 1, ")")),
    "-": (5, ("negate", 19), ("minus", 5)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}
# words that act as operators rather than bare symbols
keywords = set(['and', 'or', 'not'])
# characters (beyond alphanumerics) allowed inside a glob/pattern symbol
globchars = ".*{}[]?/\\"
def tokenize(program):
    """Lex a fileset expression into a stream of tokens.

    Yields ``(type, value, pos)`` tuples where ``type`` is a key of
    ``elements`` (operator character, keyword, 'symbol', 'string' or
    'end'), ``value`` is the decoded payload for symbols/strings (None
    otherwise) and ``pos`` is the token's offset in *program*.
    Raises error.ParseError on malformed input.
    """
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c in "(),-|&+!": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                pos += 1
                c = program[pos]
                # raw string: keep backslashes verbatim
                decode = lambda x: x
            else:
                # NOTE(review): 'string-escape' is a Python 2-only codec
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in globchars or ord(c) > 127:
            # gather up a symbol/keyword
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d in globchars or ord(d) > 127):
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            else:
                yield ('symbol', sym, s)
            # step back one: the loop-footer increment would otherwise
            # swallow the character that terminated the symbol
            pos -= 1
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)
# entry point: parse(expr) -> parse tree, via the generic Pratt parser
parse = parser.parser(tokenize, elements).parse
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' parse node.

    Raises error.ParseError(err) for any other node, or for None.
    """
    if not x:
        raise error.ParseError(err)
    kind = x[0]
    if kind in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
def getset(mctx, x):
    """Evaluate parse tree *x* against matcher context *mctx*.

    Dispatches on the node type through the module-level ``methods``
    table; the node's children become the handler's arguments.
    """
    if x:
        op, rest = x[0], x[1:]
        return methods[op](mctx, *rest)
    raise error.ParseError(_("missing argument"))
def stringset(mctx, x):
    """Return the files of mctx.subset matched by the pattern *x*."""
    match = mctx.matcher([x])
    result = []
    for fname in mctx.subset:
        if match(fname):
            result.append(fname)
    return result
def andset(mctx, x, y):
    # intersection: evaluate y against a context narrowed to x's result,
    # so y only ever sees files that already matched x
    return getset(mctx.narrow(getset(mctx, x)), y)
def orset(mctx, x, y):
    """Union of the two operand sets, order-preserving.

    x's results come first, followed by y's results not already seen.
    The per-file list-membership test is O(len) -- historically marked
    "needs optimizing".
    """
    left = getset(mctx, x)
    right = getset(mctx, y)
    merged = list(left)
    for f in right:
        if f not in left:
            merged.append(f)
    return merged
def notset(mctx, x):
    """Complement: files of mctx.subset that did not match *x*."""
    excluded = set(getset(mctx, x))
    kept = []
    for fname in mctx.subset:
        if fname not in excluded:
            kept.append(fname)
    return kept
def listset(mctx, a, b):
    # a bare ','-separated list is only valid inside a function call;
    # 'list' nodes there are flattened by getlist(), never evaluated here
    raise error.ParseError(_("can't use a list in this context"))
def modified(mctx, x):
    """``modified()``
    File that is modified according to status.
    """
    # i18n: "modified" is a keyword
    getargs(x, 0, 0, _("modified takes no arguments"))
    # status() -> (modified, added, removed, deleted, unknown, ignored,
    # clean); index 0 is the modified list
    s = mctx.status()[0]
    return [f for f in mctx.subset if f in s]
def added(mctx, x):
    """``added()``
    File that is added according to status.
    """
    # i18n: "added" is a keyword
    getargs(x, 0, 0, _("added takes no arguments"))
    # status() index 1 is the added list
    s = mctx.status()[1]
    return [f for f in mctx.subset if f in s]
def removed(mctx, x):
    """``removed()``
    File that is removed according to status.
    """
    # i18n: "removed" is a keyword
    getargs(x, 0, 0, _("removed takes no arguments"))
    # status() index 2 is the removed list
    s = mctx.status()[2]
    return [f for f in mctx.subset if f in s]
def deleted(mctx, x):
    """``deleted()``
    File that is deleted according to status.
    """
    # i18n: "deleted" is a keyword
    getargs(x, 0, 0, _("deleted takes no arguments"))
    # status() index 3 is the deleted (missing) list
    s = mctx.status()[3]
    return [f for f in mctx.subset if f in s]
def unknown(mctx, x):
    """``unknown()``
    File that is unknown according to status. These files will only be
    considered if this predicate is used.
    """
    # i18n: "unknown" is a keyword
    getargs(x, 0, 0, _("unknown takes no arguments"))
    # status() index 4 is the unknown list (only populated on demand)
    s = mctx.status()[4]
    return [f for f in mctx.subset if f in s]
def ignored(mctx, x):
    """``ignored()``
    File that is ignored according to status. These files will only be
    considered if this predicate is used.
    """
    # i18n: "ignored" is a keyword
    getargs(x, 0, 0, _("ignored takes no arguments"))
    # status() index 5 is the ignored list (only populated on demand)
    s = mctx.status()[5]
    return [f for f in mctx.subset if f in s]
def clean(mctx, x):
    """``clean()``
    File that is clean according to status.
    """
    # i18n: "clean" is a keyword
    getargs(x, 0, 0, _("clean takes no arguments"))
    # status() index 6 is the clean list
    s = mctx.status()[6]
    return [f for f in mctx.subset if f in s]
def func(mctx, a, b):
    """Apply the predicate named by node *a* to argument tree *b*.

    *a* must be a 'symbol' node naming an entry of the module-level
    ``symbols`` table; anything else is a parse error.
    """
    kind, name = a[0], a[1]
    if kind == 'symbol' and name in symbols:
        predicate = symbols[name]
        return predicate(mctx, b)
    raise error.ParseError(_("not a function: %s") % name)
def getlist(x):
    """Flatten a left-nested 'list' parse node into a Python list.

    ('list', ('list', A, B), C) -> [A, B, C]; a non-list node becomes a
    one-element list and a false value becomes [].
    """
    items = []
    node = x
    while node and node[0] == 'list':
        items.append(node[2])
        node = node[1]
    if node:
        items.append(node)
    items.reverse()
    return items
def getargs(x, min, max, err):
    """Return the flattened argument list of parse tree *x*.

    Raises error.ParseError(err) unless the argument count falls within
    the inclusive range [min, max].  (The parameter names shadow the
    builtins; kept for API compatibility.)
    """
    args = getlist(x)
    if not (min <= len(args) <= max):
        raise error.ParseError(err)
    return args
def binary(mctx, x):
    """``binary()``
    File that appears to be binary (contains NUL bytes).
    """
    # i18n: "binary" is a keyword
    getargs(x, 0, 0, _("binary takes no arguments"))
    # only files present in this context can be read; util.binary does
    # the NUL-byte sniff on the file contents
    return [f for f in mctx.existing() if util.binary(mctx.ctx[f].data())]
def exec_(mctx, x):
    """``exec()``
    File that is marked as executable.
    """
    # i18n: "exec" is a keyword
    getargs(x, 0, 0, _("exec takes no arguments"))
    ctx = mctx.ctx
    result = []
    for f in mctx.existing():
        if ctx.flags(f) == 'x':
            result.append(f)
    return result
def symlink(mctx, x):
"""``symlink()``
File that is marked as a | symlink.
"""
# i18n: "symlink" is a keyword
getargs(x, 0, 0, | _("symlink takes no arguments"))
return [f for f in mctx.existing() if mctx.ctx.flags(f) == 'l']
def resolved(mctx, x):
    """``resolved()``
    File that is marked resolved according to the resolve state.
    """
    # i18n: "resolved" is a keyword
    getargs(x, 0, 0, _("resolved takes no arguments"))
    # merge/resolve state only exists for the working copy (rev() None)
    if mctx.ctx.rev() is not None:
        return []
    ms = merge.mergestate(mctx.ctx._repo)
    # 'r' == resolved in the merge state
    return [f for f in mctx.subset if f in ms and ms[f] == 'r']
def unresolved(mctx, x):
    """``unresolved()``
    File that is marked unresolved according to the resolve state.
    """
    # i18n: "unresolved" is a keyword
    getargs(x, 0, 0, _("unresolved takes no arguments"))
    # merge/resolve state only exists for the working copy (rev() None)
    if mctx.ctx.rev() is not None:
        return []
    ms = merge.mergestate(mctx.ctx._repo)
    # 'u' == unresolved in the merge state
    return [f for f in mctx.subset if f in ms and ms[f] == 'u']
def hgignore(mctx, x):
    """``hgignore()``
    File that matches the active .hgignore pattern.
    """
    getargs(x, 0, 0, _("hgignore takes no arguments"))
    # reuse the repo's already-compiled ignore matcher
    ignore = mctx.ctx._repo.dirstate._ignore
    return [f for f in mctx.subset if ignore(f)]
def grep(mctx, x):
    """``grep(regex)``
    File contains the given regular expression.
    """
    pat = getstring(x, _("grep requires a pattern"))
    # NOTE(review): an invalid pattern raises re.error here rather than a
    # ParseError -- presumably surfaced as a traceback; confirm whether
    # it should be wrapped.
    r = re.compile(pat)
    return [f for f in mctx.existing() if r.search(mctx.ctx[f].data())]
_units = dict(k=2**10, K=2**10, kB=2**10, KB=2**10,
M=2**20, MB=2**20, G=2**30, GB=2**30)
def _sizetoint(s):
try:
s = s.strip()
for k, v in _units.items():
if s.endswith(k):
return int(float(s[:-len(k)]) * v)
return int(s)
except ValueError:
raise error.ParseError(_("couldn't parse size: %s") % s)
def _sizetomax(s):
try:
s = s.strip()
for k, v |
barmassimo/MIDI-Notebook | src/midi_notebook/midi_notebook_context.py | Python | gpl-3.0 | 18,879 | 0.000742 | # MIDI-Notebook - A prototypal MIDI monitor, looper, and recorder written in Python.
# Copyright (C) 2014 Massimo Barbieri - http://www.massimobarbieri.it
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import threading
import datetime
import os
import sys
import rtmidi_python as rtmidi
from midiutil.MidiFile3 import MIDIFile
class MidiEventTypes():
    """MIDI status-byte base values (channel 0) for the handled events."""
    NOTE_ON = 144
    NOTE_OFF = 128
    CONTROL_CHANGE = 176
class MidiMessage:
    """A raw MIDI message (sequence of data bytes) plus its timestamp."""
    N_MIDI_CHANNELS = 16
    def __init__(self, data, time_stamp):
        self._data = data
        self.time_stamp = time_stamp
    def __len__(self):
        return len(self._data)
    def __getitem__(self, index):
        return self._data[index]
    def __setitem__(self, index, val):
        self._data[index] = val
    def __str__(self):
        # "byte, byte, ..., timestamp" -- strip the list's brackets
        body = str(self._data)[1:-1]
        return "{0}, {1:.2f}".format(body, self.time_stamp)
    def clone(self):
        """Return an independent copy (data is shallow-copied)."""
        return MidiMessage(self._data[:], self.time_stamp)
    @property
    def type(self):
        """The event-type base value (e.g. NOTE_ON), or None if unknown."""
        event_type, _channel = self._get_separate_type_and_channel()
        return event_type
    @property
    def channel(self):
        """The 0-based MIDI channel, or None for unrecognized events."""
        _event_type, channel = self._get_separate_type_and_channel()
        return channel
    def _get_separate_type_and_channel(self):
        # A status byte encodes type + channel (e.g. 147 == NOTE_ON on
        # channel 3); unrecognized status bytes yield (None, None).
        status = self._data[0]
        for base in (MidiEventTypes.NOTE_ON,
                     MidiEventTypes.NOTE_OFF,
                     MidiEventTypes.CONTROL_CHANGE):
            if base <= status < base + self.N_MIDI_CHANNELS:
                return (base, status - base)
        return (None, None)
class Loop():
    """Recording/playback state for a single looper slot."""
    def __init__(self):
        self.clean()
    def clean(self):
        """Reset the slot to the pristine (never-recorded) state."""
        self.is_playback = False
        self.is_recording = False
        self.start_recording_time = None
        self.messages_captured = []
        self.duration = None
        self.sync_delay = None
        self.waiting_for_sync = False
    @property
    def status(self):
        """Human-readable state label for the UI."""
        if self.is_recording:
            return "recording"
        if self.is_playback:
            return "play - {0:.1f}sec".format(self.duration)
        if self.duration is not None:
            return "stop - {0:.1f}sec".format(self.duration)
        return ""
    @property
    def is_clean(self):
        """True until a recording has produced a duration."""
        return self.duration is None
    @property
    def is_playable(self):
        """At least two captured messages are needed to play back."""
        return len(self.messages_captured) >= 2
    def start_recording(self):
        """Enter recording mode, discarding any previous capture."""
        self.is_playback = False
        self.is_recording = True
        self.start_recording_time = None
        self.messages_captured = []
        self.duration = None
        self.sync_delay = None
    def stop_recording(self):
        """Leave recording mode and compute the captured duration."""
        if not self.is_recording:
            return
        self.is_recording = False
        self.duration = None
        started = self.start_recording_time
        if started is not None:
            self.duration = time.time() - started
class LoopPlayer(threading.Thread):
    """Background thread replaying one Loop's captured MIDI messages.

    Loop index 0 is the "master" loop: it broadcasts the shared sync
    condition that the other loops wait on before each pass.
    """
    def __init__(self, context, n):
        super().__init__()
        self.context = context
        self.loop = context.loops[n]
        self.loop_index = n
        self.is_master_loop = n == 0
        self.force_exit_activated = False
    def run(self):
        """Thread entry point; report exceptions through sys.excepthook."""
        try:
            self.run_unsafe()
        except Exception:
            sys.excepthook(*sys.exc_info())
    def run_unsafe(self):
        """Replay the loop until stopped (or muted, for the master loop)."""
        # avoid concurrency
        loop_messages_captured = self.loop.messages_captured[:]
        loop_duration = self.loop.duration
        loop_sync_delay = self.loop.sync_delay
        if len(loop_messages_captured) < 2:
            self.context.write_message("NOTHING TO PLAY. :-(")
            return
        # the first message's time_stamp becomes the initial delay:
        # 0 without sync, or the recorded offset from the sync point
        if loop_sync_delay is None or not self.context.is_sync_active:
            loop_messages_captured[0].time_stamp = 0
            self.loop.waiting_for_sync = False
        else:
            loop_messages_captured[0].time_stamp = loop_sync_delay
            self.loop.waiting_for_sync = True
        # lazily open the MIDI output port on first playback
        if self.context.midi_out is None:
            if self.context.output_port is None:
                self.context.write_message("Please select a MIDI output port.")
                return
            self.context.midi_out = rtmidi.MidiOut()
            self.context.midi_out.open_port(self.context.output_port)
        while (True):
            # master loop publishes the sync point; the others wait on it
            self.context.loop_sync.acquire()
            if (self.is_master_loop):
                self.context.last_loop_sync = time.time()
                self.context.loop_sync.notify_all()
            else:
                if self.context.is_sync_active:
                    self.context.loop_sync.wait()
                self.loop.waiting_for_sync = False
            self.context.loop_sync.release()
            total_time = sum(float(m.time_stamp)
                             for m in loop_messages_captured[1:])
            for m in loop_messages_captured:
                if not self.loop.is_playback:
                    if not self.is_master_loop:
                        return # master loop is never ended, only muted
                if self.force_exit_activated:
                    return
                time.sleep(float(m.time_stamp))
                if self.loop.is_playback:
                    self.context.midi_out.send_message(m)
                    self.context.capture_message(
                        m, loop_index=self.loop_index) # loopback!
            # pad each pass out to the recorded loop duration
            time.sleep(loop_duration - total_time)
    def force_exit(self):
        """Request thread termination (checked between messages)."""
        self.force_exit_activated = True
class MetaSingleton(type):
    """Metaclass turning its classes into lazily-created singletons.

    The first instantiation is cached on the class itself (shadowing the
    ``instance = None`` default provided by this metaclass); subsequent
    calls return the cached object.
    """
    instance = None
    def __call__(cls, *args, **kw):
        if cls.instance is not None:
            return cls.instance
        obj = super(MetaSingleton, cls).__call__(*args, **kw)
        cls.instance = obj
        return obj
class MidiNotebookContext(metaclass=MetaSingleton):
def __init__(self, configuration):
self.long_pause = configuration['long_pause']
self.midi_file_name = configuration['midi_file_name']
self.bpm = configuration['bpm']
self.monitor = configuration['monitor']
self.write_message_function = configuration.get(
'write_message_function', None)
self.loop_toggle_message_signature = configuration[
'loop_toggle_message_signature']
self.last_event = time.time()
self.messages_captured = []
self.midi_in_ports = []
self.inp | ut_port = None
self._output_port = None
self.midi_out = None
self.n_loops = 4
self.loops = [Loop() for n in range(self.n_loops)]
self.last_toggle_loop = [0 for n in range(self.n_loops)]
self.loop_sync = threading.Condition()
self.last_loop_sync = None
self.loop_th | reads = [None for n in range(self.n_loops)]
def clean_all(self):
self.last_event = time.time()
self.messages_captured = []
for n, l in enumerate(self.loops):
self.clean_loop(n)
if not self.loop_threads[n] is None:
self.loop_threads[n].force_exit()
self.loop_threads[n] = None
self.last_toggle_loop = [0 for n in range(self.n_loops)]
self.loop_sync = threading.Condition()
self.last_loop_sync = None
self.loop_threads = [None for n in range(self.n_loops)]
@property
def is_sync_active(self):
return self.last_loop_sync is not None
def write_message(self, message):
if (self.write_message_function is not None):
self.write_message_function(message)
def write_midi_message(self, message, position, recording):
result = ' '
for n in range(self.n_loops):
if n == position:
r |
AndrewAnnex/SpiceyPy | src/spiceypy/spiceypy.py | Python | mit | 503,480 | 0.000449 | """
The MIT License (MIT)
Copyright (c) [2015-2021] [Andrew Annex]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import warnings
from contextlib import contextmanager
from datetime import datetime, timezone
import functools
import ctypes
from typing import Callable, Iterator, Iterable, Optional, Tuple, Union, Sequence
import numpy
from numpy import ndarray, str_
from .utils import support_types as stypes
from .utils.libspicehelper import libspice
from . import config
from .utils.callbacks import (
UDFUNC,
UDFUNS,
UDFUNB,
UDSTEP,
UDREFN,
UDREPI,
UDREPU,
UDREPF,
UDBAIL,
SpiceUDFUNS,
SpiceUDFUNB,
)
from .utils.support_types import (
Cell_Char,
Cell_Bool,
Cell_Time,
Cell_Double,
Cell_Int,
Ellipse,
Plane,
SpiceCell,
SpiceCellPointer,
SpiceDLADescr,
SpiceDSKDescr,
| SpiceEKAttDsc,
SpiceEKSegSum,
)
__author__ = "AndrewAnnex"
################################################################################
# Alias used throughout the wrappers for optional integer length arguments.
OptionalInt = Optional[int]
# Default output-buffer length for wrapped routines that return strings.
_default_len_out = 256
_SPICE_EK_MAXQSEL = 100 # Twice the 50 in gcc-linux-64
_SPICE_EK_EKRCEX_ROOM_DEFAULT = 100 # Enough?
def warn_deprecated_args(**kwargs) -> None: # pragma: no cover
    """Emit a DeprecationWarning naming every keyword passed when any of
    them is truthy; do nothing otherwise."""
    if not any(kwargs.values()):
        return
    varnames = ", ".join(kwargs.keys())
    message = f"Specifying any of: {varnames} will be deprecated as of SpiceyPy 5.0.0"
    warnings.warn(message, DeprecationWarning, stacklevel=2)
def check_for_spice_error(f: Optional[Callable]) -> None:
    """
    Internal helper used by the error-check decorator: if the CSPICE
    error subsystem reports a failed call, gather the short/explain/long
    messages and the traceback, reset the error state, and raise.
    :param f: function (currently unused; kept for the decorator call site)
    :raise stypes.SpiceyError: when the SPICE failed() flag is set
    """
    if failed():
        short = getmsg("SHORT", 26)
        explain = getmsg("EXPLAIN", 100).strip()
        long = getmsg("LONG", 1841).strip()
        traceback = qcktrc(200)
        # reset() clears the failed() state so later calls remain usable
        reset()
        raise stypes.dynamically_instantiate_spiceyerror(
            short=short, explain=explain, long=long, traceback=traceback
        )
def spice_error_check(f):
    """
    Decorator hooking a wrapped CSPICE call into the spice error system:
    after the wrapped function returns, the error subsystem is polled
    and a SpiceyError is raised if the call failed.
    :param f: function to wrap
    :return: wrapped function
    """
    @functools.wraps(f)
    def with_errcheck(*args, **kwargs):
        # the previous try/except BaseException: raise was a no-op and
        # has been removed; exceptions propagate unchanged either way
        res = f(*args, **kwargs)
        # raises stypes.SpiceyError if the CSPICE failed() flag is set
        check_for_spice_error(f)
        return res
    return with_errcheck
def spice_found_exception_thrower(f: Callable) -> Callable:
    """
    Decorator for wrapping functions whose result tuple ends with one or
    more "found" status flags.  When found-checking is enabled
    (config.catch_false_founds), a false flag raises
    stypes.NotFoundError and the flag(s) are stripped from the returned
    value; otherwise results pass through untouched.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        res = f(*args, **kwargs)
        if config.catch_false_founds:
            found = res[-1]
            if isinstance(found, bool) and not found:
                raise stypes.NotFoundError(
                    "Spice returns not found for function: {}".format(f.__name__),
                    found=found,
                )
            elif stypes.is_iterable(found) and not all(found):
                raise stypes.NotFoundError(
                    "Spice returns not found in a series of calls for function: {}".format(
                        f.__name__
                    ),
                    found=found,
                )
            else:
                # every found flag was true: drop them from the result
                actualres = res[0:-1]
                if len(actualres) == 1:
                    return actualres[0]
                else:
                    return actualres
        else:
            return res
    return wrapper
@contextmanager
def no_found_check() -> Iterator[None]:
    """
    Temporarily disables spiceypy default behavior which raises exceptions for
    false found flags for certain spice functions. All spice
    functions executed within the context manager will no longer check the found
    flag return parameter and the found flag will be included in the return for
    the given function.
    For Example bodc2n in spiceypy is normally called like::
        name = spice.bodc2n(399)
    With the possibility that an exception is thrown in the event of an invalid ID::
        name = spice.bodc2n(-999991) # throws a SpiceyError
    With this function however, we can use it as a context manager to do this::
        with spice.no_found_check():
            name, found = spice.bodc2n(-999991) # found is false, no exception raised!
    Within the context any spice functions called that normally check the found
    flags will pass through the check without raising an exception if they are false.
    """
    current_catch_state = config.catch_false_founds
    config.catch_false_founds = False
    try:
        yield
    finally:
        # restore in a finally block so an exception raised inside the
        # with-body cannot leave found-checking permanently disabled
        config.catch_false_founds = current_catch_state
@contextmanager
def found_check() -> Iterator[None]:
    """
    Temporarily enables spiceypy default behavior which raises exceptions for
    false found flags for certain spice functions. All spice
    functions executed within the context manager will check the found
    flag return parameter and the found flag will be removed from the return for
    the given function.
    For Example bodc2n in spiceypy is normally called like::
        name = spice.bodc2n(399)
    With the possibility that an exception is thrown in the event of an invalid ID::
        name = spice.bodc2n(-999991) # throws a SpiceyError
    With this function however, we can use it as a context manager to do this::
        with spice.found_check():
            found = spice.bodc2n(-999991) # will raise an exception!
    Within the context any spice function called that normally checks the
    found flag will raise an exception if that flag is false.
    """
    current_catch_state = config.catch_false_founds
    config.catch_false_founds = True
    try:
        yield
    finally:
        # restore in a finally block so an exception raised inside the
        # with-body cannot leave found-checking permanently enabled
        config.catch_false_founds = current_catch_state
# Global, process-wide toggles for found-flag checking; see also the
# found_check()/no_found_check() context managers above.
def found_check_off() -> None:
    """
    Method that turns off found catching
    """
    config.catch_false_founds = False
def found_check_on() -> None:
    """
    Method that turns on found catching
    """
    config.catch_false_founds = True
def get_found_catch_state() -> bool:
    """
    Returns the current found catch state
    :return: True when found-flag checking is currently enabled
    """
    return config.catch_false_founds
# Convenience constructors for the SpiceCell workspace types used by the
# window and geometry-finder routines.
def cell_double(cell_size: int) -> SpiceCell:
    """Return an empty double-precision cell holding *cell_size* values."""
    return stypes.SPICEDOUBLE_CELL(cell_size)
def cell_int(cell_size: int) -> SpiceCell:
    """Return an empty integer cell holding *cell_size* values."""
    return stypes.SPICEINT_CELL(cell_size)
def cell_char(cell_size: int, length: int) -> SpiceCell:
    """Return an empty character cell of *cell_size* strings of *length*."""
    return stypes.SPICECHAR_CELL(cell_size, length)
def cell_bool(cell_size: int) -> SpiceCell:
    """Return an empty boolean cell holding *cell_size* values."""
    return stypes.SPICEBOOL_CELL(cell_size)
def cell_time(cell_size: int) -> SpiceCell:
    """Return an empty time cell holding *cell_size* values."""
    return stypes.SPICETIME_CELL(cell_size)
################################################################################
# A
@spice_error_check
def appndc(
item: Union[str, Iterable[str], ndarray, str_], cell: Union[Cell_Char, SpiceCell]
) -> None:
"""
Append an item to a character cell.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/appndc_c.html
:param item: The item to append.
:param cell: The cell to append to.
"""
asse |
google/grr | grr/server/grr_response_server/gui/api_plugins/vfs_test.py | Python | apache-2.0 | 44,205 | 0.004594 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""This modules contains tests for VFS API handlers."""
import binascii
import io
import os
from unittest import mock
import zipfile
from absl import app
from grr_response_core.lib import factory
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import data_store
from grr_response_server import decoders
from grr_response_server import flow
from grr_response_server import flow_base
from grr_response_server.databases import db
from grr_response_server.flows.general import discovery
from grr_response_server.flows.general import filesystem
from grr_response_server.flows.general import transfer
from grr_response_server.gui import api_test_lib
from grr_response_server.gui.api_plugins import vfs as vfs_plugin
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import fixture_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import notification_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class VfsTestMixin(object):
"""A helper mixin providing methods to prepare files and flows for testing."""
time_0 = rdfvalue.RDFDatetime(42)
time_1 = time_0 + rdfvalue.Durati | on.From(1, rdfvalue.DAYS)
time_2 = time_1 + rdfvalue.Duration.From(1, rdfvalue.DAYS)
# TODO(hanuszczak): This fu | nction not only contains a lot of code duplication
# but is also a duplication with `gui_test_lib.CreateFileVersion(s)`. This
# should be refactored in the near future.
def CreateFileVersions(self, client_id, file_path):
"""Add a new version for a file."""
path_type, components = rdf_objects.ParseCategorizedPath(file_path)
client_path = db.ClientPath(client_id, path_type, components)
with test_lib.FakeTime(self.time_1):
vfs_test_lib.CreateFile(client_path, "Hello World".encode("utf-8"))
with test_lib.FakeTime(self.time_2):
vfs_test_lib.CreateFile(client_path, "Goodbye World".encode("utf-8"))
def CreateRecursiveListFlow(self, client_id):
flow_args = filesystem.RecursiveListDirectoryArgs()
return flow.StartFlow(
client_id=client_id,
flow_cls=filesystem.RecursiveListDirectory,
flow_args=flow_args)
def CreateMultiGetFileFlow(self, client_id, file_path):
pathspec = rdf_paths.PathSpec(
path=file_path, pathtype=rdf_paths.PathSpec.PathType.OS)
flow_args = transfer.MultiGetFileArgs(pathspecs=[pathspec])
return flow.StartFlow(
client_id=client_id,
flow_cls=transfer.MultiGetFile,
flow_args=flow_args)
class ApiGetFileDetailsHandlerTest(api_test_lib.ApiCallHandlerTest,
                                   VfsTestMixin):
  """Test for ApiGetFileDetailsHandler."""
  def setUp(self):
    # Create a handler, a client and two file versions (at the mixin's
    # time_1 and time_2) to run every test against.
    super().setUp()
    self.handler = vfs_plugin.ApiGetFileDetailsHandler()
    self.client_id = self.SetupClient(0)
    self.file_path = "fs/os/c/Downloads/a.txt"
    self.CreateFileVersions(self.client_id, self.file_path)
  def testRaisesOnEmptyPath(self):
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path="")
    with self.assertRaises(ValueError):
      self.handler.Handle(args, context=self.context)
  def testRaisesOnRootPath(self):
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path="/")
    with self.assertRaises(ValueError):
      self.handler.Handle(args, context=self.context)
  def testRaisesIfFirstComponentNotInAllowlist(self):
    # "analysis" is not one of the allowed top-level VFS roots.
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path="/analysis")
    with self.assertRaises(ValueError):
      self.handler.Handle(args, context=self.context)
  def testRaisesOnNonexistentPath(self):
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path="/fs/os/foo/bar")
    with self.assertRaises(vfs_plugin.FileNotFoundError):
      self.handler.Handle(args, context=self.context)
  def testHandlerReturnsNewestVersionByDefault(self):
    # Get file version without specifying a timestamp.
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path=self.file_path)
    result = self.handler.Handle(args, context=self.context)
    # Should return the newest version.
    self.assertEqual(result.file.path, self.file_path)
    self.assertAlmostEqual(
        result.file.age,
        self.time_2,
        delta=rdfvalue.Duration.From(1, rdfvalue.SECONDS))
  def testHandlerReturnsClosestSpecificVersion(self):
    # Get specific version.
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id,
        file_path=self.file_path,
        timestamp=self.time_1)
    result = self.handler.Handle(args, context=self.context)
    # The age of the returned version might have a slight deviation.
    self.assertEqual(result.file.path, self.file_path)
    self.assertAlmostEqual(
        result.file.age,
        self.time_1,
        delta=rdfvalue.Duration.From(1, rdfvalue.SECONDS))
  def testResultIncludesDetails(self):
    """Checks if the details include certain attributes.
    Instead of using a (fragile) regression test, we enumerate important
    attributes here and make sure they are returned.
    """
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path=self.file_path)
    result = self.handler.Handle(args, context=self.context)
    attributes_by_type = {}
    attributes_by_type["VFSFile"] = ["STAT"]
    attributes_by_type["AFF4Stream"] = ["HASH", "SIZE"]
    attributes_by_type["AFF4Object"] = ["TYPE"]
    details = result.file.details
    for type_name, attrs in attributes_by_type.items():
      type_obj = next(t for t in details.types if t.name == type_name)
      all_attrs = set([a.name for a in type_obj.attributes])
      self.assertContainsSubset(attrs, all_attrs)
  def testIsDirectoryFlag(self):
    # Set up a directory.
    dir_path = "fs/os/Random/Directory"
    path_type, components = rdf_objects.ParseCategorizedPath(dir_path)
    client_path = db.ClientPath(self.client_id, path_type, components)
    vfs_test_lib.CreateDirectory(client_path)
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path=self.file_path)
    result = self.handler.Handle(args, context=self.context)
    self.assertFalse(result.file.is_directory)
    args = vfs_plugin.ApiGetFileDetailsArgs(
        client_id=self.client_id, file_path=dir_path)
    result = self.handler.Handle(args, context=self.context)
    self.assertTrue(result.file.is_directory)
class ApiListFilesHandlerTest(api_test_lib.ApiCallHandlerTest, VfsTestMixin):
"""Test for ApiListFilesHandler."""
def setUp(self):
super().setUp()
self.handler = vfs_plugin.ApiListFilesHandler()
self.client_id = self.SetupClient(0)
self.file_path = "fs/os/etc"
def testDoesNotRaiseIfFirstCompomentIsEmpty(self):
args = vfs_plugin.ApiListFilesArgs(client_id=self.client_id, file_path="")
self.handler.Handle(args, context=self.context)
def testDoesNotRaiseIfPathIsRoot(self):
args = vfs_plugin.ApiListFilesArgs(client_id=self.client_id, file_path="/")
self.handler.Handle(args, context=self.context)
def testRaisesIfFirstComponentIsNotAllowlisted(self):
args = vfs_plugin.ApiListFilesArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, context=self.context)
def testHandlerListsFilesAndDirectories(self):
fixture_test_lib.ClientFixture(self.client_id)
# Fetch all children of a directory.
args = vfs_plugin.ApiListFilesArgs(
client_id=self.client_id, file_path=self.file_path)
result = self.handler.Handle(args, context=self.context)
self.assertLen(result.items, 4)
for item in result.items:
# Check that all files are really in the right directory.
self.assertIn(self.file_path, item.path)
def testHandlerFiltersDirectoriesIfFlagIsSet(self |
raildo/python-keystoneclient | python-keystoneclient-0.4.1.7.gdca1d42/keystoneclient/openstack/common/jsonutils.py | Python | apache-2.0 | 6,464 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import six
from keystoneclient.openstack.common import gettextutils
from keystoneclient.openstack.common import importutils
from keystoneclient.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
# predicates identifying "nasty" introspection objects (modules, classes,
# frames, ...) that must be stringified rather than walked
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]
# types returned from to_primitive() unchanged
_simple_types = (six.string_types + six.integer_types
                 + (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.
    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.
    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.
    Therefore, convert_instances=True is lossy ... be aware.
    """
    # NOTE(review): Python 2-era code -- value.iteritems() below would
    # need to become .items() for Python 3; confirm before porting.
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    # 51817 <type 'bool'>
    # 26164 <type 'list'>
    # 6491 <type 'float'>
    # 283 <type 'tuple'>
    # 19 <type 'long'>
    if isinstance(value, _simple_types):
        return value
    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value
    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)
    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    # tests that raise an exception in a mocked method that
    # has a @wrap_exception with a notifier will fail. If
    # we up the dependency to 0.5.4 (when it is released) we
    # can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'
    if level > max_depth:
        return '?'
    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        # bind the conversion options once; recursion reuses them
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in value.iteritems())
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]
        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])
        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            return value.data
        elif hasattr(value, 'iteritems'):
            # dict-like: recurse one level deeper (cycle guard)
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
|
jkimbo/freight | freight/vcs/base.py | Python | apache-2.0 | 1,988 | 0 | from __future__ import absolute_import, unicode_literals
import os
import os.path
from freight.constants import PROJECT_ROOT
from freight.exceptions import CommandError
class UnknownRevision(CommandError):
pass
class Vcs(object):
ssh_connect_path = os.path.join(PROJECT_ROOT, 'bin', 'ssh-connect')
def __init__(self, workspace, url, username=None):
self.url = url
self.username = username
self.workspace = workspace
self._path_exists = None
@property
def path(self):
return self.workspace.path
def get_default_env(self):
return {}
def run(self, command, capture=False, workspace=None, *args, **kwargs):
if workspace is None:
workspace = self.workspace
if not self.exists(workspace=workspace):
kwargs.setdefault('cwd', None)
env = kwargs.pop('env', {})
for key, value in self.get_default_env().iteritems():
env.set | default(key, value)
env. | setdefault('FREIGHT_SSH_REPO', self.url)
kwargs['env'] = env
if capture:
handler = workspace.capture
else:
handler = workspace.run
rv = handler(command, *args, **kwargs)
if isinstance(rv, basestring):
return rv.strip()
return rv
def exists(self, workspace=None):
if workspace is None:
workspace = self.workspace
return os.path.exists(workspace.path)
def clone_or_update(self):
if self.exists():
self.update()
else:
self.clone()
def clone(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def checkout(self, ref):
raise NotImplementedError
def describe(self, ref):
"""
Given a `ref` return the fully qualified version.
"""
raise NotImplementedError
def get_default_revision(self):
raise NotImplementedError
|
anton44eg/fixturegen | fixturegen/generator.py | Python | mit | 2,782 | 0 | from __future__ import absolute_import
from functools import partial
from pkg_resources import Requirement, resource_filename
import re
from mako.template import Template
from sqlalchemy import MetaData, select, create_engine, text
from sqlalchemy.exc import ArgumentError
from fixturegen.exc import (
NoSuchTable,
WrongDSN,
WrongNamingColumn,
NonValidRowClassName
)
_FIXTURE_TEMPLATE = 'fixturegen/templates/fixture.mako'
valid_class_name_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
def sqlalchemy_data(table, dsn, limit=None, where=None, order_by=None):
try:
engine = create_engine(dsn)
except ArgumentError:
raise WrongDSN
metadata = MetaData()
metadata.reflect(bind=engine)
try:
mapped_table = metadata.tables[table]
except KeyError:
raise NoSuchTable
query = select(mapped_table.columns)
if where:
query = query.where(whereclause=text(where))
if order_by:
query = query.order_by(text(order_by))
if limit:
query = query.limit(limit)
columns = [column.name for column in mapped_table.columns]
rows = engine.execute(query).fetchall()
return table, tuple(columns), tuple(rows)
def get_row_class_name(row, table_name, naming_column_ids):
class_name | = '{0}_{1}'.format(table_name, '_'
.join((str(row[i]).replace('-', '_')
for i in naming_column_ids)))
if valid_class_name_re.match(class_name):
return class_name
raise NonValidRowClassName(class_name)
def generate(table, columns, rows, with_import=True,
fixture_class_name=None, row_naming_columns=No | ne):
if not row_naming_columns:
try:
naming_column_ids = [columns.index('id')]
except ValueError:
raise WrongNamingColumn()
else:
try:
naming_column_ids = [columns.index(column_name)
for column_name in row_naming_columns]
except ValueError:
raise WrongNamingColumn()
row_class_name = partial(get_row_class_name, table_name=table,
naming_column_ids=naming_column_ids)
if not fixture_class_name:
camel_case_table = table.replace('_', ' ').title().replace(' ', '')
fixture_class_name = camel_case_table + 'Data'
filename = resource_filename(Requirement.parse('fixturegen'),
_FIXTURE_TEMPLATE)
template = Template(filename=filename)
return template.render(table=table, columns=columns,
rows=rows, with_import=with_import,
fixture_class_name=fixture_class_name,
row_class_name=row_class_name)
|
peschkaj/adventofcode | python/09.py | Python | mit | 1,075 | 0.003721 | import pprint
from itertools import permutations
places = set()
route_table = dict()
filename = '09.txt'
def add_to_route_table(start, dest, distance):
places.add(start)
places.add(dest)
if not start in route_table:
route_table[start] = dict()
if not dest in route_table[start]:
route_table[start][dest] = int(distance)
with open(filename) as f:
for line in f.readlines():
line = line.strip()
(start, trash, dest, trash2, distance) = line.split(' ')
add_to_route_table(start, dest, distance)
add_to_route_table(dest, start, distance)
shortest_distance, longest_distance = None, None
for path in permutations(places):
current_length = 0
for source, dest in zip(path, p | ath[1:]):
current_length += route_table[source][dest]
if not shortest_distan | ce or shortest_distance > current_length:
shortest_distance = current_length
if not longest_distance or longest_distance < current_length:
longest_distance = current_length
print shortest_distance, longest_distance
|
HewlettPackard/python-proliant-sdk | examples/Redfish/ex14_sessions.py | Python | apache-2.0 | 2,690 | 0.012639 | # Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS I | S" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import urlparse
from _redfishobject import | RedfishObject
from redfish.rest.v1 import ServerDownOrUnreachableError
def ex14_sessions(redfishobj, login_account, login_password):
sys.stdout.write("\nEXAMPLE 14: Create/Use/Delete a user session\n")
new_session = {"UserName": login_account, "Password": login_password}
response = redfishobj.redfish_post("/redfish/v1/Sessions", new_session)
redfishobj.error_handler(response)
if response.status == 201:
session_uri = response.getheader("location")
session_uri = urlparse.urlparse(session_uri)
sys.stdout.write("\tSession " + session_uri.path + " created\n")
x_auth_token = response.getheader("x-auth-token")
sys.stdout.write("\tSession key " + x_auth_token + " created\n")
# Delete the created session
sessresp = redfishobj.redfish_delete(session_uri.path)
redfishobj.error_handler(sessresp)
else:
sys.stderr.write("ERROR: failed to create a session.\n")
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_https_url, iLO_account, iLO_password)
except ServerDownOrUnreachableError, excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
sys.exit()
except Exception, excp:
raise excp
ex14_sessions(REDFISH_OBJ, "admin", "admin123")
|
jasonkuster/incubator-beam | sdks/python/apache_beam/examples/snippets/snippets_test.py | Python | apache-2.0 | 28,632 | 0.005204 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for all code snippets used in public docs."""
import glob
import gzip
import logging
import os
import tempfile
import unittest
import uuid
import apache_beam as beam
from apache_beam import coders
from apache_beam import pvalue
from apache_beam import typehints
from apache_beam.transforms.util import assert_that
from apache_beam.transforms.util import equal_to
from apache_beam.utils.pipeline_options import TypeOptions
from apache_beam.examples.snippets import snippets
# pylint: disable=expression-not-assigned
from apache_beam.test_pipeline import TestPipeline
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py import base_api
except ImportError:
base_api = None
# pylint: enable=wrong-import-order, wrong-import-position
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
except ImportError:
datastore_pb2 = None
# pylint: enable=wrong-import-order, wrong-import-position
class ParDoTest(unittest.TestCase):
"""Tests for model/par-do."""
def test_pardo(self):
# Note: "words" and "ComputeWordLengthFn" are referenced by name in
# the text of the doc.
words = ['aa', 'bbb', 'c']
# [START model_pardo_pardo]
class ComputeWordLengthFn(beam.DoFn):
def process(self, element):
return [len(element)]
# [END model_pardo_pardo]
# [START model_pardo_apply]
# Apply a ParDo to the PCollection "words" to compute lengths for each word.
word_lengths = words | beam.ParDo(ComputeWordLengthFn())
# [END model_pardo_apply]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_yield(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_yield]
class ComputeWordLengthFn(beam.DoFn):
def process(self, element):
yield len(element)
# [END model_pardo_yield]
word_lengths = words | beam.ParDo(ComputeWordLengthFn())
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_map(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_using_map]
word_lengths = words | beam.Map(len)
# [END model_pardo_using_map]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_flatmap(self):
words = ['aa', 'bbb', 'c']
# [START model_pardo_using_flatmap]
word_lengths = words | beam.FlatMap(lambda word: [len(word)])
# [END model_pardo_using_flatmap]
self.assertEqual({2, 3, 1}, set(word_lengths))
def test_pardo_using_flatmap_yield(self):
words = ['aA', 'bbb', 'C']
# [START model_pardo_using_flatmap_yield]
def capitals(word):
for letter in word:
if 'A' <= letter <= 'Z':
yield letter
all_capitals = words | beam.FlatMap(capitals)
# [END model_pardo_using_flatmap_yield]
self.assertEqual({'A', 'C'}, set(all_capitals))
def test_pardo_with_label(self):
# pylint: disable=line-too-long
words = ['aa', 'bbc', 'defg']
# [START model_pardo_with_label]
result = words | 'CountUniqueLetters' >> beam.Map(
lambda word: len(set(word)))
# [END model_pardo_with_label]
self.assertEqual({1, 2, 4}, set(result))
def test_pardo_side_input(self):
p = TestPipeline()
words = p | 'start' >> beam.Create(['a', 'bb', 'ccc', 'dddd'])
# [START model_pardo_side_input]
# Callable takes additional arguments.
def filter_using_length(word, lower_bound, upper_bound=float('inf')):
if lower_bound <= len(word) <= upper_bound:
yield word
# Construct a deferred side input.
avg_word_len = (words
| beam.Map(len)
| beam.CombineGlobally(beam.combiners.MeanCombineFn()))
# Call with explicit side inputs.
small_words = words | 'small' >> beam.FlatMap(filter_using_length, 0, 3)
# A single deferred side input.
larger_than_average = (words | 'large' >> beam.FlatMap(
filter_using_length,
lower_bound=pvalue.AsSingleton(avg_word_len)))
# Mix and match.
small_but_nontrivial = words | bea | m.FlatMap(filter_using_length,
lower_bound=2,
upper_bound=pvalue.AsSingleton(
| avg_word_len))
# [END model_pardo_side_input]
beam.assert_that(small_words, beam.equal_to(['a', 'bb', 'ccc']))
beam.assert_that(larger_than_average, beam.equal_to(['ccc', 'dddd']),
label='larger_than_average')
beam.assert_that(small_but_nontrivial, beam.equal_to(['bb']),
label='small_but_not_trivial')
p.run()
def test_pardo_side_input_dofn(self):
words = ['a', 'bb', 'ccc', 'dddd']
# [START model_pardo_side_input_dofn]
class FilterUsingLength(beam.DoFn):
def process(self, element, lower_bound, upper_bound=float('inf')):
if lower_bound <= len(element) <= upper_bound:
yield element
small_words = words | beam.ParDo(FilterUsingLength(), 0, 3)
# [END model_pardo_side_input_dofn]
self.assertEqual({'a', 'bb', 'ccc'}, set(small_words))
def test_pardo_with_side_outputs(self):
# [START model_pardo_emitting_values_on_side_outputs]
class ProcessWords(beam.DoFn):
def process(self, element, cutoff_length, marker):
if len(element) <= cutoff_length:
# Emit this short word to the main output.
yield element
else:
# Emit this word's long length to a side output.
yield pvalue.SideOutputValue(
'above_cutoff_lengths', len(element))
if element.startswith(marker):
# Emit this word to a different side output.
yield pvalue.SideOutputValue('marked strings', element)
# [END model_pardo_emitting_values_on_side_outputs]
words = ['a', 'an', 'the', 'music', 'xyz']
# [START model_pardo_with_side_outputs]
results = (words | beam.ParDo(ProcessWords(), cutoff_length=2, marker='x')
.with_outputs('above_cutoff_lengths', 'marked strings',
main='below_cutoff_strings'))
below = results.below_cutoff_strings
above = results.above_cutoff_lengths
marked = results['marked strings'] # indexing works as well
# [END model_pardo_with_side_outputs]
self.assertEqual({'a', 'an'}, set(below))
self.assertEqual({3, 5}, set(above))
self.assertEqual({'xyz'}, set(marked))
# [START model_pardo_with_side_outputs_iter]
below, above, marked = (words
| beam.ParDo(
ProcessWords(), cutoff_length=2, marker='x')
.with_outputs('above_cutoff_lengths',
'marked strings',
main='below_cutoff_strings'))
# [END model_pardo_with_side_outputs_iter]
self.assertEqual({'a', 'an'}, set(below))
self.assertEqual({3, 5}, set(above))
self.assertEqual({'xyz'}, set(marked))
def test_pardo_with_undeclared_side_outputs(self):
numbers = [1, 2, 3, 4, 5, 10, 20]
# [START model_pardo_with_side_outputs_undeclared]
def even_odd(x):
yield pvalue.SideOutputValue |
ereOn/loaded | loaded/main.py | Python | lgpl-3.0 | 1,773 | 0.000564 | """
Scripts.
"""
import click
import logging
import platform
import six
from tornado.ioloop import (
IOLoop,
PeriodicCallback,
)
from .server import agent_application
@click.group()
@click.option('-d', '--debug/--no-debug', default=False)
@click.pass_context
def main_loaded(ctx, debug):
"""
Loaded build agent.
"""
ctx.obj = {}
if debug:
click.secho("Running in debug mode.", fg='cyan')
ctx.obj['DEBUG'] = debug
logging.basicConfig()
@main_loaded.command(help="Run a Loaded agent locally.")
@click.option('--port', type=int, default=9995, help="The port to listen on.")
@click.option(
'--address',
type=six.text_type,
default='0.0.0.0',
help="The address to listen on.",
)
@click.pass_context
def agent(ctx, port, address):
if not ctx.obj['DEBUG']:
logging.getLogger('tornado.access').setLevel(logging.ERROR)
agent_application.listen(port=port, address=address)
click.echo(agent_application.default_host)
click.echo(
"Started web server on {address}:{port}".format(
address=address,
port=port,
),
)
if platform.sy | stem() == 'Windows':
# On Windows, select call can't be interrupted by | SIGINT so we add
# a periodic callback that will wake-up the event loop and raise
# the KeyboardInterrupt if needed.
periodic_callback = PeriodicCallback(lambda: None, 100)
periodic_callback.start()
try:
IOLoop.current().start()
except KeyboardInterrupt as ex:
click.secho(
"Received Ctrl+C: shutting down web server...",
fg='yellow',
bold=True,
)
finally:
IOLoop.current().stop()
click.echo("Web server stopped.")
|
webpadonline/webpadonline.github.io | build.py | Python | bsd-3-clause | 7,419 | 0.011187 | #!/usr/bin/python3
import json
import os
import re
import shutil
import subprocess
import sys
import urllib.parse
import urllib.request
APP_NAME = 'Text'
IS_APP = True
BASE_DIR = os.path.dirname(sys.argv[0])
SOURCE_DIR = BASE_DIR
BUILD_DIR = os.path.join(BASE_DIR, 'build')
FILES = [
'index.html',
'_locales/en/messages.json',
'css/app.css',
'css/theme-dark.css',
'css/theme-default.css',
'css/theme-light.css',
'icon/16x16.png',
'icon/32x32.png',
'icon/48x48.png',
'icon/64x64.png',
'icon/96x96.png',
'icon/128x128.png',
'icon/256x256.png',
'images/arrow-down.svg',
'images/arrow-up.svg',
'images/check_no_box.png',
'images/check_no_box_white.png',
'images/close.svg',
'images/close-tab.svg',
'images/maximize.svg',
'images/menu.svg',
'images/search.svg',
'images/minimize.svg',
'lib/analytics/google-analytics-bundle.js',
'lib/CodeMirror/lib/codemirror.css',
'lib/jquery-1.8.3.min.js'
]
MANIFEST = 'manifest.json'
INDEX_HTML = 'index.html'
TARGET_JS = 'js/all.js'
TARGET_JS_INCLUDE = ('<script src="' + TARGET_JS + '" type="text/javascript">'
'</script>')
JS_INCLUDES = re.compile(r'(<!-- JS -->.*<!-- /JS -->)', flags=re.M | re.S)
JS_SRC = re.compile(r'<script src="([^"]*)" type="text/javascript">')
CLOSURE_URL = 'http://closure-compiler.appspot.com/compile'
BACKGROUND_EXTERNS = os.path.join(SOURCE_DIR, 'js/externs.js')
JS_EXTERNS = None
EXTERNS_URLS = [
'https://closure-compiler.googlecode.com' +
'/svn/trunk/contrib/externs/jquery-1.8.js',
'https://closure-compiler.googlecode.com' +
'/git/contrib/externs/google_analytics_api.js'
]
SKIP_JS_FILES = []
USE_LOCALIZED | _NAME = False
COMPILATION_LEVEL = 'SIMPLE_OPTIMIZATIONS'
BACKGROUND_COMPILATION_LEVEL = 'ADVANCED_OPTIMIZATIONS'
debug_build = False
def parse_command_line():
global debug_build
for option in sys.argv[1:]:
if option == '-d':
debug_build = True
else:
raise Exception('Unknown command line option: ' + option)
def delete(*paths):
for path in paths:
if os.path.isd | ir(path):
print('Deleting', path)
shutil.rmtree(path, ignore_errors=True)
elif os.path.isfile(path):
print('Deleting', path)
os.remove(path)
def copy_files(src, dst, files):
for f in files:
print('Copying', f)
full_path = os.path.join(src, f)
target_path = os.path.join(dst, f)
os.makedirs(os.path.dirname(target_path), exist_ok=True)
shutil.copy(full_path, target_path)
def get_version():
version = subprocess.check_output(['git', 'describe'],
universal_newlines=True)
match = re.compile('v(\d+(?:\.\d+))(?:-(\d+)-g.*)?').match(version)
version = match.group(1)
if match.group(2):
version += '.' + match.group(2)
return version
def process_manifest(out_dir, version):
manifest = json.load(open(os.path.join(SOURCE_DIR, MANIFEST)))
if USE_LOCALIZED_NAME:
manifest['name'] = '__MSG_extName__'
manifest['file_handlers']['text']['title'] = '__MSG_extName__'
else:
manifest['name'] = APP_NAME
manifest['file_handlers']['text']['title'] = APP_NAME
manifest['version'] = version
if IS_APP:
background_js = manifest['app']['background']['scripts']
else:
background_js = manifest['background']['scripts']
background_libs = set(f for f in background_js if f.startswith('lib'))
background_js = set(background_js) - background_libs
background_libs.add('js/background.js')
if IS_APP:
manifest['app']['background']['scripts'] = list(background_libs)
else:
manifest['background']['scripts'] = list(background_libs)
json.dump(manifest, open(os.path.join(out_dir, MANIFEST), 'w'), indent=2)
return list(background_js)
def process_index(out_dir):
html = open(os.path.join(SOURCE_DIR, INDEX_HTML)).read()
match = JS_INCLUDES.search(html)
if not match:
print('Can\'t find JS includes in index.html.')
exit(1)
js_includes = match.group(1)
html = JS_INCLUDES.sub(TARGET_JS_INCLUDE, html)
open(os.path.join(out_dir, INDEX_HTML), 'w').write(html)
js_files = []
for match in JS_SRC.finditer(js_includes):
js_files.append(match.group(1))
return js_files
def print_errors(errors, js_files):
for error in errors:
if error['file'].lower().find('externs') >= 0:
filename = error['file']
else:
fileno = int(error['file'][6:])
filename = js_files[fileno]
if 'error' in error:
text = error['error']
else:
text = error['warning']
print(filename + ':' + str(error['lineno']) + ' ' + text)
print(error['line'])
def compile_js(out_path, js_files, level, externs):
print('Compiling JavaScript code.')
params = [
('compilation_level', level),
('language', 'ECMASCRIPT5'),
('output_format', 'json'),
('output_info', 'statistics'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'compiled_code')
]
if debug_build:
params.append(('formatting', 'pretty_print'))
js_code = ['/** @define {boolean} */\nvar DEBUG = true;']
else:
js_code = ['/** @define {boolean} */\nvar DEBUG = false;']
for js_file in js_files:
if os.path.basename(js_file) not in SKIP_JS_FILES:
js_code.append(open(os.path.join(SOURCE_DIR, js_file)).read())
if externs:
params.append(('js_externs', open(externs).read()))
for url in EXTERNS_URLS:
params.append(('externs_url', url))
for code in js_code:
params.append(('js_code', code))
params = bytes(urllib.parse.urlencode(params, encoding='utf8'), 'utf8')
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
print('Connecting', CLOSURE_URL)
out = urllib.request.urlopen(CLOSURE_URL, data=params)
result = json.loads(out.read().decode('utf8'))
if 'errors' in result and len(result['errors']):
print('Errors:')
print_errors(result['errors'], js_files)
print()
if 'warnings' in result and len(result['warnings']):
print('Warnings:')
print_errors(result['warnings'], js_files)
print()
print('Writing', out_path)
os.makedirs(os.path.dirname(out_path), exist_ok=True)
open(out_path, 'w').write(result['compiledCode'])
def main():
parse_command_line()
version = get_version()
dir_name = APP_NAME + '-' + version
if debug_build:
dir_name += '-dbg'
print(dir_name)
out_dir = os.path.join(BUILD_DIR, dir_name)
archive_path = out_dir + '.zip'
delete(out_dir, archive_path)
copy_files(SOURCE_DIR, out_dir, FILES)
background_js_files = process_manifest(out_dir, version)
compile_js(os.path.join(out_dir, 'js', 'background.js'),
background_js_files,
BACKGROUND_COMPILATION_LEVEL,
BACKGROUND_EXTERNS)
js_files = process_index(out_dir)
compile_js(os.path.join(out_dir, TARGET_JS),
js_files,
COMPILATION_LEVEL,
JS_EXTERNS)
print('Archiving', archive_path)
shutil.make_archive(out_dir, 'zip',
root_dir=os.path.abspath(BUILD_DIR),
base_dir=dir_name,
verbose=True)
if __name__ == '__main__':
main()
|
mpi4py/mpi4py | demo/mandelbrot/mandelbrot-seq.py | Python | bsd-2-clause | 849 | 0.005889 | import numpy as np
import time
tic = time.time | ()
x1 = -2.0
x2 = 1.0
y1 = -1.0
y2 = 1.0
w = 150
h = 100
maxit = 127
def mandelbrot(x, y, maxit):
c = x + y*1j
z = 0 + 0j
it = 0
while abs(z) < 2 and it < maxit:
z = z**2 + c
it += 1
return it
dx = (x2 - x1) / w
dy = (y2 - y1) / h
C = np.empty([h, w], dtype='i')
for k in np.arange(h):
y = y1 + k * dy
for j in | np.arange(w):
x = x1 + j * dx
C[k, j] = mandelbrot(x, y, maxit)
M = C
toc = time.time()
print('wall clock time: %8.2f seconds' % (toc-tic))
# eye candy (requires matplotlib)
if 1:
try:
from matplotlib import pyplot as plt
plt.imshow(M, aspect='equal')
try:
plt.nipy_spectral()
except AttributeError:
plt.spectral()
plt.pause(2)
except:
pass
|
prusnak/bitcoin | test/functional/p2p_unrequested_blocks.py | Python | mit | 13,045 | 0.002606 | #!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0 + node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, MSG_BLOCK, msg_block, msg_headers, msg_inv
from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class AcceptBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
self.setup_nodes()
def run_test(self):
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
# 1. Have nodes mine a block (leave IBD)
[self.generate(n, 1, sync_fun=self.no_op) for n in self.nodes]
tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_and_ping(msg_block(blocks_h2[0]))
min_work_node.send_and_ping(msg_block(blocks_h2[1]))
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_and_ping(msg_block(block_h1f))
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_and_ping(msg_block(block_h2f))
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_and_ping(msg_block(block_h3))
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; a | ll should be processed but
# the last (height-too-high) on node (as long as it is not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_and_ping(msg_bl | ock(all_blocks[1]))
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_and_ping(msg_block(all_blocks[1]))
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connectio |
plotly/python-api | packages/python/plotly/plotly/validators/layout/scene/zaxis/_color.py | Python | mit | 446 | 0.002242 | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="layout.scene.zaxis", **kwargs):
| super(ColorValidator, self).__init__(
plotly_name=plotly_name,
| parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
WikiWatershed/model-my-watershed | deployment/cfn/tile_delivery_network.py | Python | apache-2.0 | 6,804 | 0 | from troposphere import (
Parameter,
Ref,
Output,
Tags,
GetAtt,
Join,
cloudfront as cf,
route53 as r53,
s3
)
from cfn.utils.constants import (
AMAZON_S3_HOSTED_ZONE_ID,
AMAZON_S3_WEBSITE_DOMAIN,
)
from majorkirby import StackNode
class TileDeliveryNetwork(StackNode):
    """CloudFormation stack node for the MMW tile delivery network.

    Provisions two CloudFront distributions (blue/green) in front of the
    'tile-cache.<public zone>' origin, the public-read S3 bucket backing
    that origin, and the Route 53 alias record pointing at the bucket's
    S3 website endpoint.
    """

    INPUTS = {
        'Tags': ['global:Tags'],
        'Region': ['global:Region'],
        'StackType': ['global:StackType'],
        'PublicHostedZoneName': ['global:PublicHostedZoneName'],
        'PrivateHostedZoneId': ['global:PrivateHostedZoneId',
                                'PrivateHostedZone:PrivateHostedZoneId'],
        'PrivateHostedZoneName': ['global:PrivateHostedZoneName'],
        'GlobalNotificationsARN': ['global:GlobalNotificationsARN'],
    }

    DEFAULTS = {
        'Tags': {},
        'Region': 'us-east-1',
        'StackType': 'Staging',
    }

    ATTRIBUTES = {'StackType': 'StackType'}

    def set_up_stack(self):
        """Declare template parameters, create all resources and expose
        the two distribution endpoints as stack outputs."""
        super(TileDeliveryNetwork, self).set_up_stack()

        self.default_tags = self.get_input('Tags').copy()
        self.region = self.get_input('Region')

        self.add_description('Tile delivery network stack for MMW')

        # Parameters
        self.public_hosted_zone_name = self.add_parameter(Parameter(
            'PublicHostedZoneName', Type='String',
            Description='Route 53 public hosted zone name'
        ), 'PublicHostedZoneName')

        self.private_hosted_zone_id = self.add_parameter(Parameter(
            'PrivateHostedZoneId', Type='String',
            Description='Route 53 private hosted zone ID'
        ), 'PrivateHostedZoneId')

        self.private_hosted_zone_name = self.add_parameter(Parameter(
            'PrivateHostedZoneName', Type='String',
            Description='Route 53 private hosted zone name'
        ), 'PrivateHostedZoneName')

        self.notification_topic_arn = self.add_parameter(Parameter(
            'GlobalNotificationsARN', Type='String',
            Description='ARN for an SNS topic to broadcast notifications'
        ), 'GlobalNotificationsARN')

        blue_tile_distribution, \
            green_tile_distribution = self.create_cloudfront_distributions()

        self.create_s3_resources()

        self.add_output(Output('BlueTileServerDistributionEndpoint',
                               Value=GetAtt(blue_tile_distribution,
                                            'DomainName')))
        self.add_output(Output('GreenTileServerDistributionEndpoint',
                               Value=GetAtt(green_tile_distribution,
                                            'DomainName')))

    def create_cloudfront_distributions(self):
        """Create the blue and green CloudFront distributions.

        Both distributions are configured identically; only the
        CloudFormation resource title differs, which is what enables
        blue/green cutovers. The shared body lives in
        :meth:`_add_tile_distribution`.
        """
        blue_tile_distribution = self._add_tile_distribution(
            'tileDistributionBlue')
        green_tile_distribution = self._add_tile_distribution(
            'tileDistributionGreen')
        return blue_tile_distribution, green_tile_distribution

    def _add_tile_distribution(self, title):
        """Add one CloudFront distribution whose custom origin is the
        'tile-cache.<public zone>' HTTP endpoint."""
        return self.add_resource(cf.Distribution(
            title,
            DistributionConfig=cf.DistributionConfig(
                Origins=[
                    cf.Origin(
                        Id='tileOriginId',
                        DomainName=Join('.',
                                        ['tile-cache',
                                         Ref(self.public_hosted_zone_name)]),
                        CustomOriginConfig=cf.CustomOriginConfig(
                            # S3 website endpoints only speak HTTP.
                            OriginProtocolPolicy='http-only'
                        )
                    )
                ],
                DefaultCacheBehavior=cf.DefaultCacheBehavior(
                    ForwardedValues=cf.ForwardedValues(QueryString=True),
                    TargetOriginId='tileOriginId',
                    ViewerProtocolPolicy='allow-all'
                ),
                Enabled=True
            )
        ))

    def create_s3_resources(self):
        """Create the tile-cache S3 bucket, its public-read bucket policy
        and the Route 53 alias record for the S3 website endpoint."""
        s3_bucket = self.add_resource(s3.Bucket(
            's3TileCacheBucket',
            BucketName=Join('.', ['tile-cache',
                                  Ref(self.public_hosted_zone_name)]),
            AccessControl=s3.PublicRead,
            CorsConfiguration=s3.CorsConfiguration(
                CorsRules=[
                    s3.CorsRules(
                        AllowedOrigins=['*'],
                        AllowedMethods=['GET'],
                        MaxAge=3000,
                        AllowedHeaders=['*'],
                    )
                ]
            )
        ))

        # Allow anonymous GETs on every object in the bucket.
        self.add_resource(s3.BucketPolicy(
            's3TileCacheBucketPolicy',
            Bucket=Ref(s3_bucket),
            PolicyDocument={
                'Statement': [{
                    'Action': ['s3:GetObject'],
                    'Effect': 'Allow',
                    'Resource': {
                        'Fn::Join': ['', [
                            'arn:aws:s3:::',
                            Ref(s3_bucket),
                            '/*'
                        ]]
                    },
                    'Principal': '*'
                }]
            }
        ))

        # Alias 'tile-cache.<zone>' to the regional S3 website domain.
        self.add_resource(r53.RecordSetGroup(
            'dnsPublicRecordsCache',
            HostedZoneName=Join('', [Ref(self.public_hosted_zone_name), '.']),
            RecordSets=[
                r53.RecordSet(
                    'dnsTileServersCache',
                    AliasTarget=r53.AliasTarget(
                        AMAZON_S3_HOSTED_ZONE_ID,
                        AMAZON_S3_WEBSITE_DOMAIN,
                        True,
                    ),
                    Name=Join('', ['tile-cache.',
                                   Ref(self.public_hosted_zone_name), '.']),
                    Type='A'
                )
            ]
        ))

    def get_tags(self, **kwargs):
        """Helper method to return Troposphere tags + default tags

        Args:
          **kwargs: arbitrary keyword arguments to be used as tags
        """
        kwargs.update(self.default_tags)
        return Tags(**kwargs)
|
remico/vision_0 | goods2excel.py | Python | gpl-3.0 | 5,421 | 0.001301 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Parse a goods database table and create an ms excel's workbook from it
"""
__author__ = 'remico'
from xml.etree.ElementTree import ElementTree
from abc import ABC, abstractmethod
from bs4 import BeautifulSoup, Tag
import xlsxwriter
import sys, os, glob
class IBuilder(ABC):
    """Builder interface consumed by GoodsReader.

    The reader calls one convert_* hook per record field, then
    increment_row() to advance the output cursor to the next record.
    """
    @abstractmethod
    def convert_articul(self, text):
        """Consume the record's 'name' (articul/SKU) field."""
        pass

    @abstractmethod
    def convert_sizes(self, text):
        """Consume the record's 'har_size' (sizes) field."""
        pass

    @abstractmethod
    def convert_description(self, text):
        """Consume the record's 'content' (HTML description) field."""
        pass

    @abstractmethod
    def convert_price(self, text):
        """Consume the record's 'price' field."""
        pass

    @abstractmethod
    def convert_price_retail(self, text):
        """Consume the record's 'price_retail' field."""
        pass

    @abstractmethod
    def increment_row(self):
        """Advance the output cursor to the next row."""
        pass
class XlsxBuilder(IBuilder):
    """IBuilder implementation that renders goods records into an xlsx
    workbook ('output_.xlsx') with a fixed five-column layout:
    articul / description / price / retail price / sizes."""

    def __init__(self):
        self.filename = "output_.xlsx"
        self.book = xlsxwriter.Workbook(self.filename)
        self.sheet = self.book.add_worksheet("goods")
        self.fill_header()
        self.current_row = 2  # there is the header in the first row

        # Shared format for all data cells: wrap long text.
        self.cell_format = self.book.add_format()
        self.cell_format.set_text_wrap()
        self.cell_format.set_align('vjustify')
        # self.cell_format.set_align('top')

    def fill_header(self):
        """Write the bold highlighted header row and fix column widths /
        row heights for the whole sheet."""
        header_format = self.book.add_format()
        header_format.set_align('center')
        header_format.set_align('vcenter')
        header_format.set_bg_color('yellow')
        header_format.set_bold()

        self.sheet.write_string('A1', 'Артикул')
        self.sheet.write_string('B1', 'Описание')
        self.sheet.write_string('C1', 'Цена')
        self.sheet.write_string('D1', 'Розничная цена')
        self.sheet.write_string('E1', 'Размеры')

        self.sheet.set_column('A:A', 50)
        self.sheet.set_column('B:B', 80)
        self.sheet.set_column('C:C', 20)
        self.sheet.set_column('D:D', 20)
        self.sheet.set_column('E:E', 20)
        self.sheet.set_row(0, 25, header_format)
        self.sheet.set_default_row(35)

    def get_result(self):
        """Close (and thereby save) the workbook; return it."""
        self.book.close()
        print("'%s' created" % self.filename)
        return self.book

    def increment_row(self):
        """Advance the output cursor to the next sheet row."""
        self.current_row += 1

    def convert_articul(self, text=""):
        # NOTE(review): replace('"', '"') is a no-op as written; the original
        # intent was probably unescaping '&quot;' — confirm against the dump.
        cleantext = text.replace('"', '"') if text is not None else ""
        self.sheet.write('A%d' % self.current_row, cleantext, self.cell_format)

    def convert_description(self, text=""):
        """Flatten the record's HTML description (tables and paragraphs)
        into plain multi-line text for column B."""
        cleantext = ""
        if text is not None:
            soup = BeautifulSoup(text)
            rows = []
            # utilize the direct child objects
            for tag in soup.children:
                if not isinstance(tag, Tag):
                    continue
                # parse an html table: one text line per <tr>
                if tag.name == 'table':
                    for row in tag.find_all('tr'):
                        r = ' '.join([col.get_text().strip()
                                      for col in row.find_all('td')])
                        rows.append(r)
                # parse simple html paragraphs
                else:
                    rows.append(tag.get_text().strip())
            cleantext = "\n".join(rows).strip()
        self.sheet.write('B%d' % self.current_row, cleantext, self.cell_format)

    def convert_price(self, text=""):
        self.sheet.write('C%d' % self.current_row, text, self.cell_format)

    def convert_price_retail(self, text=""):
        self.sheet.write('D%d' % self.current_row, text, self.cell_format)

    def convert_sizes(self, text=""):
        self.sheet.write('E%d' % self.current_row, text, self.cell_format)
class GoodsReader(object):
    """Parses a 'gloowi_goods' database dump (XML) and streams every
    record into an IBuilder implementation."""

    def __init__(self, filename, IBuilder_builder):
        """
        :param filename: path or file object of the XML dump
        :param IBuilder_builder: IBuilder instance receiving the records
        :raises LookupError: if the dump has no <database> element
        """
        self.doc = ElementTree(file=filename)
        self.database = self.doc.find("database")
        if self.database is None:
            raise LookupError("It seems that the input file is not a dump of "
                              "'gloowi_goods' database table")
        print("Database: '%s'" % self.database.get("name"))
        self.builder = IBuilder_builder

    def parse_goods(self):
        """Feed every <table> record to the builder, printing a 20-step
        '#' progress bar to stdout."""
        goods = self.database.findall('table')
        len_ = len(goods)
        denominator_ = 20
        # Guard against fewer than 20 records: a zero step size would make
        # the 'i % part_' progress check below raise ZeroDivisionError.
        part_ = max(1, len_ // denominator_)
        # Element.getiterator() was removed in Python 3.9; Element.iter()
        # is the long-standing equivalent.
        records = ({column.get('name'): column.text
                    for column in item.iter('column')}
                   for item in goods)
        for i, rec in enumerate(records):
            self.builder.convert_articul(rec['name'])
            self.builder.convert_description(rec['content'])
            self.builder.convert_price(rec['price'])
            self.builder.convert_price_retail(rec['price_retail'])
            self.builder.convert_sizes(rec['har_size'])
            self.builder.increment_row()
            # indicate progress
            if not i % part_:
                print('#', end='' if i < part_*denominator_ else '\n')
                sys.stdout.flush()
if __name__ == '__main__':
    # Expect exactly one argument: the XML dump file to convert.
    if len(sys.argv) != 2:
        print("Usage: %s <xmlFile>" % (sys.argv[0],))
        sys.exit(-1)

    # clear garbage
    # Remove workbooks left over from previous runs.
    for file in glob.glob("output_*.xlsx"):
        os.remove(file)
        print("'%s' removed" % file)

    input_file = sys.argv[1]
    try:
        builder = XlsxBuilder()
        parser = GoodsReader(input_file, builder)
        parser.parse_goods()
    finally:
        # Always close/save the workbook, even if parsing fails midway.
        # NOTE(review): if XlsxBuilder() itself raises, 'builder' is unbound
        # and this finally raises NameError — confirm that is acceptable.
        builder.get_result()
|
ycl2045/nova-master | nova/tests/virt/libvirt/test_imagebackend.py | Python | apache-2.0 | 26,862 | 0.000261 | # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo.config import cfg
import inspect
from nova import exception
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_processutils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
class _ImageTestCase(object):
    """Shared test mixin for libvirt image backends.

    Subclasses provide ``self.image_class`` and ``self.SIZE`` and mix this
    in with a real TestCase base; the tests here cover the common
    cache()/preallocation behaviour.
    """
    INSTANCES_PATH = '/instances_path'

    def mock_create_image(self, image):
        # Replace the backend's create_image with a stub that just invokes
        # the fetch function against the base path.
        def create_image(fn, base, size, *args, **kwargs):
            fn(target=base, *args, **kwargs)
        image.create_image = create_image

    def setUp(self):
        super(_ImageTestCase, self).setUp()
        self.flags(disable_process_locking=True,
                   instances_path=self.INSTANCES_PATH)
        self.INSTANCE = {'name': 'instance',
                         'uuid': uuidutils.generate_uuid()}
        self.NAME = 'fake.vm'
        self.TEMPLATE = 'template'

        self.OLD_STYLE_INSTANCE_PATH = \
            fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
        self.PATH = os.path.join(
            fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)

        # TODO(mikal): rename template_dir to base_dir and template_path
        # to cached_image_path. This will be less confusing.
        self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
        self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def test_cache(self):
        # Nothing exists yet: expect base dir creation plus one fetch.
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_image_exists(self):
        # Image and template are already present: no fetch expected.
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_base_dir_exists(self):
        # Base dir exists but the image does not: fetch, no ensure_tree call.
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_template_exists(self):
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_prealloc_image(self):
        CONF.set_override('preallocate_images', 'space')

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: True)

        # Call twice to verify testing fallocate is only called once.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        self.assertEqual(fake_processutils.fake_execute_get_log(),
            ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
             'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
             'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])

    def test_prealloc_image_without_write_access(self):
        CONF.set_override('preallocate_images', 'space')

        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        self.stubs.Set(image, 'check_image_exists', lambda: True)
        self.stubs.Set(image, '_can_fallocate', lambda: True)
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: False)

        # Testing fallocate is only called when user has write access.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Raw
super(RawTestC | ase, self).setUp()
self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
def | prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_create_image(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
self.mox.VerifyAll()
def test_create_image_generated(self):
fn = self.prepare_mocks()
fn(target=self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def test_create_image_extend(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEM |
justinasjaronis/hpn | united_geonames/migrations/0015_auto__add_field_geonamesmatchinglogmatchedplaces_remark.py | Python | gpl-3.0 | 9,334 | 0.007821 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'GeoNamesMatchingLogMatchedPlaces.remark'
        # Nullable TextField, so keep_default=False is safe for existing rows.
        db.add_column('united_geonames_geonamesmatchinglogmatchedplaces', 'remark', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'GeoNamesMatchingLogMatchedPlaces.remark'
        # Exact inverse of forwards(); the column data is lost on rollback.
        db.delete_column('united_geonames_geonamesmatchinglogmatchedplaces', 'remark')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 25, 14, 53, 19, 34425)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 25, 14, 53, 19, 34316)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'united_geonames.geonamesmatchinglogmatch': {
'Meta': {'ordering': "['-matching_index']", 'object_name': 'GeoNamesMatchingLogMatch'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'display_for_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matching_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '233', 'null': 'True', 'blank': 'True'}),
'number_of_alternatives': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'united_geonames.geonamesmatchinglogmatchedplaces': {
'Meta': {'object_name': 'GeoNamesMatchingLogMatchedPlaces'},
'best_match': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'geographical_distance': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matchinglogmatch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'matched'", 'null': 'True', 'to': "orm['united_geonames.GeoNamesMatchingLogMatch']"}),
'ngram_distance': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'percentage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'remark': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'united_geoname': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['united_geonames.UnitedGeoName']", 'null': 'True', 'blank': 'True'})
},
'united_geonames.unitedgeoname': {
'Meta': {'objec | t_name': 'UnitedGeoName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_name': ('django.db.models.fields.CharField', [], {'max_length': '300'})
| },
'united_geonames.unitedgeonamesynonim': {
'Meta': {'object_name': 'UnitedGeoNameSynonim'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'coordinates': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'spatial_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'subregion': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'synonim_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'synonim_content_type_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'synonim_name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'synonim_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'united_geoname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'geonames'", 'null': 'True', 'to': "orm['united_geonames.UnitedGeoName']"})
},
'united_geonames.usergeoname': {
'Meta': {'object_name': 'UserGeoName'},
'coordinates': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'spatial_index': ' |
diraol/trilhasp | trilhasp/trilhasp/wsgi.py | Python | agpl-3.0 | 391 | 0.002558 | """
WSGI config for trilhasp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https: | //docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.envir | on.setdefault("DJANGO_SETTINGS_MODULE", "trilhasp.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
mlosch/nnadapter | emu/keras.py | Python | mit | 8,953 | 0.002904 | from __future__ import absolute_import
import os
import numpy as np
from emu.nnadapter import NNAdapter
from keras.models import model_from_json, model_from_yaml, Model, Sequential
from keras import applications
from keras import backend
from collections import OrderedDict
from emu.docutil import doc_inherit
imagenet_mean = np.array([103.939, 116.779, 123.68])
imagenet_std = np.array([1., 1., 1.])
class KerasAdapter(NNAdapter):
"""
Overrides the NNAdapter to load and read Keras models.
An installation of Keras is required.
"""
    def __init__(self, model_cfg, model_weights, mean, std, inputsize, keep_outputs=None, use_gpu=False):
        """
        Initializes the adapter with a pretrained model from a filepath or keras-model identifier
        (see https://keras.io/applications/#available-models).

        Parameters
        ----------
        model_cfg : String or dict or list
            Model configuration defining the architecture.
            Available options:
             - Use a model identifier to define the model configuration (e.g. 'ResNet50', 'VGG19').
             - Use a file path to a yaml or json formatted file to define the model configuration.
             - Use a dict to define the model configuration (e.g. as from keras_model.get_config())
             - Use a list to define the model configuration of a Sequential model (e.g. as from keras_model.get_config())
        model_weights : String
            Available options:
             - File path to a HDF5 save file containing all weights
             - Identifier of data set (e.g. 'imagenet') if model identifier is used for model_cfg
        mean : ndarray or String
            Mean definition via array or filepath to .npy tensor.
        std : ndarray or String
            Standard deviation definition via array or filepath to .npy tensor.
        inputsize : tuple or list
            Target input data dimensionality of format: (height, width, channels).
            Used for rescaling given data in preprocess step.
        keep_outputs : list, tuple or set
            List of layer identifier strings to keep during a feed forward call to enable later access via
            get_layeroutput().
            By default no layer outputs but the last are kept.
            Consolidate get_layers() to identify layers.
        use_gpu : bool
            Flag to enable gpu use. Default: False
        """
        self.base_model = self._load_model_config(model_cfg, model_weights)
        # Only load from disk when model_weights is a file path; otherwise it
        # was a data-set identifier already consumed by _load_model_config.
        if os.path.exists(model_weights):
            self.base_model.load_weights(model_weights)

        cfg = self.base_model.get_config()
        # Ordered map of layer name -> layer class name, in architecture order.
        self.layers = OrderedDict()
        for layer in cfg['layers']:
            self.layers[layer['name']] = layer['class_name']

        if keep_outputs is None:
            self.keep_outputs = []
        else:
            self.keep_outputs = keep_outputs

        # Map each kept layer name to its index in the multi-output model.
        self.output_map = OrderedDict()
        for name, _ in self.layers.items():
            if name in self.keep_outputs:
                self.output_map[name] = len(self.output_map)
        # The last layer's output is always kept so forward() has a result.
        # NOTE(review): keys()[-1] only works on Python 2 (list, not a view) —
        # confirm the targeted interpreter.
        if self.layers.keys()[-1] not in self.output_map:
            self.output_map[self.layers.keys()[-1]] = len(self.output_map)

        # Wrap the base model so one forward pass yields all kept outputs.
        self.model = Model(inputs=self.base_model.input,
                           outputs=[self.base_model.get_layer(name).output for name in self.output_map.keys()])

        self.mean = self._load_mean_std(mean)
        self.std = self._load_mean_std(std)
        self.inputsize = inputsize

        self.use_gpu = use_gpu
        self.nomean_warn = True
        self.nostd_warn = True

        # Keras backends differ in tensor layout; remember which one applies.
        data_format = backend.image_data_format()
        if data_format == 'channels_first':
            self.dimorder = 'chw'
        else:
            self.dimorder = 'hwc'

        # Filled with layer outputs by a forward pass.
        self.blobs = []
@staticmethod
def _load_mean_std(handle):
"""
Loads mean/std values from a .npy file or returns the identity if already a numpy array.
Parameters
----------
handle : Can be either a numpy array or a filepath as string
Returns
----------
mean/std : Numpy array expressing mean/std
"""
if type(handle) == str:
if handle.endswith('.npy'):
return np.load(handle)
else:
raise ValueError('Unknown file format. Known formats: .npy')
elif type(handle) == np.ndarray:
return handle
elif handle is not None:
raise ValueError('Unknown format. Expected .npy file or numpy array.')
@staticmethod
def _load_model_config(model_cfg, model_weights):
if type(model_cfg) == str:
if not os.path.exists(model_cfg):
try:
class_ = getattr(applications, model_cfg)
return class_(weights=model_weights)
except AttributeError:
available_mdls = [attr for attr in dir(applications) if callable(getattr(applications, attr))]
raise ValueError('Could not load pretrained model with key {}. '
'Available models: {}'.format(model_cfg, ', '.join(available_mdls)))
with open(model_cfg, 'r') as fileh:
try:
return model_from_json(fileh)
except ValueError:
pass
try:
return model_from_yaml(fileh)
except ValueError:
pass
raise ValueError('Could not load model from configuration file {}. '
'Make sure the path is correct and the file format is yaml or json.'.format(model_cfg))
elif type(model_cfg) == dict:
return Model.from_config(model_cfg)
elif type(model_cfg) == list:
return Sequential.from_config(model_cfg)
raise ValueError('Could not load model from configuration object of type {}.'.format(type(model_cfg)))
    @doc_inherit
    def get_layeroutput(self, layer):
        # Outputs are only populated by a prior forward pass.
        assert len(self.blobs) > 0, 'Forward has not been called. Layer outputs are not ready.'
        if layer not in self.output_map:
            # Distinguish "exists but was filtered out" from "unknown layer".
            if layer in self.get_layers():
                raise ValueError('Layer with id {} does exist in the architecture '
                                 'but has not been cached due to the output filter: [{}]'.format(
                    layer,
                    ','.join(self.keep_outputs),
                ))
            else:
                raise ValueError('Layer with id {} does not exist.'.format(layer))
        return self.blobs[self.output_map[layer]]
@doc_inherit
def get_layerparams(self, layer):
params = self.model.get_layer(layer).get_weights()
if len(params) == 1:
return tuple(params[0], None)
else:
return tuple(params)
    @doc_inherit
    def set_weights(self, layer, weights):
        L = self.model.get_layer(layer)
        # Keep the existing bias, replace only the weight tensor.
        # NOTE(review): assumes the layer has exactly (weights, bias) params;
        # a bias-less layer would fail to unpack — confirm for target models.
        _, bias = L.get_weights()
        L.set_weights((weights, bias))
    @doc_inherit
    def set_bias(self, layer, bias):
        L = self.model.get_layer(layer)
        # Keep the existing weight tensor, replace only the bias.
        # NOTE(review): assumes exactly (weights, bias) params — see set_weights.
        weights, _ = L.get_weights()
        L.set_weights((weights, bias))
    @doc_inherit
    def get_layers(self):
        # OrderedDict of layer name -> layer class name, built in __init__.
        return self.layers
    @doc_inherit
    def model_description(self):
        # One tab-indented line per layer: "<name>: <class>", headed by the
        # model name.
        return self.model.name + '\n\t' + \
               '\n\t'.join([': '.join(entry) for entry in self.layers.items()])
def preprocess(self, listofimages):
| """
Preprocess a list of imag | es to be used with the neural network.
Parameters
----------
listofimages : List of strings or list of ndarrays, shape (Height, Width, Channels)
The list may contain image filepaths and image ndarrays.
For ndarrays, the shape (Height, Width, Channels) has to conform with the input size defined at
object construction.
ndarrays have to be normalized to 1.
Returns
-------
output : ndarray
Preprocessed batc |
jobiols/odoo-argentina | l10n_ar_afipws_fe/models/res_currency.py | Python | agpl-3.0 | 3,043 | 0 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, api, _
from openerp.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class ResCurrency(models.Model):
    """Extends res.currency with AFIP web-service lookups: the list of
    authorized currencies and the current exchange rate for this
    currency's AFIP code."""
    _inherit = "res.currency"

    @api.multi
    def action_get_pyafipws_currencies(self):
        """UI button: show the AFIP-authorized currency list."""
        return self.get_pyafipws_currencies()

    @api.model
    def get_pyafipws_currencies(self, afip_ws='wsfex', company=False):
        """Query AFIP for the authorized currencies and raise a UserError
        displaying them (this method is informational only)."""
        # if not company, then we search one that uses argentinian localization
        if not company:
            company = self.env['res.company'].search(
                [('localization', '=', 'argentina')],
                limit=1)
            if not company:
                raise UserError(_(
                    'No company found using argentinian localization'))
        ws = company.get_connection(afip_ws).connect()
        # Each web service exposes the currency listing differently.
        if afip_ws == 'wsfex':
            ret = ws.GetParamMon(sep=" ")
        elif afip_ws == 'wsfe':
            ret = ws.ParamGetTiposMonedas(sep=" ")
        elif afip_ws == 'wsbfe':
            ret = ws.GetParamMon()
        else:
            raise UserError(_('AFIP WS %s not implemented') % (
                afip_ws))
        msg = (_("Authorized Currencies on AFIP%s\n. \nObservations: %s") % (
            '\n '.join(ret), ".\n".join([ws.Excepcion, ws.ErrMsg, ws.Obs])))
        raise UserError(msg)

    @api.multi
    def action_get_pyafipws_currency_rate(self):
        """UI button: show the current AFIP rate for this currency."""
        raise UserError(self.get_pyafipws_currency_rate()[1])

    @api.multi
    # def get_pyafipws_currency_rate(self, afip_ws='wsfex', company=False):
    def get_pyafipws_currency_rate(self, afip_ws='wsfe', company=False):
        """Return (rate, message) for this currency from AFIP.

        :returns: tuple of (float rate, human-readable message including
            AFIP observations)
        """
        self.ensure_one()
        # if not company, then we search one that uses argentinian localization
        if not company:
            company = self.env['res.company'].search(
                [('localization', '=', 'argentina')],
                limit=1)
            if not company:
                raise UserError(_(
                    'No company found using argentinian localization'))
        if not self.afip_code:
            raise UserError(_('No AFIP code for currency %s') % self.name)
        ws = company.get_connection(afip_ws).connect()
        # This should also be implemented for wsbfe, but it raises an error:
        # BFEGetPARAM_Ctz not found in WSDL
        # if afip_ws in ["wsfex", 'wsbfe']:
        if afip_ws == "wsfex":
            rate = ws.GetParamCtz(self.afip_code)
        elif afip_ws == "wsfe":
            rate = ws.ParamGetCotizacion(self.afip_code)
        else:
            raise UserError(_('AFIP WS %s not implemented') % (
                afip_ws))
        msg = (_("Currency rate for %s: %s.\nObservations: %s") % (
            self.name, rate, ".\n".join([ws.Excepcion, ws.ErrMsg, ws.Obs])))
        return (float(rate), msg)
|
shiquanwang/numba | numba/tests/test_mandelbrot.py | Python | bsd-2-clause | 1,319 | 0.002274 | """
>>> image = np.zeros((50, 75), dtype=np.uint8)
>>> numpy_image = image.copy()
>>> image = create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
>>> numpy_image = create_fractal.py_func(-2.0, 1.0, -1.0, 1.0, numpy_image, 20)
>>> assert np.allclose(image, numpy_image)
"""
from numba import *
import numpy as np
@autojit(nopython=True)
def mandel(x, y, max_iters):
    """Return the escape iteration for the complex point (x + yi).

    Iterates z -> z*z + c up to *max_iters* times; returns the iteration
    count at which |z|^2 reaches 4, or 255 if the point never escapes
    (i.e. it is a candidate member of the Mandelbrot set).
    """
    i = 0  # pre-declared so the jitted function has a typed loop variable
    cval = complex(x, y)
    zval = 0.0j
    for i in range(max_iters):
        zval = zval * zval + cval
        # Compare squared magnitude against 4 to avoid a sqrt per step.
        if (zval.real * zval.real + zval.imag * zval.imag) >= 4:
            return i
    return 255
@autojit
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
    """Fill *image* with Mandelbrot escape counts over the given rectangle.

    Each pixel (x, y) is mapped linearly into the complex rectangle
    [min_x, max_x] x [min_y, max_y] and colored with mandel()'s result.
    Returns the (mutated) image array.
    """
    with nopython:
        height = image.shape[0]
        width = image.shape[1]
        # Size of one pixel in the complex plane along each axis.
        dx = (max_x - min_x) / width
        dy = (max_y - min_y) / height
        for x in range(width):
            real = min_x + x * dx
            for y in range(height):
                imag = min_y + y * dy
                image[y, x] = mandel(real, imag, iters)
    return image
if __name__ == "__main__":
import numba
numba.testing.testmod()
|
asadoughi/quark | quark/db/migration/alembic/versions/3b467be51e43_composite_primary_key_port_ip_.py | Python | apache-2.0 | 3,036 | 0.001318 | """composite primary key port ip association
Revision ID: 3b467be51e43
Revises: 26e984b48a0d
Create Date: 2014-10-09 15:42:58.104964
"""
# revision identifiers, used by Alembic.
revision = '3b467be51e43'
down_revision = '26e984b48a0d'
import contextlib
import itertools
from alembic import op
import sqlalchemy as sa
def _drop_foreign_key(op, table, fk):
op.drop_constraint(fk.name, table.name, type_="foreignkey")
def _create_foreign_key(op, table, fk):
ref_tbl_name = fk.column.table.name
ref_tbl_col = fk.column.name
op.create_foreign_key(fk.name, table.name, ref_tbl_name,
[fk.parent.name], [ref_tbl_col])
def _alter_foreign_keys(op, table, action, fk_constraints):
actions = {
"drop": _drop_foreign_key,
"create": _create_foreign_key
}
# NOTE(thomasem): Flatten list of ForeignKeys we want to work on from the
# ForeignKeyConstraint objects that may contain multiple ForeignKey
# objects.
[actions[action](op, table, fk) for fk
in itertools.chain.from_iterable([c.elements for c in fk_constraints])]
@contextlib.contextmanager
def _foreign_keys_dropped(op, table):
    """Context manager that drops every foreign key constraint on *table*
    and re-creates the same constraints when the block exits.

    NOTE(review): the yield is not wrapped in try/finally, so constraints
    are NOT restored if the body raises -- presumably acceptable for a
    failed migration, but confirm.
    """
    # Collect only the FK constraints; other constraint types are untouched.
    fk_constraints = [c for c in table.constraints
                      if isinstance(c, sa.schema.ForeignKeyConstraint)]
    _alter_foreign_keys(op, table, "drop", fk_constraints)
    yield
    _alter_foreign_keys(op, table, "create", fk_constraints)
def upgrade():
    """Make (port_id, ip_address_id) the composite primary key of the
    port/IP association table."""
    table_name = 'quark_port_ip_address_associations'
    metadata = sa.MetaData(bind=op.get_bind())
    table = sa.Table(table_name, metadata,
                     autoload=True)
    # FKs must be out of the way while the columns and the PK are altered.
    with _foreign_keys_dropped(op, table):
        # Primary key columns have to be NOT NULL.
        op.alter_column(table_name, 'ip_address_id',
                        existing_type=sa.String(36), nullable=False)
        op.alter_column(table_name, 'port_id',
                        existing_type=sa.String(36), nullable=False)
        op.create_primary_key("pk_quark_port_ip_address_associations",
                              table_name,
                              ['port_id', 'ip_address_id'])
def downgrade():
    """Drop the composite primary key and make both columns nullable again."""
    table_name = 'quark_port_ip_address_associations'
    metadata = sa.MetaData(bind=op.get_bind())
    table = sa.Table(table_name, metadata,
                     autoload=True)
    # NOTE(thomasem): primary keys cannot be removed from columns that still
    # carry a ForeignKeyConstraint, so the FKs are temporarily dropped and
    # re-added as soon as the PrimaryKeyConstraint is gone.
    with _foreign_keys_dropped(op, table):
        op.drop_constraint("pk_quark_port_ip_address_associations",
                           table_name,
                           type_="primary")
        op.alter_column(table_name, 'port_id',
                        existing_type=sa.String(36), nullable=True)
        op.alter_column(table_name, 'ip_address_id',
                        existing_type=sa.String(36), nullable=True)
|
2nd47/UofT-Projects | CSC369/Virtual Memory/traceprogs/fastslim.py | Python | gpl-2.0 | 2,771 | 0.018766 | #!/usr/bin/python
# This program processes an address trace generated by the Valgrind lackey tool
# to create a reduced trace according to the Fastslim-Demand algorithm
# described in " | FastSlim: prefetch-safe trace reduction for I/O cache
# simulation" by Wei Jin, Xiaobai Sun, and Jeffrey S. Chase in ACM Transactions
# on Modeling and Computer Simulation, Vol. 11, No. 2 (April 2001),
# pages 125-160. http://doi.acm.org/10.1145/384169.384170
import fileinput
import sys
import argparse
from | operator import attrgetter
class TraceItem(object):
    """One page reference from the address trace.

    Equality and hashing are deliberately based on the page number alone,
    so a set of TraceItems deduplicates references to the same page
    regardless of reference type or timestamp.
    """

    def __init__(self, reftype, pg, tstamp):
        self.reftype = reftype  # valgrind reference type (e.g. 'I', 'L', 'S', 'M')
        self.pg = pg            # page number (address // page size)
        self.tstamp = tstamp    # position of the entry in the original trace
        self.marked = False     # set once the page is referenced again while buffered

    def __eq__(self, other):
        # Return NotImplemented for non-TraceItem operands instead of
        # raising AttributeError on other.pg (the original behavior).
        if not isinstance(other, TraceItem):
            return NotImplemented
        return self.pg == other.pg

    def __repr__(self):
        # Print the page's base address in hex, prefixed by the ref type.
        return self.reftype + " " + format(self.pg * 4096, 'x')

    def __hash__(self):
        return hash(self.pg)
ts = 0 # "timestamp" (entry number in original trace)
tracebuffer = set() # The set of entries in the buffer
toprint = [] # The list of entries waiting to be printed in order
# Emit in timestamp order may have to hold onto items until the trace buffer
# is emptied, because there may be marked items in the trace buffer with
# earlier timestamps that have to appear in the output first.
# So, we put entries into a list as they are first seen and then
# emit_marked adds all marked items to the list.
# The list is then sorted by timestamp and printed.
def emit_marked_in_ts_order():
    """Flush the trace buffer: print all marked buffered entries together
    with the pending `toprint` entries, sorted by timestamp, then empty
    both structures.

    Operates on the module-level globals `tracebuffer` and `toprint`.
    Python 2 only (uses the print statement).
    """
    # Collect the marked survivors of the current buffer window.
    for ti in tracebuffer:
        if ti.marked:
            toprint.append(ti)
    toprint.sort(key=attrgetter('tstamp'))
    for ti in toprint:
        print ti
    tracebuffer.clear()
    # Clear in place so the module-level binding is preserved.
    del toprint[:]
# Parse command line arguments
parser = argparse.ArgumentParser(description="Reduce address trace from valgrind using fastslim-demand algorithm.")
parser.add_argument('-k', '--keepcode', action='store_true', help="include code pages in compressed trace")
parser.add_argument('-b', '--buffersize', type=int, default=4, help="number of entries in trace buffer")
parser.add_argument('tracefile', nargs='?', default="-")
args = parser.parse_args()

# Process input trace, one lackey output line at a time.
for line in fileinput.input(args.tracefile):
    # Lines starting with '=' are valgrind banner/summary output.
    if line[0] == '=':
        continue
    reftype = line[0:2].strip()
    # 'I' marks instruction (code-page) fetches; skip them unless requested.
    if reftype == "I" and args.keepcode == False:
        continue
    addrstr = line.split(',')[0][3:].strip()
    try:
        addr = int(addrstr, 16)
    except ValueError:
        #print "This does not appear to be valgrind output, skipping: " + line
        continue
    # NOTE: Python 2 integer division (address -> 4 KiB page number); under
    # Python 3 this would need to be addr // 4096.
    pg = addr / 4096
    ti = TraceItem(reftype,pg,ts)
    if ti in tracebuffer:
        # NOTE(review): this marks the NEW TraceItem, which is then
        # discarded -- the equal item stored in tracebuffer keeps
        # marked=False, so emit_marked_in_ts_order() never sees it as
        # marked. Looks like a bug; confirm intended behavior.
        ti.marked = True
        ti.tstamp = ts
    else:
        # Buffer full: flush it before admitting the new entry.
        if (len(tracebuffer) == args.buffersize):
            emit_marked_in_ts_order()
        toprint.append(ti)
        tracebuffer.add(ti)
    ts = ts + 1
|
addition-it-solutions/project-all | addons/account_analytic_default/__openerp__.py | Python | agpl-3.0 | 1,764 | 0.001134 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is fre | e software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is | distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Analytic Defaults',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
Set default values for your analytic accounts.
==============================================
Allows to automatically select analytic accounts based on criterions:
---------------------------------------------------------------------
* Product
* Partner
* User
* Company
* Date
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/accounting',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'security/account_analytic_default_security.xml',
'account_analytic_default_view.xml'
],
'demo': [],
'installable': True,
'auto_install': False,
}
|
lukius/ptc | ptc/cblock.py | Python | mit | 5,507 | 0.005266 | import threading
from buffer import DataBuffer
from seqnum import SequenceNumber
class PTCControlBlock(object):
    """Transmission control block for a PTC connection.

    Tracks the RFC 793-style send/receive state variables (SND.*, RCV.*),
    the inbound/outbound data buffers, and a re-entrant lock.  Instances
    can be used as context managers to take that lock.
    """

    def __init__(self, send_seq, receive_seq, send_window, receive_window):
        # Send-side state (RFC 793 naming).
        self.snd_wnd = send_window          # peer's advertised window
        self.snd_nxt = send_seq.clone()     # next sequence number to send
        self.snd_una = send_seq.clone()     # oldest unacknowledged byte
        # Receive-side state.
        self.rcv_nxt = receive_seq.clone()  # next sequence number expected
        self.rcv_wnd = receive_window       # bytes we can still accept
        # Segment/ack numbers of the last window update (RFC 793 WL1/WL2).
        self.snd_wl1 = receive_seq.clone()
        self.snd_wl2 = send_seq.clone()
        self.in_buffer = DataBuffer(start_index=receive_seq.clone())
        self.out_buffer = DataBuffer(start_index=send_seq.clone())
        self.lock = threading.RLock()

    def get_snd_nxt(self):
        return self.snd_nxt

    def get_snd_una(self):
        return self.snd_una

    def get_snd_wnd(self):
        return self.snd_wnd

    def get_snd_wl1(self):
        return self.snd_wl1

    def get_snd_wl2(self):
        return self.snd_wl2

    def get_rcv_nxt(self):
        return self.rcv_nxt

    def get_rcv_wnd(self):
        return self.rcv_wnd

    def increment_snd_nxt(self):
        with self:
            self.snd_nxt += 1

    def increment_snd_una(self):
        with self:
            self.snd_una += 1

    def increment_rcv_nxt(self):
        with self:
            self.rcv_nxt += 1

    def process_incoming(self, packet, ignore_payload=False):
        """Update the block from an incoming packet: its ACK always, its
        payload unless *ignore_payload* is set."""
        self.process_ack(packet)
        if not ignore_payload:
            self.process_payload(packet)

    def process_payload(self, packet):
        """Store the in-window portion of the packet's payload."""
        if not self.payload_is_accepted(packet):
            return
        seq_lo, seq_hi = packet.get_seq_interval()
        payload = packet.get_payload()
        # Honor RCV_WND by clipping the segment to
        # [rcv_nxt, rcv_nxt + rcv_wnd): bytes below or beyond are dropped.
        accept_lo = max(self.rcv_nxt, seq_lo)
        accept_hi = min(self.rcv_nxt + self.rcv_wnd, seq_hi)
        accepted = payload[accept_lo - seq_lo:accept_hi - seq_lo]
        self.in_buffer.add_chunk(accept_lo, accepted)
        if accept_lo == self.rcv_nxt:
            # The chunk starts exactly at rcv_nxt, so rcv_nxt advances.
            # The buffer tracks this value as data is inserted and removed.
            self.rcv_nxt = self.in_buffer.get_last_index()
        # Shrink the window until data is removed from the buffer
        # (from_in_buffer grows it back).
        self.rcv_wnd = self.rcv_wnd - len(accepted)

    def process_ack(self, packet):
        """Process the packet's acknowledgment number and, when applicable,
        its window advertisement."""
        ack_number = packet.get_ack_number()
        if self.ack_is_accepted(ack_number):
            self.snd_una = ack_number
        if self.should_update_window(ack_number):
            self.update_window(packet)

    def ack_is_accepted(self, ack_number):
        # Accept only if SND_UNA < ACK <= SND_NXT.
        return SequenceNumber.a_lt_b_leq_c(self.snd_una, ack_number,
                                           self.snd_nxt)

    def payload_is_accepted(self, packet):
        """True when the segment is non-empty and at least one of its
        endpoints falls inside the receive window."""
        seq_lo, seq_hi = packet.get_seq_interval()
        first_byte, last_byte = seq_lo, seq_hi - 1
        window_end = self.rcv_nxt + self.rcv_wnd
        starts_inside = SequenceNumber.a_leq_b_leq_c(self.rcv_nxt,
                                                     first_byte,
                                                     window_end)
        ends_inside = SequenceNumber.a_leq_b_leq_c(self.rcv_nxt, last_byte,
                                                   window_end)
        return last_byte >= first_byte and (starts_inside or ends_inside)

    def should_update_window(self, ack_number):
        # TODO: add tests for this.
        # RFC 1122, p.94 (correction to RFC 793): SND_UNA <= ACK <= SND_NXT.
        return SequenceNumber.a_leq_b_leq_c(self.snd_una, ack_number,
                                            self.snd_nxt)

    def update_window(self, packet):
        """Adopt the packet's advertised window if the packet is newer than
        the one that produced the last update."""
        seq_number = packet.get_seq_number()
        ack_number = packet.get_ack_number()
        if self.snd_wl1 < seq_number or \
           (self.snd_wl1 == seq_number and self.snd_wl2 <= ack_number):
            self.snd_wnd = packet.get_window_size()
            self.snd_wl1 = seq_number
            self.snd_wl2 = ack_number

    def usable_window_size(self):
        """Number of bytes that may still be sent without overflowing the
        peer's advertised window."""
        window_limit = self.snd_una + self.snd_wnd
        if SequenceNumber.a_leq_b_leq_c(self.snd_una, self.snd_nxt,
                                        window_limit):
            return window_limit - self.snd_nxt
        # TODO: add test!  SND_NXT already ran past the window limit.
        return 0

    def has_data_to_send(self):
        return not self.out_buffer.empty()

    def to_out_buffer(self, data):
        self.out_buffer.put(data)

    def from_in_buffer(self, size):
        """Hand at most *size* received bytes to the user, growing the
        receive window by the amount consumed."""
        data = self.in_buffer.get(size)
        with self:
            self.rcv_wnd += len(data)
        return data

    def extract_from_out_buffer(self, size):
        """Take at most *size* bytes (clipped to the usable window) for
        transmission, advancing SND_NXT accordingly."""
        size = min(size, self.usable_window_size())
        data = self.out_buffer.get(size)
        self.snd_nxt += len(data)
        return data

    def flush_buffers(self):
        self.in_buffer.flush()
        self.out_buffer.flush()

    def __enter__(self, *args, **kwargs):
        return self.lock.__enter__(*args, **kwargs)

    def __exit__(self, *args, **kwargs):
        return self.lock.__exit__(*args, **kwargs)
plxaye/chromium | src/chrome/test/functional/media_stream_infobar.py | Python | apache-2.0 | 3,390 | 0.00295 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import pyauto_functional
import pyauto
import webrtc_test_base
class MediaStreamInfobarTest(webrtc_test_base.WebrtcTestBase):
    """Performs basic tests on the media stream infobar.

    This infobar is used to grant or deny access to WebRTC capabilities for
    a webpage. If a page calls the getUserMedia function the infobar will
    ask the user if it is OK for the webpage to use the webcam or microphone
    on the user's machine. These tests ensure that the infobar works as
    intended.
    """

    def ExtraChromeFlags(self):
        """Adds flags to the Chrome command line."""
        extra_flags = ['--enable-media-stream']
        return pyauto.PyUITest.ExtraChromeFlags(self) + extra_flags

    def testAllowingUserMedia(self):
        """Test that selecting 'accept' gives us a media stream.

        When the user clicks allow, the javascript success callback must be
        invoked with a media stream.
        """
        self.assertEquals('ok-got-stream',
                          self._TestGetUserMedia(with_action='accept'))

    def testDenyingUserMedia(self):
        """Tests that selecting 'cancel' actually denies access to user media.

        When the user clicks deny in the user media bar, the javascript
        error callback must fire with an error specification instead of the
        success callback with a media stream, so the user can keep the page
        away from the webcam.
        """
        # Error 1 = Permission denied
        self.assertEquals('failed-with-error-1',
                          self._TestGetUserMedia(with_action='cancel'))

    def testDismissingUserMedia(self):
        """Dismiss should be treated just like deny, which is described above."""
        # Error 1 = Permission denied
        self.assertEquals('failed-with-error-1',
                          self._TestGetUserMedia(with_action='dismiss'))

    def testConsecutiveGetUserMediaCalls(self):
        """Ensures we deal appropriately with several consecutive requests."""
        # Each action must produce the corresponding result, in this order.
        expectations = [
            ('dismiss', 'failed-with-error-1'),
            ('cancel', 'failed-with-error-1'),
            ('accept', 'ok-got-stream'),
            ('cancel', 'failed-with-error-1'),
            ('accept', 'ok-got-stream'),
            ('dismiss', 'failed-with-error-1'),
        ]
        for action, expected in expectations:
            self.assertEquals(expected,
                              self._TestGetUserMedia(with_action=action))

    def _TestGetUserMedia(self, with_action):
        """Runs getUserMedia in the test page and returns the result."""
        url = self.GetFileURLForDataPath('webrtc', 'webrtc_jsep01_test.html')
        self.NavigateToURL(url)
        self.assertEquals('ok-requested', self.ExecuteJavascript(
            'getUserMedia("{ audio: true, video: true, }")'))
        self.WaitForInfobarCount(1)
        self.PerformActionOnInfobar(with_action, infobar_index=0)
        self.WaitForGetUserMediaResult(tab_index=0)
        return self.GetUserMediaResult(tab_index=0)
if __name__ == '__main__':
    # Delegate to the pyauto functional-test runner.
    pyauto_functional.Main()
|
misterwindupbird/IBO | ego/utils/performance.py | Python | mit | 2,167 | 0.00323 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2010, 2011 by Eric Brochu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
performance.py
Created by Eric on 2010-02-15.
"""
import sys
import os
from numpy import *
from scipy.special import erfinv
from ego.utils.optimize import cdirect
class MuBound(object):
    """Negated confidence bound on a Gaussian process posterior.

    Minimizing ``objective`` maximizes mu(x) + vscale * sig2(x), where
    vscale is the probit (inverse standard-normal CDF) of *delta*.
    """

    def __init__(self, GP, delta):
        super(MuBound, self).__init__()
        self.GP = GP
        # probit of delta: sqrt(2) * erfinv(2*delta - 1).
        self.vscale = sqrt(2) * erfinv(2 * delta - 1)

    def objective(self, x):
        """Negated value of Gdelta at *x* (negated so that a minimizer
        maximizes the bound).

        NOTE(review): vscale multiplies the posterior's second return value
        directly; the name sig2 suggests a variance rather than a standard
        deviation -- confirm that is intended.
        """
        mu, sig2 = self.GP.posterior(x)
        return -(mu + sig2 * self.vscale)
def Gdelta(GP, testfunc, firstY, delta=0.01, maxiter=10, **kwargs):
    """Normalized confidence-bounded estimate of the achieved maximum.

    Given a GP, optimize G_delta (the delta-confidence-bounded prediction
    of the maximum of the response surface) over the test function's
    bounds, evaluate the true function at the optimizer, and return the
    improvement over *firstY* normalized by the best possible improvement.
    """
    assert testfunc.maximize
    bound = MuBound(GP, delta)
    # cdirect minimizes, and MuBound.objective is negated accordingly.
    _, optx = cdirect(bound.objective, testfunc.bounds, maxiter=maxiter, **kwargs)
    opt = max(testfunc.f(optx), firstY)
    return (opt - firstY) / (-testfunc.minimum - firstY)
|
UnionGospelMission/UGM-Database | Sandbox/SecureDict.py | Python | gpl-3.0 | 1,147 | 0.015693 | def interactiveConsole(a,b=None):
'''
Useful function for debugging
Placing interactiveConsole(locals(),globals()) into code will
drop into an interactive console when run
'''
| import code
d = {}
if b:
d.update(b)
d.update(a)
c=code.InteractiveConsole(locals=d)
c.interact()
class SecureDict(object):
    """A minimal dict wrapper exposing access only through explicit methods.

    ``__slots__`` prevents arbitrary attribute injection on instances; the
    underlying mapping is held in the ``__items__`` slot.
    """
    __slots__ = ('__items__',)

    def __init__(self, *a, **kw):
        # Accepts the same arguments as the built-in dict constructor.
        self.__items__ = dict(*a, **kw)

    def getItem(self, item, default=None):
        """Return the value stored for *item*.

        If *default* is provided (and is not None) it is returned for a
        missing key; otherwise a missing key raises KeyError.  Note that
        None itself therefore cannot be used as an explicit default.
        """
        # 'is not None' instead of '!= None': identical behavior, correct idiom.
        if default is not None:
            return self.__items__.get(item, default)
        try:
            return self.__items__[item]
        except KeyError:
            raise KeyError('Key Error: %s' % item)

    def setItem(self, item, value):
        """Store *value* under *item*."""
        self.__items__[item] = value

    def __len__(self):
        return len(self.__items__)

    def __repr__(self):
        return 'SecureDict(%r)' % self.__items__
    __str__ = __repr__

    def keys(self):
        """Return the keys of the underlying mapping."""
        return self.__items__.keys()

    def values(self):
        """Return the values of the underlying mapping."""
        return self.__items__.values()

    def pop(self, key):
        """Remove *key* and return its value; raises KeyError if missing."""
        return self.__items__.pop(key)
|
JonathanAlvarado/bioclimatica | nom020/nom/forms.py | Python | mit | 1,697 | 0.041937 | #-*- coding: utf-8 -*-
from django import forms
from django.db.models import Q
from nom.models import soluciones, estados, ciudades
class data_form(forms.Form):
    """Location-selection form: state and city dropdowns whose choices come
    from the database.  The city list starts with only a placeholder and is
    expected to be filled in once a state is selected.
    """

    # Each choice list begins with a "nothing selected" placeholder entry.
    # NOTE(review): these querysets run at import time -- confirm acceptable.
    state_choices = [('0', 'Escoge un estado'), ] + [(edo.id, edo.estado) for edo in estados.objects.all()]
    city_choices = [('0', 'Escoge una ciudad')]
    # techo_choices = [('', 'Escoge un material'), ] + [(sol.id, sol.nombre) for sol in soluciones.objects.filter(tipo="techo")]
    # muro_choices = [('', 'Escoge un material'), ] + [(sol.id, sol.nombre) for sol in soluciones.objects.filter(Q(tipo="muro") | Q(tipo="ventana") | Q(tipo="piso"))]
    nfloor_choices = (
        ('0', 'Pisos de la construcción'), ('1', '1 a 3 pisos'),
        ('4', 'Más de 3 pisos'),
    )
    house_part_choices = [('0', 'Parte de la casa'), ('techo', 'Techo'),
                          ('ventana', 'Ventana'), ('piso', 'Piso'), ('pared', 'Pared'),
                          ]
    ubication_choices = [('0', 'Escoge...')]
    material_choices = [('0', 'Escoge un material')]

    state = forms.ChoiceField(choices=state_choices, label='Estado')
    city = forms.ChoiceField(choices=city_choices, label='Ciudad')
    # nfloor = forms.ChoiceField(choices=nfloor_choices, label='Total de pisos')
    # house_part = forms.ChoiceField(choices=house_part_choices, label='Parte de la casa')
    # ubication = forms.ChoiceField(choices=ubication_choices, label='Ubicación')
    # material = forms.ChoiceField(choices=material_choices, label='Material')
    # area = forms.FloatField(label="Área", max_value=9000)
    '''
    #solutions = [['muroNorte','Ejemplo Muro',20,'Muro Masivo'],['muroNorte','Ejemplo Muro',20,'Muro Masivo']]
    #email = forms.EmailField(required=False)
    #message = forms.CharField()'''
|
rspavel/spack | var/spack/repos/builtin/packages/libint/package.py | Python | lgpl-2.1 | 7,364 | 0.001358 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
# Values accepted by the package's 'tune' variant: 'none' disables tuning;
# the rest select a consumer package (cp2k or molgw) and the maximum
# angular momentum (lmax) to generate integral code for.
TUNE_VARIANTS = (
    'none',
    'cp2k-lmax-4',
    'cp2k-lmax-5',
    'cp2k-lmax-6',
    'cp2k-lmax-7',
    'molgw-lmax-4',
    'molgw-lmax-5',
    'molgw-lmax-6',
    'molgw-lmax-7',
)
class Libint(AutotoolsPackage):
    """Libint is a high-performance library for computing
    Gaussian integrals in quantum mechanics.
    """

    homepage = "https://github.com/evaleev/libint"
    url = "https://github.com/evaleev/libint/archive/v2.1.0.tar.gz"

    version('2.6.0', sha256='4ae47e8f0b5632c3d2a956469a7920896708e9f0e396ec10071b8181e4c8d9fa')
    version('2.4.2', sha256='86dff38065e69a3a51d15cfdc638f766044cb87e5c6682d960c14f9847e2eac3')
    version('2.4.1', sha256='0513be124563fdbbc7cd3c7043e221df1bda236a037027ba9343429a27db8ce4')
    version('2.4.0', sha256='52eb16f065406099dcfaceb12f9a7f7e329c9cfcf6ed9bfacb0cff7431dd6019')
    version('2.2.0', sha256='f737d485f33ac819d7f28c6ce303b1f3a2296bfd2c14f7c1323f8c5d370bb0e3')
    version('2.1.0', sha256='43c453a1663aa1c55294df89ff9ece3aefc8d1bbba5ea31dbfe71b2d812e24c8')
    version('1.1.6', sha256='f201b0c621df678cfe8bdf3990796b8976ff194aba357ae398f2f29b0e2985a6')
    version('1.1.5', sha256='ec8cd4a4ba1e1a98230165210c293632372f0e573acd878ed62e5ec6f8b6174b')

    variant('fortran', default=False,
            description='Build & install Fortran bindings')
    variant('tune', default='none', multi=False,
            values=TUNE_VARIANTS,
            description='Tune libint for use with the given package')

    # Build dependencies
    depends_on('autoconf@2.52:', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')

    # Libint 2 dependencies
    depends_on('boost', when='@2:')
    depends_on('gmp', when='@2:')

    # Tuning is only available starting with the 2.6 series.
    for tvariant in TUNE_VARIANTS[1:]:
        conflicts('tune={0}'.format(tvariant), when='@:2.5.99',
                  msg=('for versions prior to 2.6, tuning for specific'
                       'codes/configurations is not supported'))

    def url_for_version(self, version):
        """Map a version to its download URL (the naming scheme changed
        twice over the project's history)."""
        base_url = "https://github.com/evaleev/libint/archive"
        if version == Version('1.0.0'):
            return "{0}/LIBINT_1_00.tar.gz".format(base_url)
        elif version < Version('2.1.0'):
            return "{0}/release-{1}.tar.gz".format(base_url, version.dashed)
        else:
            return "{0}/v{1}.tar.gz".format(base_url, version)

    def autoreconf(self, spec, prefix):
        """Regenerate the configure machinery shipped in the tarball."""
        libtoolize()
        aclocal('-I', 'lib/autoconf')
        autoconf()

        if '@2.6.0:' in spec:
            # Skip tarball creation and removal of the dir with generated code.
            filter_file(r'^(export::.*)\s+tgz$', r'\1', 'export/Makefile')

    @property
    def optflags(self):
        """Optimization flags passed to the C and C++ compilers."""
        flags = '-O2'

        # Optimizations for the Intel compiler, suggested by CP2K.
        # See ../libxc/package.py for rationale and doc.
        if '%intel' in self.spec:
            flags += ' -xSSE4.2 -axAVX,CORE-AVX2 -ipo'

        return flags

    def setup_build_environment(self, env):
        # Set optimization flags
        env.set('CFLAGS', self.optflags)
        env.set('CXXFLAGS', self.optflags)

        # Change AR to xiar if we compile with Intel and we
        # find the executable
        if '%intel' in self.spec and which('xiar'):
            env.set('AR', 'xiar')

    def configure_args(self):
        """Assemble the ./configure flags, honoring version differences and
        the requested tuning target."""
        config_args = ['--enable-shared']
        optflags = self.optflags

        # Optimization flag names have changed in libint 2
        if self.version < Version('2.0.0'):
            config_args.extend([
                '--with-cc-optflags={0}'.format(optflags),
                '--with-cxx-optflags={0}'.format(optflags)
            ])
        else:
            config_args.extend([
                '--with-cxx-optflags={0}'.format(optflags),
                '--with-cxxgen-optflags={0}'.format(optflags)
            ])

        # Options required by CP2K, removed in libint 2
        if self.version < Version('2.0.0'):
            config_args.extend([
                '--with-libint-max-am=5',
                '--with-libderiv-max-am1=4'
            ])

        if '@2.6.0:' in self.spec:
            config_args += ['--with-libint-exportdir=generated']

            tune_value = self.spec.variants['tune'].value
            if tune_value.startswith('cp2k'):
                lmax = int(tune_value.split('-lmax-')[1])
                config_args += [
                    '--enable-eri=1',
                    '--enable-eri2=1',
                    '--enable-eri3=1',
                    '--with-max-am={0}'.format(lmax),
                    '--with-eri-max-am={0},{1}'.format(lmax, lmax - 1),
                    '--with-eri2-max-am={0},{1}'.format(lmax + 2, lmax + 1),
                    '--with-eri3-max-am={0},{1}'.format(lmax + 2, lmax + 1),
                    '--with-opt-am=3',
                    # keep code-size at an acceptable limit,
                    # cf. https://github.com/evaleev/libint/wiki#program-specific-notes:
                    '--enable-generic-code',
                    '--disable-unrolling',
                ]
            if tune_value.startswith('molgw'):
                lmax = int(tune_value.split('-lmax-')[1])
                config_args += [
                    '--enable-1body=1',
                    '--enable-eri=0',
                    '--enable-eri2=0',
                    '--enable-eri3=0',
                    '--with-multipole-max-order=0',
                    '--with-max-am={0}'.format(lmax),
                    '--with-eri-max-am={0}'.format(lmax),
                    '--with-eri2-max-am={0}'.format(lmax),
                    '--with-eri3-max-am={0}'.format(lmax),
                    '--with-opt-am=2',
                    '--enable-contracted-ints',
                    # keep code-size at an acceptable limit,
                    # cf. https://github.com/evaleev/libint/wiki#program-specific-notes:
                    '--enable-generic-code',
                    '--disable-unrolling',
                ]

        return config_args

    @property
    def build_targets(self):
        # From 2.6.0 on, the first stage only generates the integral code.
        if '@2.6.0:' in self.spec:
            return ['export']
        return []

    @when('@2.6.0:')
    def install(self, spec, prefix):
        """
        Starting from libint 2.6.0 we're using the 2-stage build
        to get support for the Fortran bindings, required by some
        packages (CP2K notably).
        """
        # upstream says that using configure/make for the generated code
        # is deprecated and one should use CMake, but with the currently
        # recent 2.7.0.b1 it still doesn't work
        with working_dir(os.path.join(self.build_directory, 'generated')):
            # straight from the AutotoolsPackage class:
            options = [
                '--prefix={0}'.format(prefix),
                '--enable-shared',
                '--with-cxx-optflags={0}'.format(self.optflags),
            ]
            if '+fortran' in spec:
                options += ['--enable-fortran']
            configure = Executable('./configure')
            configure(*options)
            make()
            make('install')
|
SANDAG/pandana | pandana/testing.py | Python | agpl-3.0 | 129 | 0 | import os
import pytest
# pytest marker that skips the decorated test when running under Travis-CI.
skipiftravis = pytest.mark.skipif(
    os.environ.get('TRAVIS') == 'true', reason='skip on Travis-CI')
|
aglassman/beer | django_beer/django_beer/tests.py | Python | gpl-2.0 | 1,261 | 0.037272 | from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import force_authenticate
|
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from beer_api.models import *
class TestGlassTypePost(APITestCase):
    """Exercises authentication handling on the /glass_types/ endpoint."""

    def test_auth_fail(self):
        """An anonymous POST must be rejected with 403 Forbidden."""
        payload = {
            'glass_type': 'Mug',
            'description': 'A cup with a handle.',
        }
        response = self.client.post('/glass_types/', payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(response.data['detail'],
                         'Authentication credentials were not provided.')

    def test_auth_pass(self):
        """An authenticated POST creates the object, which can be read back."""
        User.objects.create_user('test', 'test@test.com', 'testpass')
        self.client.login(username='test', password='testpass')
        payload = {
            'glass_type': 'Mug',
            'description': 'A cup with a handle.',
        }
        response = self.client.post('/glass_types/', payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Fetch the newly created record and verify the stored fields.
        response = self.client.get('/glass_types/1/')
        self.assertEqual(response.data['glass_type'], payload['glass_type'])
        self.assertEqual(response.data['description'], payload['description'])
|
BennerLab/atg | tests/test_alignment_helper.py | Python | gpl-3.0 | 5,435 | 0.004416 | import pytest
import os
import sys
import shlex
from atg.util import align
BOWTIE2_LOG = os.path.join(os.path.dirname(__file__), 'data', 'bowtie2_SE.log')
KALLISTO_LOG = os.path.join(os.path.dirname(__file__), 'data', 'kallisto_run.json')
class TestSTARCommand:
    """Unit tests for building STAR alignment command lines."""

    def test_star_arg_parsing(self):
        # The aligner's argparse parser should map flags to namespace fields.
        arguments = shlex.split(' -k -t 16 /directory/genomeIndex output_directory a.fastq b.fastq')
        parser = align.STARAligner.get_argument_parser()
        star_argument_namespace = parser.parse_args(arguments)
        assert star_argument_namespace.index == '/directory/genomeIndex'
        assert star_argument_namespace.fastq == ['a.fastq', 'b.fastq']
        assert star_argument_namespace.keep_unmapped
        assert star_argument_namespace.threads == 16

    def test_star_command_construction(self):
        # One STAR invocation is expected per input FASTQ file; the command
        # lines are captured from standard output via the capsys fixture.
        correct_output = 'STAR --genomeDir /directory/genomeIndex --runThreadN 16 ' \
                         '--readFilesIn a_1.fastq --outFileNamePrefix output_directory/a. --outSAMtype BAM ' \
                         'SortedByCoordinate --genomeLoad LoadAndKeep --limitBAMsortRAM 50000000000 ' \
                         '--outBAMsortingBinsN 31\n' \
                         'STAR --genomeDir /directory/genomeIndex --runThreadN 16 ' \
                         '--readFilesIn b_1.fastq --outFileNamePrefix output_directory/b. --outSAMtype BAM ' \
                         'SortedByCoordinate --genomeLoad LoadAndKeep --limitBAMsortRAM 50000000000 ' \
                         '--outBAMsortingBinsN 31'

        arguments = shlex.split('-t 16 -c /directory/genomeIndex output_directory a_1.fastq b_1.fastq')
        parser = align.STARAligner.get_argument_parser()
        args = parser.parse_args(arguments)

        aligner = align.STARAligner()
        aligner.align_reads(**vars(args))
        captured = capsys.readouterr()
        aligner_output = captured.out.strip()

        assert aligner_output == correct_output

    def test_star_command_construction_gzip(self):
        # Gzipped input should add the --readFilesCommand decompression flag.
        # self.maxDiff = None
        correct_output = 'STAR --genomeDir /directory/genomeIndex --runThreadN 16 ' \
                         '--readFilesIn a_1.fastq.gz --outFileNamePrefix output_directory/a. --outSAMtype BAM ' \
                         'SortedByCoordinate --readFilesCommand gunzip -c --genomeLoad LoadAndKeep ' \
                         '--limitBAMsortRAM 50000000000 --outBAMsortingBinsN 31\n' \
                         'STAR --genomeDir /directory/genomeIndex --runThreadN 16 ' \
                         '--readFilesIn b_1.fastq.gz --outFileNamePrefix output_directory/b. --outSAMtype BAM ' \
                         'SortedByCoordinate --readFilesCommand gunzip -c --genomeLoad LoadAndKeep ' \
                         '--limitBAMsortRAM 50000000000 --outBAMsortingBinsN 31'

        arguments = shlex.split('-c -t 16 /directory/genomeIndex output_directory a_1.fastq.gz b_1.fastq.gz')
        parser = align.STARAligner.get_argument_parser()
        args = parser.parse_args(arguments)

        aligner = align.STARAligner()
        aligner.align_reads(**vars(args))
        captured = capsys.readouterr()
        aligner_output = captured.out.strip()

        assert aligner_output == correct_output
class TestBowtie2Command:
    """Unit tests for parsing bowtie2 log output."""

    def test_log_parsing(self):
        # Parse a canned single-end bowtie2 log and spot-check the counts.
        log_series = align.Bowtie2Aligner.parse_log(BOWTIE2_LOG)
        assert log_series['Total reads'] == 12494316
        assert log_series['Unmapped'] == 72860
        assert log_series['Uniquely mapped'] == 7693710
        assert log_series['Multimapped'] == 4727746
        # The three mapping categories should account for every read.
        assert log_series[2:5].sum() == log_series['Total reads']
class TestKallistoCommand:
    """Unit tests for kallisto command construction and log parsing."""

    def _captured_command(self, capsys, cmdline):
        # Parse *cmdline*, run the aligner, and return the printed command.
        parser = align.KallistoAligner.get_argument_parser()
        args = parser.parse_args(shlex.split(cmdline))
        aligner = align.KallistoAligner()
        aligner.align_reads(**vars(args))
        return capsys.readouterr().out.strip()

    def test_paired_end(self, capsys):
        """Paired FASTQ files should yield a single paired-end quant call."""
        correct_output = 'kallisto quant -i /directory/transcriptome.tdx --bias --rf-stranded -t 8 ' \
                         '-o output_directory/a a_1_R1.fastq.gz a_1_R2.fastq.gz a_2_R1.fastq.gz a_2_R2.fastq.gz'
        aligner_output = self._captured_command(
            capsys,
            '-c -t 8 /directory/transcriptome.tdx output_directory a_1_R1.fastq.gz a_1_R2.fastq.gz '
            'a_2_R1.fastq.gz a_2_R2.fastq.gz')
        assert aligner_output == correct_output

    def test_single_end(self, capsys):
        """Unpaired FASTQ files should add the --single fragment options."""
        correct_output = 'kallisto quant -i /directory/transcriptome.tdx --bias --rf-stranded -t 8 ' \
                         '-o output_directory/a a_1_R1.fastq.gz a_2_R1.fastq.gz --single -l 200 -s 30'
        aligner_output = self._captured_command(
            capsys,
            '-c -t 8 /directory/transcriptome.tdx output_directory a_1_R1.fastq.gz '
            'a_2_R1.fastq.gz')
        assert aligner_output == correct_output

    def test_log_parsing(self):
        # Parse a canned kallisto run JSON and spot-check the counters.
        log_series = align.KallistoAligner.parse_log(KALLISTO_LOG)
        assert log_series['n_unique'] == 16090035
        assert log_series['n_processed'] == 44891704
        assert log_series['n_pseudoaligned'] == 36521130
|
elba7r/frameworking | frappe/installer.py | Python | mit | 12,017 | 0.026213 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# called from wnf.py
# lib/wnf.py --install [rootpassword] [dbname] [source]
from __future__ import unicode_literals
import os, json, sys, subprocess, shutil
import frappe
import frappe.database
import getpass
import importlib
from frappe.model.db_schema import DbManager
from frappe.model.sync import sync_for
from frappe.utils.fixtures import sync_fixtures
from frappe.website import render
from frappe.desk.doctype.desktop_icon.desktop_icon import sync_from_app
from frappe.utils.password import create_auth_table
from frappe.modules.utils import sync_customizations
def install_db(root_login="root", root_password=None, db_name=None, source_sql=None,
        admin_password=None, verbose=True, force=0, site_config=None, reinstall=False):
    """Create the site database and load the base schema into it.

    Writes site_config.json, then either recreates the schema over the existing
    site connection (reinstall=True) or connects as the MySQL root user to
    create the database and its user from scratch.  Finally imports the schema
    from *source_sql* (or the bundled Framework.sql) and sets up core tables.
    """
    make_conf(db_name, site_config=site_config)
    frappe.flags.in_install_db = True
    if reinstall:
        # Site already exists: connect to it and recreate only the database.
        frappe.connect(db_name=db_name)
        dbman = DbManager(frappe.local.db)
        dbman.create_database(db_name)
    else:
        # Fresh install: root credentials are needed to create db + user.
        frappe.local.db = get_root_connection(root_login, root_password)
        frappe.local.session = frappe._dict({'user':'Administrator'})
        create_database_and_user(force, verbose)

    # Keep an admin password already present in site config over the argument.
    frappe.conf.admin_password = frappe.conf.admin_password or admin_password

    frappe.connect(db_name=db_name)
    check_if_ready_for_barracuda()
    import_db_from_sql(source_sql, verbose)

    remove_missing_apps()
    create_auth_table()
    create_list_settings_table()

    frappe.flags.in_install_db = False
def create_database_and_user(force, verbose):
    """Create the site's MySQL database and same-named user (root connection).

    With force, any pre-existing database/user of that name is dropped first;
    otherwise an existing database raises.  Closes the root connection when done.
    """
    db_name = frappe.local.conf.db_name
    dbman = DbManager(frappe.local.db)
    if force or (db_name not in dbman.get_database_list()):
        dbman.delete_user(db_name)
        dbman.drop_database(db_name)
    else:
        raise Exception("Database %s already exists" % (db_name,))

    dbman.create_user(db_name, frappe.conf.db_password)
    if verbose: print "Created user %s" % db_name

    dbman.create_database(db_name)
    if verbose: print "Created database %s" % db_name

    dbman.grant_all_privileges(db_name, db_name)
    dbman.flush_privileges()
    if verbose: print "Granted privileges to user %s and database %s" % (db_name, db_name)

    # close root connection
    frappe.db.close()
def create_list_settings_table():
    """Create the __ListSettings table (per-user, per-doctype list view state)."""
    frappe.db.sql_ddl("""create table if not exists __ListSettings (
        `user` VARCHAR(180) NOT NULL,
        `doctype` VARCHAR(180) NOT NULL,
        `data` TEXT,
        UNIQUE(user, doctype)
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8""")
def import_db_from_sql(source_sql, verbose):
    """Restore the site database from *source_sql*.

    Falls back to the Framework.sql bundled with frappe when no dump is given.
    """
    if verbose: print "Starting database import..."
    db_name = frappe.conf.db_name
    if not source_sql:
        source_sql = os.path.join(os.path.dirname(frappe.__file__), 'data', 'Framework.sql')
    DbManager(frappe.local.db).restore_database(db_name, source_sql, db_name, frappe.conf.db_password)
    if verbose: print "Imported from database %s" % source_sql
def get_root_connection(root_login='root', root_password=None):
    """Return a connection as the MySQL root user, cached on frappe.local.flags.

    Password resolution order: argument, site config key "root_password",
    then an interactive getpass prompt.
    """
    if not frappe.local.flags.root_connection:
        if root_login:
            if not root_password:
                root_password = frappe.conf.get("root_password") or None

            if not root_password:
                root_password = getpass.getpass("MySQL root password: ")

        frappe.local.flags.root_connection = frappe.database.Database(user=root_login, password=root_password)

    return frappe.local.flags.root_connection
def install_app(name, verbose=False, set_as_patched=True):
    """Install app *name* into the current site.

    Installs required apps first (recursively), runs before_install hooks,
    syncs the app's doctypes/fixtures/customizations, and registers it in
    the installed_apps global.  Raises if the app is not in apps.txt; no-ops
    (with a message) if it is already installed.
    """
    frappe.flags.in_install = name
    frappe.clear_cache()
    app_hooks = frappe.get_hooks(app_name=name)
    installed_apps = frappe.get_installed_apps()

    # install pre-requisites
    if app_hooks.required_apps:
        for app in app_hooks.required_apps:
            install_app(app)

    # Re-set the flag: recursive installs above overwrote it.
    frappe.flags.in_install = name
    frappe.clear_cache()

    if name not in frappe.get_all_apps():
        raise Exception("App not in apps.txt")

    if name in installed_apps:
        frappe.msgprint("App {0} already installed".format(name))
        return

    print "Installing {0}...".format(name)

    if name != "frappe":
        frappe.only_for("System Manager")

    # before_install hooks may veto the install by returning False.
    for before_install in app_hooks.before_install or []:
        out = frappe.get_attr(before_install)()
        if out==False:
            return

    if name != "frappe":
        add_module_defs(name)

    sync_for(name, force=True, sync_everything=True, verbose=verbose, reset_permissions=True)
    sync_from_app(name)
    add_to_installed_apps(name)
    frappe.get_doc('Portal Settings', 'Portal Settings').sync_menu()

    if set_as_patched:
        set_all_patches_as_completed(name)

    for after_install in app_hooks.after_install or []:
        frappe.get_attr(after_install)()

    sync_fixtures(name)
    sync_customizations(name)

    frappe.flags.in_install = False
def add_to_installed_apps(app_name, rebuild_website=True):
    """Append *app_name* to the installed_apps global and run post-install steps."""
    installed_apps = frappe.get_installed_apps()
    if not app_name in installed_apps:
        installed_apps.append(app_name)
        frappe.db.set_global("installed_apps", json.dumps(installed_apps))
        frappe.db.commit()
        post_install(rebuild_website)
def remove_from_installed_apps(app_name):
    """Drop *app_name* from the installed_apps global (if present)."""
    installed_apps = frappe.get_installed_apps()
    if app_name in installed_apps:
        installed_apps.remove(app_name)
        frappe.db.set_global("installed_apps", json.dumps(installed_apps))
        frappe.db.commit()
        # Only re-run post-install housekeeping while an install is in progress.
        if frappe.flags.in_install:
            post_install()
def remove_app(app_name, dry_run=False, yes=False):
    """Delete app and all linked to the app's module with the app.

    Asks for confirmation (unless *yes*), takes a backup, deletes every
    DocType/Report/Page/Web Form in the app's modules, then drops the
    backing tables.  With *dry_run* only prints what would be removed.
    """
    if not dry_run and not yes:
        confirm = raw_input("All doctypes (including custom), modules related to this app will be deleted. Are you sure you want to continue (y/n) ? ")
        if confirm!="y":
            return

    from frappe.utils.backups import scheduled_backup
    print "Backing up..."
    scheduled_backup(ignore_files=True)

    drop_doctypes = []

    # remove modules, doctypes, roles
    for module_name in frappe.get_module_list(app_name):
        for doctype in frappe.get_list("DocType", filters={"module": module_name},
            fields=["name", "issingle"]):
            print "removing DocType {0}...".format(doctype.name)
            if not dry_run:
                frappe.delete_doc("DocType", doctype.name)
                if not doctype.issingle:
                    # Single doctypes have no dedicated table to drop later.
                    drop_doctypes.append(doctype.name)

        # remove reports, pages and web forms
        for doctype in ("Report", "Page", "Web Form"):
            for record in frappe.get_list(doctype, filters={"module": module_name}):
                print "removing {0} {1}...".format(doctype, record.name)
                if not dry_run:
                    frappe.delete_doc(doctype, record.name)

        print "removing Module {0}...".format(module_name)
        if not dry_run:
            frappe.delete_doc("Module Def", module_name)

    # delete desktop icons
    frappe.db.sql('delete from `tabDesktop Icon` where app=%s', app_name)

    remove_from_installed_apps(app_name)

    if not dry_run:
        # drop tables after a commit
        frappe.db.commit()

        for doctype in set(drop_doctypes):
            frappe.db.sql("drop table `tab{0}`".format(doctype))
def post_install(rebuild_website=False):
    """Finish an (un)install: optionally clear the website cache, seed single doctypes, commit."""
    if rebuild_website:
        render.clear_cache()

    init_singles()
    frappe.db.commit()
    frappe.clear_cache()
def set_all_patches_as_completed(app):
    """Mark every patch listed in the app's patches.txt as already applied.

    Creates one Patch Log entry per line so a freshly installed app does not
    re-run historical migrations.  (Repairs stray '|' corruption that had
    split the get_doc call.)
    """
    patch_path = os.path.join(frappe.get_pymodule_path(app), "patches.txt")
    if os.path.exists(patch_path):
        for patch in frappe.get_file_items(patch_path):
            frappe.get_doc({
                "doctype": "Patch Log",
                "patch": patch
            }).insert(ignore_permissions=True)
        frappe.db.commit()
def init_singles():
    """Create an empty document for every single doctype that has no values yet."""
    singles = [single['name'] for single in frappe.get_all("DocType", filters={'issingle': True})]
    for single in singles:
        if not frappe.db.get_singles_dict(single):
            doc = frappe.new_doc(single)
            # Defaults only -- nothing has been entered yet, so skip validation.
            doc.flags.ignore_mandatory=True
            doc.flags.ignore_validate=True
            doc.save()
def make_conf(db_name=None, db_password=None, site_config=None):
    """Write site_config.json for the current site and re-init frappe against it.

    (Repairs stray '|' corruption inside the ``site_config=None`` default.)
    """
    site = frappe.local.site
    make_site_config(db_name, db_password, site_config)
    sites_path = frappe.local.sites_path
    # Re-initialise so the freshly written config is picked up.
    frappe.destroy()
    frappe.init(site, sites_path=sites_path)
def make_site_config(db_name=None, db_password=None, site_config=None):
    """Write site_config.json if it does not exist yet.

    Uses the provided *site_config* dict when valid; otherwise builds one
    from the db credentials via get_conf_params.  Never overwrites an
    existing config file.
    """
    frappe.create_folder(os.path.join(frappe.local.site_path))
    site_file = get_site_config_path()

    if not os.path.exists(site_file):
        if not (site_config and isinstance(site_config, dict)):
            site_config = get_conf_params(db_name, db_password)

        with open(site_file, "w") as f:
            f.write(json.dumps(site_config, indent=1, sort_keys=True))
def update_site_config(key, val |
nlucent/flansible | Flansible/flansible/__init__.py | Python | mit | 2,810 | 0.005338 | import platform
import os
import time
import sys
import json
from threading import Thread
from subprocess import Popen, PIPE
import subprocess
from Queue import Queue, Empty
from datetime import datetime
from ConfigParser import SafeConfigParser
from flask import render_template
from flask import Flask, request, render_template, session, flash, redirect, url_for, jsonify
from flask_httpauth import HTTPBasicAuth
from flask_restful import Resource, Api, reqparse, fields
from flask_restful_swagger import swagger
import celery.events.state
from celery import Celery
from ModelClasses import AnsibleCommandModel, AnsiblePlaybookModel, AnsibleRequestResultModel, AnsibleExtraArgsModel
# Setup queue for celery worker output streaming.
io_q = Queue()

app = Flask(__name__)
auth = HTTPBasicAuth()

this_path = sys.path[0]
config = SafeConfigParser()
config.read('config.ini')

# Best effort: fall back to the stock ansible inventory when the config is
# missing or unreadable.  NOTE(review): the inventory value is read from
# config.ini ("Defaults" section), not from ansible.cfg -- confirm intended.
ansible_config = SafeConfigParser()
try:
    ansible_config.read('/etc/ansible/ansible.cfg')
    ansible_default_inventory = config.get("Defaults", "inventory")
except:
    ansible_default_inventory = '/etc/ansible/hosts'

app.config['broker_url'] = config.get("Default", "CELERY_BROKER_URL")
app.config['result_backend'] = config.get("Default", "CELERY_RESULT_BACKEND")
str_task_timeout = config.get("Default", "CELERY_TASK_TIMEOUT")
# Repaired corrupted config key ("playbook_ro | ot" -> "playbook_root").
playbook_root = config.get("Default", "playbook_root")
playbook_filter = config.get("Default", "playbook_filter")
task_timeout = int(str_task_timeout)

api = swagger.docs(Api(app), apiVersion='0.1')
celery = Celery(app.name, broker=app.config['broker_url'], backend=app.config['result_backend'])
celery.control.time_limit('do_long_running_task', soft=900, hard=900, reply=True)
celery.conf.update(app.config)

inventory_access = []
def get_inventory_access(username, inventory):
    """Return True if *username* may run jobs against *inventory*.

    The built-in "admin" account has access to every inventory; other users
    are checked against their per-user inventory lists in rbac.json.
    (Repairs stray '|' corruption inside the ``inventory`` parameter name.)
    """
    if username == "admin":
        return True
    with open("rbac.json") as rbac_file:
        rbac_data = json.load(rbac_file)
    for user in rbac_data['rbac']:
        if user['user'] == username and inventory in user['inventories']:
            return True
    return False
@auth.verify_password
def verify_password(username, password):
    """Flask-HTTPAuth callback: check credentials against rbac.json.

    NOTE(review): passwords are stored and compared in plain text with a
    non-constant-time comparison -- confirm whether hashing is intended.
    """
    result = False
    with open("rbac.json") as rbac_file:
        rbac_data = json.load(rbac_file)
    user_list = rbac_data['rbac']
    for user in user_list:
        if user['user'] == username:
            if user['password'] == password:
                result = True
                # NOTE(review): this binds a local, not the module-level
                # inventory_access list -- the value is discarded.
                inventory_access = user['inventories']
    return result
#routes
import flansible.run_ansible_command
import flansible.run_ansible_playbook
import flansible.ansible_task_output
import flansible.ansible_task_status
import flansible.git
import flansible.list_playbooks
import flansible.front_end
|
andreasscalas/dappforum | samples/vote.py | Python | mit | 734 | 0.023161 | #!/usr/bin/python3
# Copyright (c) 2015 Davide Gessa
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from libcontractvm import Wallet, WalletExplorer, ConsensusManager
from forum import ForumManager
import sys
import time

# Connect to a local consensus node and load the test wallet.
consMan = ConsensusManager.ConsensusManager ()
consMan.bootstrap ("http://127.0.0.1:8181")
wallet = WalletExplorer.WalletExplorer (wallet_file='test.wallet')
srMan = ForumManager.ForumManager (consMan, wallet=wallet)

# Interactive loop: prompt for a poll id and a choice, then broadcast the vote.
# (Repairs '|' corruption in the license comment and the first prompt string.)
while True:
    pollid = input ('Insert the id of the poll: ')
    choice = input ('Insert the choice for the poll: ')
    try:
        print ('Broadcasted:', srMan.vote (pollid, choice))
    except:
        # NOTE(review): bare except hides the actual failure reason; consider
        # catching Exception and printing it.
        print ('Error.')
|
cuescience/cuescience-shop | shop/tests/support/model_support.py | Python | mit | 1,689 | 0.007697 | from cuescience_shop.models import Client, Address, Order
from natspec_utils.decorators import TextSyntax
from cart.cart import Cart
from django.test.client import Client as TestClient
class ClientTestSupport(object):
    """NatSpec step definitions for creating and asserting shop fixtures.

    Each @TextSyntax method backs one natural-language test step.
    (Repairs '|' corruption inside create_client's signature and the
    "Assert client number" step text.)
    """

    def __init__(self, test_case):
        self.test_case = test_case
        self.client = TestClient()

    @TextSyntax("Create address #1 #2 #3 #4", types=["str", "str", "str", "str"], return_type="Address")
    def create_address(self, street, number, postcode, city):
        """Create and persist an Address."""
        address = Address(street=street, number=number, postcode=postcode, city=city)
        address.save()
        return address

    @TextSyntax("Create client #1 #2", types=["str", "str", "Address"], return_type="Client")
    def create_client(self, first_name, last_name, address):
        """Create and persist a Client using *address* for shipping and billing."""
        client = Client(first_name=first_name, last_name=last_name, shipping_address=address, billing_address=address)
        client.save()
        return client

    @TextSyntax("Create order", types=["Client"], return_type="Order")
    def create_order(self, client):
        """Create and persist an Order with a fresh cart for *client*."""
        cart = Cart(self.client)
        cart.create_cart()
        cart = cart.cart
        order = Order(client=client, cart=cart)
        order.save()
        return order

    @TextSyntax("Assert client number is #1", types=["str", "Client"])
    def assert_client_number(self, client_number, client):
        self.test_case.assertEqual(client_number, client.client_number)

    @TextSyntax("Assert order number is #1", types=["str", "Order"])
    def assert_order_number(self, order_number, order):
        self.test_case.assertEqual(order_number, order.order_number)
|
class ActionType:
    """Integer codes for trooper actions in the CodeTroopers game API.

    (Repairs '|' corruption inside USE_MEDIKIT and HEAL.)
    """
    END_TURN = 0
    MOVE = 1
    SHOOT = 2
    RAISE_STANCE = 3
    LOWER_STANCE = 4
    THROW_GRENADE = 5
    USE_MEDIKIT = 6
    EAT_FIELD_RATION = 7
    HEAL = 8
    REQUEST_ENEMY_DISPOSITION = 9
McIntyre-Lab/papers | fear_sem_sd_2015/scripts/ggm_graph_network_neighborhoods.py | Python | lgpl-3.0 | 7,295 | 0.006717 | #!/usr/bin/env python
import os
import logging
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import pickle
def setLogger(fname, loglevel):
    """ Function to handle error logging """
    # Overwrite any previous log file and stamp each record with time + level.
    log_format = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(filename=fname, filemode='w', level=loglevel, format=log_format)
def readData(fname):
    """ Importing a large DOT file is slow. This function will read a pickle
    file if available. If no pickle, then read DOT and create a pickle for next
    time. """
    # The pickle cache lives next to the DOT file with a .gpickle extension.
    pname = os.path.splitext(fname)[0] + ".gpickle"
    try:
        # If there is a pickle, unpickle it
        logging.info("Unpickling file")
        nxGraph = nx.Graph(nx.read_gpickle(pname))
    except:
        # NOTE(review): bare except also hides unrelated errors (corrupt
        # pickle, permissions); narrowing to IOError/OSError would be safer.
        logging.info("No Pickled file, will import DOT")
        try:
            # No pickle, try the dot file
            logging.info("Importing dot file")
            nxGraph = nx.Graph(nx.read_dot(fname))
            # Make pickle for next time
            logging.info("Pickle graph for later use.")
            nx.write_gpickle(nxGraph,pname)
        except Exception:
            # NOTE(review): if both reads fail, nxGraph is unbound and the
            # return below raises NameError after logging this message.
            logging.exception("Please provide a DOT formated file.")
    return(nxGraph)
def getNeighbors(nxGraph, target, geneList):
    """ Search the primary and secondary neighborhoods. Return neighbors and counts. """
    genesOfInterest = set(geneList)

    # First shell: direct neighbors of the target node.
    primSet = set(nxGraph.neighbors(target))

    # Second shell: neighbors of every first-shell node (may include target).
    secSet = set()
    for node in primSet:
        secSet.update(nxGraph.neighbors(node))

    # Overlap of each shell with the genes of interest.
    sexPrim = primSet & genesOfInterest
    sexSec = secSet & genesOfInterest

    numPrimary = len(primSet)
    numSecondary = len(secSet)
    numPrimEnrich = len(sexPrim)
    numSecEnrich = len(sexSec)
    numEnrich = numPrimEnrich + numSecEnrich

    return (primSet, secSet, sexPrim, sexSec, numPrimary, numSecondary, numPrimEnrich, numSecEnrich, numEnrich)
def getFigName(target, odir):
    """ Create an output name for creating all of the neighborhood subgraphs """
    return os.path.join(odir, "{0}_ggm_subgraph.png".format(target))
def graphSubplot(nxGraph, target, primary, secondary, geneList, spliceList, oname):
    """ Plot primary and secondary neighborhood subgraph """
    # Subgraph nodes: the target, its whole first shell, and only the
    # second-shell nodes that are also genes of interest.
    subsetList = [target] + list(primary) + list(secondary & set(geneList))
    nxSubGraph = nx.Graph(nx.subgraph(nxGraph, subsetList))
    labelDict = dict()
    colorList = list()
    sizeList = list()
    for gene in nxSubGraph.nodes_iter():
        if gene in spliceList:
            # If gene is a splicing factor color it purple if FG otherwise color royal blue
            # NOTE(review): a splicing factor that is neither the target nor in
            # geneList appends nothing here, which would misalign sizeList/
            # colorList with the node order -- confirm this case cannot occur.
            if gene == target:
                labelDict[gene] = gene
                sizeList.append(3000)
                colorList.append('#d000ff')
            elif gene in geneList:
                labelDict[gene] = gene
                sizeList.append(3000)
                colorList.append('#007ffd')
        elif gene in geneList:
            # If gene is in Sex Det color it red if FG otherwise color light blue
            if gene == target:
                labelDict[gene] = gene
                sizeList.append(3000)
                colorList.append('red')
            else:
                labelDict[gene] = gene
                sizeList.append(3000)
                colorList.append('#aeeeee')
        else:
            # If gene is not in Sex Det make it small and color it green if 'CG7705' otherwise color grey
            if gene == "CG7705":
                labelDict[gene] = ''
                sizeList.append(300)
                colorList.append('#00f554')
            else:
                labelDict[gene] = ''
                sizeList.append(300)
                colorList.append('grey')

    # Generate layout using force spring algorithm
    pos = nx.spring_layout(nxSubGraph, iterations=200, k=.2)

    # Plot Subgraph
    fig = plt.figure(figsize=(20,20),dpi=300)
    nx.draw_networkx_nodes(nxSubGraph,pos,node_size=sizeList,node_color=colorList)
    nx.draw_networkx_labels(nxSubGraph,pos,labelDict)
    nx.draw_networkx_edges(nxSubGraph,pos)
    plt.axis('off')
    fig.savefig(oname, format='png', bbox_inches='tight')
    plt.close()
def writeHeader(handle):
    """ Write a header on to the csv file """
    header = ("gene,primary,secondary,sexPrimary, sexSecondary, num_primary,"
              " num_secondary, num_primaryEnrich, num_secondaryEnrich, num_enrich\n")
    handle.write(header)
def writeOutput(gene, primary, secondary, sexPrim, sexSec, numPrimary, numSecondary, numPrimEnrich, numSecEnrich, numEnrich, handle):
    """ Write one output CSV row; multi-valued columns are '|'-delimited.

    (Repairs corruption that had broken the docstring quotes and inserted a
    stray '|' into the secondary-column comprehension.)
    """
    prim = '|'.join([str(x) for x in primary])
    sec = '|'.join([str(x) for x in secondary])
    pSet = '|'.join([str(x) for x in sexPrim])
    sSet = '|'.join([str(x) for x in sexSec])
    handle.write("{0},{1},{2},{3},{4},{5},{6},{7},{8},{9}\n".format(gene, prim, sec, pSet, sSet, numPrimary, numSecondary, numPrimEnrich, numSecEnrich, numEnrich))
if __name__ == "__main__":
    # Hard-coded input/output locations for the DSRP GGM isoform network run.
    lname = '/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/ggm/dsrp_ggm_isoforms_FDR2_neighbor_table.log'
    dname = '/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/ggm/dsrp_ggm_isoforms_FDR2.dot'
    oname = '/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/ggm/dsrp_ggm_isoforms_FDR2_neighbor_table.csv'
    odir = '/home/jfear/mclab/cegs_sem_sd_paper/analysis_output/ggm/subgraphs'

    # Turn on Logging if option --log was given
    setLogger(lname,logging.INFO)

    # Import Dot File
    mygraph = readData(dname)

    # Create gene list by pulling all genes that don't start with 'CG'
    logging.info("Creating gene list")
    geneList = [x for x in mygraph.nodes_iter(data=False) if not x.startswith('CG')]

    # I am wanting to highlight splicing factors in my output graph
    logging.info("Creating splicing factor list")
    splicingFactors = ['vir', 'Rbp1_PD', 'Rbp1_PA', 'B52_PI', 'B52_PK',
        'B52_PM', 'B52_PN', 'B52_PO', 'B52_PA', 'B52_PB', 'B52_PC',
        'B52_PD', 'B52_PF', 'sqd_PE', 'sqd_PD', 'sqd_PA', 'sqd_PC',
        'sqd_PB', 'Psi', 'mub_PL', 'mub_PK', 'mub_PG', 'mub_PA', 'Rm62',
        'snf', 'mub_PM', 'mub_PI', 'mub_PH', 'mub_PE', 'mub_PD', 'mub_PF',
        'mub_PC', 'Spf45_PA', 'ps', 'Spf45_PC', 'Spf45_PB']

    # Explore the neighborhood and make CSV table and subplots
    logging.info("Finding neighbors and writing output")
    with open(oname, 'w') as OUT:
        writeHeader(OUT)
        # Iterate through all genes in Sex Det
        for gene in geneList:
            fName = getFigName(gene, odir)
            # Calculate primary and secondary nearest neighbors
            primary, secondary, sexPrim, sexSec, numPrimary, numSecondary, numPrimEnrich, numSecEnrich, numEnrich = getNeighbors(mygraph, gene, geneList)
            writeOutput(gene, primary, secondary, sexPrim, sexSec, numPrimary, numSecondary, numPrimEnrich, numSecEnrich, numEnrich, OUT)
            # Create neighborhood subplots so neighborhoods can be visualized
            graphSubplot(mygraph, gene, primary, secondary, geneList, splicingFactors, fName)

    logging.info("Script Complete")
|
WZQ1397/automatic-repo | python/modules/ForgeryPyGenerateVirtualData.py | Python | lgpl-3.0 | 706 | 0.048159 | from forgery_py import *
# Print 20 fake people with contact details, a short CV and a random comment.
# (Repairs '|' corruption inside the email_address() and str(randDate) calls.)
for x in range(20):
    randPerson = name.first_name(), name.last_name(), personal.gender(), name.location(), address.phone()
    randCV = lorem_ipsum.title(), lorem_ipsum.sentence()
    randAddr = address.city(), address.state(), address.country(), address.continent()
    randEmail = internet.email_address()
    randColor = basic.hex_color()
    randComment = basic.text(200)
    randDate = date.date()
    print("name: {}\n gender: {}\n home: {}\n phone: {}\n email: {}".
          format(randPerson[:2], randPerson[2], randPerson[3], randPerson[4], randEmail))
    print(f" CV: {randCV}")
    print(f" favourite color: {randColor}")
    print(f" comment: {randComment}")
    print("handout date: {:#^50s}".format(str(randDate)))
josircg/raizcidadanista | raizcidadanista/filebrowser/views.py | Python | gpl-3.0 | 127 | 0.015748 | # As | of version 3.4, all views moved to FileBrowserSite class in filebrowser.sites
# This file wil | l be removed with version 3.6 |
rodrigoasmacedo/PySPED | pysped/cte/__init__.py | Python | lgpl-2.1 | 3,165 | 0 | # -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from .leiaute import (CTe_104,
CancCTe_104,
ConsReciCTe_104,
ConsSitCTe_104,
ConsStatServCTe_104,
Dup_104,
EnviCTe_104,
InfNF_104,
InfNFe_104,
InfOutros_104,
InfQ_104,
InutCTe_104,
ObsCont_104,
ObsFisco_104,
Pass_104,
ProcCTe_104,
ProcCancCTe_104,
ProcInutCTe_104,
ProtCTe_104,
RetCancCTe_104,
RetConsReciCTe_104,
RetConsSitCTe_104,
RetConsStatServCTe_104,
RetEnviCTe_104,
RetInutCTe_104,
SOAPEnvio_104,
SOAPRetorno_104)
# Pyflakes
# Reference each re-exported leiaute name once so static checkers do not
# flag the import above as unused; this module only re-exports them.
CTe_104
CancCTe_104
ConsReciCTe_104
ConsSitCTe_104
ConsStatServCTe_104
Dup_104
EnviCTe_104
InfNF_104
InfNFe_104
InfOutros_104
InfQ_104
InutCTe_104
ObsCont_104
ObsFisco_104
Pass_104
ProcCTe_104
ProcCancCTe_104
ProcInutCTe_104
ProtCTe_104
RetCancCTe_104
RetConsReciCTe_104
RetConsSitCTe_104
RetConsStatServCTe_104
RetEnviCTe_104
RetInutCTe_104
SOAPEnvio_104
SOAPRetorno_104
|
ambitioninc/django-smart-manager | smart_manager/base.py | Python | mit | 1,637 | 0.004276 | from copy import deepcopy
from manager_utils import upsert
class BaseSmartManager(object):
    """Base class for smart managers that build model objects from a template.

    Subclasses implement build(); objects created through build_obj with
    is_deletable=True are tracked in built_objs so the smart manager can
    later delete what it owns.  (Repairs '|' corruption in build_using's
    docstring and its ``|=`` accumulation statement.)
    """

    def __init__(self, template):
        # Deep-copy so builders may mutate the template freely.
        self._template = deepcopy(template)
        self._built_objs = set()

    @property
    def built_objs(self):
        """Set of deletable objects this manager has built so far."""
        return self._built_objs

    def build_obj(self, model_class, is_deletable=True, updates=None, defaults=None, **kwargs):
        """
        Builds an object using the upsert function in manager utils. If the object can be deleted
        by the smart manager, it is added to the internal _built_objs list and returned.
        """
        built_obj = upsert(model_class.objects, updates=updates, defaults=defaults, **kwargs)[0]
        if is_deletable:
            self._built_objs |= set([built_obj])
        return built_obj

    def build_using(self, smart_manager_class, template):
        """
        Builds objects using another builder and a template. Adds the resulting built objects
        from that builder to the built objects of this builder.
        """
        smart_manager = smart_manager_class(template)
        built_objs = smart_manager.build()
        self._built_objs |= smart_manager.built_objs

        # make sure built objs is a list or tuple (exact type check kept to
        # preserve the original behavior for subclasses of list/tuple)
        if type(built_objs) not in (list, tuple,):
            built_objs = [built_objs]
        return built_objs

    def build(self):
        """
        All builders must implement the build function, which returns the built object. All build
        functions must also maintain an interal list of built objects, which are accessed by
        self.built_objs.
        """
        raise NotImplementedError
|
source("../../shared/qtcreator.py")
source("../../shared/suites_qtta.py")
source("../shared/aptw.py")

# test New Qt Gui Application build and run for release and debug option
# (Repairs '|' corruption in the first source() path and the
# createProject_Qt_GUI call.)
def main():
    startApplication("qtcreator" + SettingsPath)
    checkedTargets = createProject_Qt_GUI(tempDir(), "SampleApp")
    # run project for debug and release and verify results
    runVerify(checkedTargets)
    # close Qt Creator
    invokeMenuItem("File", "Exit")
|
google/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_constants.py | Python | apache-2.0 | 15,389 | 0.004809 | '''
This module holds the constants used for specifying the states of the debugger.
'''
from __future__ import nested_scopes
import platform
import sys # Note: the sys import must be here anyways (others depend on it)
STATE_RUN = 1
STATE_SUSPEND = 2
PYTHON_SUSPEND = 1
DJANGO_SUSPEND = 2
JINJA2_SUSPEND = 3
JUPYTER_SUSPEND = 4
class DebugInfoHolder:
    # we have to put it here because it can be set through the command line (so, the
    # already imported references would not have it).
    DEBUG_RECORD_SOCKET_READS = False  # when True, log raw socket traffic
    DEBUG_TRACE_LEVEL = -1             # -1 means "not set"; higher is more verbose
    DEBUG_TRACE_BREAKPOINTS = -1       # -1 means "not set"
# Interpreter implementation flags used throughout pydevd.
IS_CPYTHON = platform.python_implementation() == 'CPython'

# Hold a reference to the original _getframe (because psyco will change that as soon as it's imported)
IS_IRONPYTHON = sys.platform == 'cli'
try:
    get_frame = sys._getframe
    if IS_IRONPYTHON:
        # IronPython may raise ValueError when frames are disabled.
        def get_frame():
            try:
                return sys._getframe()
            except ValueError:
                pass

except AttributeError:
    def get_frame():
        raise AssertionError('sys._getframe not available (possible causes: enable -X:Frames on IronPython?)')

# Used to determine the maximum size of each variable passed to eclipse -- having a big value here may make
# the communication slower -- as the variables are being gathered lazily in the latest version of eclipse,
# this value was raised from 200 to 1000.
MAXIMUM_VARIABLE_REPRESENTATION_SIZE = 1000

# Prefix for saving functions return values in locals
RETURN_VALUES_DICT = '__pydevd_ret_val_dict'

original_excepthook = sys.__excepthook__

def dummy_excepthook(exctype, value, traceback):
    # Used to silence the default excepthook; intentionally does nothing.
    return None

import os

from _pydevd_bundle import pydevd_vm_type

# Constant detects when running on Jython/windows properly later on.
IS_WINDOWS = sys.platform == 'win32'

IS_JYTHON = pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON
IS_JYTH_LESS25 = False

if IS_JYTHON:
    import java.lang.System  # @UnresolvedImport
    IS_WINDOWS = java.lang.System.getProperty("os.name").lower().startswith("windows")
    if sys.version_info[0] == 2 and sys.version_info[1] < 5:
        IS_JYTH_LESS25 = True
elif IS_IRONPYTHON:
    import System
    IS_WINDOWS = "windows" in System.Environment.OSVersion.VersionString.lower()

IS_64BIT_PROCESS = sys.maxsize > (2 ** 32)
IS_LINUX = sys.platform.startswith('linux')
IS_MACOS = sys.platform == 'darwin'

IS_PYTHON_STACKLESS = "stackless" in sys.version.lower()
CYTHON_SUPPORTED = False

# numpy dtype kind codes (dtype.kind) used when summarizing arrays.
NUMPY_NUMERIC_TYPES = "biufc"
NUMPY_FLOATING_POINT_TYPES = "fc"
# b boolean
# i signed integer
# u unsigned integer
# f floating-point
# c complex floating-point
# Decide whether the Cython accelerator modules can be used: CPython only
# (not Stackless), 2.7 or 3.5+.  (Repairs '|' corruption around the
# ``import platform`` line and the CPython condition.)
try:
    import platform
    python_implementation = platform.python_implementation()
except:
    pass
else:
    if python_implementation == 'CPython' and not IS_PYTHON_STACKLESS:
        # Only available for CPython!
        if (
                (sys.version_info[0] == 2 and sys.version_info[1] >= 7)
                or (sys.version_info[0] == 3 and sys.version_info[1] >= 5)
                or (sys.version_info[0] > 3)
        ):
            # Supported in 2.7 or 3.5 onwards (32 or 64)
            CYTHON_SUPPORTED = True
IS_PYCHARM_ATTACH = os.getenv('PYCHARM_ATTACH') == 'True'

#=======================================================================================================================
# Python 3?
#=======================================================================================================================
# Version flags; defaults assume Python 2 and are corrected below.
IS_PY3K = False
IS_PY34_OR_GREATER = False
IS_PY36_OR_GREATER = False
IS_PY37_OR_GREATER = False
IS_PY36_OR_LESSER = False
IS_PY38_OR_GREATER = False
IS_PY2 = True
IS_PY27 = False
IS_PY24 = False
try:
    if sys.version_info[0] >= 3:
        IS_PY3K = True
        IS_PY2 = False
        IS_PY34_OR_GREATER = sys.version_info >= (3, 4)
        IS_PY36_OR_GREATER = sys.version_info >= (3, 6)
        IS_PY37_OR_GREATER = sys.version_info >= (3, 7)
        IS_PY36_OR_LESSER = sys.version_info[:2] <= (3, 6)
        IS_PY38_OR_GREATER = sys.version_info >= (3, 8)
        IS_PY39_OR_GREATER = sys.version_info >= (3, 9)
    elif sys.version_info[0] == 2 and sys.version_info[1] == 7:
        IS_PY27 = True
    elif sys.version_info[0] == 2 and sys.version_info[1] == 4:
        IS_PY24 = True
except AttributeError:
    pass  # Not all versions have sys.version_info

try:
    SUPPORT_GEVENT = os.getenv('GEVENT_SUPPORT', 'False') == 'True'
except:
    # Jython 2.1 doesn't accept that construct
    SUPPORT_GEVENT = False

try:
    DROP_INTO_DEBUGGER_ON_FAILED_TESTS = os.environ.get('DROP_INTO_DEBUGGER_ON_FAILED_TESTS', 'False') == 'True'
except:
    DROP_INTO_DEBUGGER_ON_FAILED_TESTS = False

# At the moment gevent supports Python >= 2.6 and Python >= 3.3
USE_LIB_COPY = SUPPORT_GEVENT and \
               ((not IS_PY3K and sys.version_info[1] >= 6) or
                (IS_PY3K and sys.version_info[1] >= 3))
class ValuesPolicy:
    # How variable values are loaded for the IDE.
    SYNC = 0       # load values eagerly with the variables
    ASYNC = 1      # load values in the background
    ON_DEMAND = 2  # load values only when the user requests them
# Value-loading policy, overridable through environment variables.
LOAD_VALUES_POLICY = ValuesPolicy.SYNC
if os.getenv('PYDEVD_LOAD_VALUES_ASYNC', 'False') == 'True':
    LOAD_VALUES_POLICY = ValuesPolicy.ASYNC
if os.getenv('PYDEVD_LOAD_VALUES_ON_DEMAND', 'False') == 'True':
    LOAD_VALUES_POLICY = ValuesPolicy.ON_DEMAND
# Placeholder variable names sent to the IDE while real values are pending.
DEFAULT_VALUES_DICT = {ValuesPolicy.ASYNC: "__pydevd_value_async", ValuesPolicy.ON_DEMAND: "__pydevd_value_on_demand"}

INTERACTIVE_MODE_AVAILABLE = sys.platform in ('darwin', 'win32') or os.getenv('DISPLAY') is not None

IS_PYCHARM = True

ASYNC_EVAL_TIMEOUT_SEC = 60
NEXT_VALUE_SEPARATOR = "__pydev_val__"
BUILTINS_MODULE_NAME = '__builtin__' if IS_PY2 else 'builtins'

SHOW_DEBUG_INFO_ENV = os.getenv('PYCHARM_DEBUG') == 'True' or os.getenv('PYDEV_DEBUG') == 'True'

# If True, CMD_SET_NEXT_STATEMENT and CMD_RUN_TO_LINE commands have responses indicating success or failure.
GOTO_HAS_RESPONSE = IS_PYCHARM

if SHOW_DEBUG_INFO_ENV:
    # show debug info before the debugger start
    DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = True
    DebugInfoHolder.DEBUG_TRACE_LEVEL = 3
    DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = 1
def protect_libraries_from_patching():
    """
    In this function we delete some modules from `sys.modules` dictionary and import them again inside
    `_pydev_saved_modules` in order to save their original copies there. After that we can use these
    saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
    """
    # Both Py2 and Py3 module names are listed; only the ones importable on
    # the current interpreter actually end up in sys.modules.
    patched = ['threading', 'thread', '_thread', 'time', 'socket', 'Queue', 'queue', 'select',
               'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer',
               'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver']
    for name in patched:
        try:
            __import__(name)
        except:
            pass  # module not available on this Python version
    # Snapshot the already-imported modules so they can be restored afterwards.
    patched_modules = dict([(k, v) for k, v in sys.modules.items()
                            if k in patched])
    for name in patched_modules:
        del sys.modules[name]
    # import for side effects
    import _pydev_imps._pydev_saved_modules
    # Restore the original (possibly already-patched) modules for user code.
    for name in patched_modules:
        sys.modules[name] = patched_modules[name]
# Save pristine stdlib copies before anything (e.g. gevent) can patch them.
if USE_LIB_COPY:
    protect_libraries_from_patching()
from _pydev_imps._pydev_saved_modules import thread
# Debugger-internal lock and thread-id helper taken from the saved module.
_thread_id_lock = thread.allocate_lock()
thread_get_ident = thread.get_ident
# Py2/Py3 dict-accessor shims: dict_keys/dict_values/dict_items always return
# lists; dict_iter_values/dict_iter_items return lazy views where available.
if IS_PY3K:
    def dict_keys(d):
        return list(d.keys())
    def dict_values(d):
        return list(d.values())
    dict_iter_values = dict.values
    def dict_iter_items(d):
        return d.items()
    def dict_items(d):
        return list(d.items())
else:
    def dict_keys(d):
        return d.keys()
    try:
        dict_iter_values = dict.itervalues
    except:
        try:
            dict_iter_values = dict.values # Older versions don't have the itervalues
        except:
            def dict_iter_values(d):
                return d.values()
    try:
        dict_values = dict.values
    except:
        def dict_values(d):
            return d.values()
    def dict_iter_items(d):
        # Prefer the lazy iterator; fall back for dict-likes without iteritems.
        try:
            return d.iteritems()
        except:
            return d.items()
    def dict_items(d):
        return d.items()
try:
xrange = xran |
foxscotch/advent-of-code | 2020/19/p1.py | Python | mit | 2,106 | 0.00095 | # Python 3.8.3
import re
class Rule:
    """One numbered rule of the puzzle grammar (Advent of Code 2020 day 19).

    A rule is either a literal letter ('a'/'b') or up to two alternative
    sequences of sub-rule ids; -1 marks an absent slot.
    """
    PATTERN = re.compile(r"(\d+): (\d+|\"[ab]\")( \d+)?(?: \| (\d+)( \d+)?)?")
    RULES = {}
    def __init__(self, string):
        parsed = self.PATTERN.match(string)
        self.id = int(parsed.group(1))
        # Every instance registers itself in the shared class-level table.
        self.RULES[self.id] = self
        first = parsed.group(2)
        if first.isnumeric():
            self.letter = None
            self.subrule1 = (int(first), int(parsed.group(3) or -1))
            self.subrule2 = (int(parsed.group(4) or -1), int(parsed.group(5) or -1))
        else:
            self.letter = first.strip('"')
            self.subrule1 = None
            self.subrule2 = None
    def construct_pattern(self):
        """Recursively expand this rule into a regex fragment."""
        if self.letter:
            return self.letter
        pieces = [self.RULES[self.subrule1[0]].construct_pattern()]
        if self.subrule1[1] >= 0:
            pieces.append(self.RULES[self.subrule1[1]].construct_pattern())
        left = "".join(pieces)
        right = "".join(
            self.RULES[ref].construct_pattern() for ref in self.subrule2 if ref >= 0
        )
        return f"({left}|{right})" if right else left
    def __repr__(self):
        if self.letter:
            return f"Rule(\"{self.id}: '{self.letter}'\")"
        a, b = self.subrule1
        c, d = self.subrule2
        return f'Rule("{self.id}: {a} {b} | {c} {d}")'
def get_input():
    """Parse input.txt; yield the list of Rule objects, then the message list.

    The file has two blank-line-separated sections: rule definitions and
    messages. Instantiating each Rule registers it in Rule.RULES.
    """
    # Fix: the original line was corrupted by a stray '|' dataset artifact
    # ("rules, messages | = ..."), which is a syntax error.
    with open("input.txt", "r") as f:
        rules, messages = f.read().split("\n\n")
    yield [Rule(s) for s in rules.splitlines()]
    yield messages.splitlines()
def main():
    """Return how many messages fully match the regex built from rule 0."""
    # Unpacking drives the generator's first yield, which registers all
    # rules in Rule.RULES as a side effect; `rules` itself is unused.
    rules, messages = get_input()
    pattern = re.compile(Rule.RULES[0].construct_pattern())
    # fullmatch (not search): a message must be consumed end-to-end.
    # Fix: the original increment line carried a stray '|' dataset artifact.
    return sum(1 for message in messages if pattern.fullmatch(message))
if __name__ == "__main__":
    import time
    # Print the answer followed by a rough wall-clock timing of the solve.
    start = time.perf_counter()
    print(main())
    print(time.perf_counter() - start)
|
dailin/wesnoth | scons/boost.py | Python | gpl-2.0 | 6,468 | 0.008349 | # vi: syntax=python:et:ts=4
from config_check_utils import find_include
from os.path import join, dirname, basename
from glob import glob
import re
def find_boost(env):
    """Locate a Boost installation and record boostdir/boostlibdir/boost_suffix
    in the SCons environment.

    First tries a plain <prefix>/include layout; failing that, scans for
    versioned <prefix>/include/boost-X_Y layouts and picks the newest.
    """
    prefixes = [env["prefix"], "C:\\Boost"]
    crosscompile = env["host"]
    include = find_include(prefixes, "boost/config.hpp", default_prefixes=not crosscompile)
    if include:
        prefix, includefile = include[0]
        env["boostdir"] = join(prefix, "include")
        env["boostlibdir"] = join(prefix, "lib")
        if not env.get("boost_suffix"):
            # Detect multithreaded "-mt" library builds in lib/ or lib64/.
            for libdir in ["lib", "lib64"]:
                if glob(join(prefix, libdir, "libboost_*-mt.*")):
                    env["boost_suffix"] = "-mt"
                    env["boostlibdir"] = join(prefix, libdir)
                    break
            else:
                env["boost_suffix"] = ""
        return
    includes = find_include(prefixes, "boost/config.hpp", include_subdir="include/boost-*")
    if includes:
        # Parse (major, minor) out of each "boost-X_Y" directory name;
        # unparsable entries sort as version (0, 0).
        versions = []
        for prefix, includefile in includes:
            try:
                versions.append(map(int, re.findall(r"^boost-(\d*)_(\d*)$", basename(dirname(dirname(includefile))))[0]))
            except IndexError:
                versions.append((0,0))
        # NOTE: tuple-parameter lambda below is Python-2-only syntax.
        version_nums = map(lambda (major, minor): 100000 * major + 100 * minor, versions)
        include_index = version_nums.index(max(version_nums))
        prefix, includefile = includes[include_index]
        version = versions[include_index]
        env["boostdir"] = join(prefix, "include", "boost-" + str(version[0]) + "_" + str(version[1]))
        env["boostlibdir"] = join(prefix, "lib")
        if not env.get("boost_suffix"):
            # Derive the library suffix (e.g. "-gcc-mt-1_55") from any
            # installed boost library matching this version.
            libs = glob(join(prefix, "lib", "libboost_*"))
            for lib in libs:
                try:
                    env["boost_suffix"] = re.findall(r"libboost_\w*(-.*%d_%d)" % tuple(version), lib)[0]
                    break
                except:
                    pass
def CheckBoost(context, boost_lib, require_version = None, header_only = False):
    """SCons configure check: verify that the given Boost library links.

    Builds a small C++ program including a representative header (and,
    optionally, a BOOST_VERSION guard) and tries to link it. On failure the
    environment is restored from a backup. Returns True/False.

    Fix: the embedded unit_test_framework stub previously contained a stray
    '|' dataset artifact ("char | **"), which made the generated C++ invalid.
    """
    env = context.env
    if require_version:
        context.Message("Checking for Boost %s library version >= %s... " % (boost_lib, require_version))
    else:
        context.Message("Checking for Boost %s library... " % boost_lib)
    if not env.get("boostdir", "") and not env.get("boostlibdir", ""):
        find_boost(env)
    boostdir = env.get("boostdir", "")
    boostlibdir = env.get("boostlibdir", "")
    # Snapshot the environment so a failed check leaves it untouched.
    backup = env.Clone().Dictionary()
    # Representative header per library; default is boost/<lib>.hpp.
    boost_headers = { "regex" : "regex/config.hpp",
                      "iostreams" : "iostreams/constants.hpp",
                      "unit_test_framework" : "test/unit_test.hpp",
                      "filesystem" : "filesystem/operations.hpp",
                      "system" : "system/error_code.hpp"}
    header_name = boost_headers.get(boost_lib, boost_lib + ".hpp")
    libname = "boost_" + boost_lib + env.get("boost_suffix", "")
    if env["fast"]:
        env.AppendUnique(CXXFLAGS = "-I" + boostdir, LIBPATH = [boostlibdir])
    else:
        env.AppendUnique(CPPPATH = [boostdir], LIBPATH = [boostlibdir])
    if not header_only:
        env.AppendUnique(LIBS = [libname])
    if boost_lib == "thread" and env["PLATFORM"] == "posix":
        env.AppendUnique(CCFLAGS = ["-pthread"], LINKFLAGS = ["-pthread"])
    test_program = """
#include <boost/%s>
\n""" % header_name
    if require_version:
        version = require_version.split(".", 2)
        major = int(version[0])
        minor = int(version[1])
        try:
            sub_minor = int(version[2])
        except (ValueError, IndexError):
            sub_minor = 0
        # BOOST_VERSION encodes MMMmmmpp (major*100000 + minor*100 + patch).
        test_program += "#include <boost/version.hpp>\n"
        test_program += \
            "#if BOOST_VERSION < %d\n#error Boost version is too old!\n#endif\n" \
            % (major * 100000 + minor * 100 + sub_minor)
    if boost_lib == "unit_test_framework":
        # The UTF requires this symbol to be defined by the test program.
        test_program += """
boost::unit_test::test_suite* init_unit_test_suite ( int, char **)
{
}
\n"""
    test_program += """
int main()
{
}
\n"""
    if context.TryLink(test_program, ".cpp"):
        context.Result("yes")
        return True
    else:
        context.Result("no")
        env.Replace(**backup)
        return False
def CheckBoostIostreamsGZip(context):
    """SCons configure check: verify gzip support in Boost Iostreams.

    Tries linking a gzip_compressor program with no extra library, then with
    'z', restoring the environment after each failed attempt.

    Fix: the def line was corrupted by a stray '|' dataset artifact
    ("def CheckBoos | tIostreamsGZip"), a syntax error; the name is also
    referenced by the config_checks registry below.
    """
    env = context.env
    backup = env.Clone().Dictionary()
    context.Message("Checking for gzip support in Boost Iostreams... ")
    test_program = """
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/gzip.hpp>
int main()
{
    boost::iostreams::filtering_stream<boost::iostreams::output> filter;
    filter.push(boost::iostreams::gzip_compressor(boost::iostreams::gzip_params()));
}
\n"""
    for zlib in ["", "z"]:
        env.Append(LIBS = [zlib])
        comment = ""
        if zlib:
            comment = " //Trying to link against '%s'.\n" % zlib
        if context.TryLink(comment + test_program, ".cpp"):
            context.Result("yes")
            return True
        else:
            env.Replace(**backup)
    context.Result("no")
    return False
def CheckBoostIostreamsBZip2(context):
    """SCons configure check: verify bzip2 support in Boost Iostreams.

    Tries linking a bzip2_compressor program with no extra library, with
    'bz2', and finally with Boost's statically-bundled bzip2 library name,
    restoring the environment after every failed attempt.
    """
    env = context.env
    saved_env = env.Clone().Dictionary()
    context.Message("Checking for bzip2 support in Boost Iostreams... ")
    test_program = """
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/bzip2.hpp>
int main()
{
    boost::iostreams::filtering_stream<boost::iostreams::output> filter;
    filter.push(boost::iostreams::bzip2_compressor(boost::iostreams::bzip2_params()));
}
\n"""
    # bzip2 library name when it's statically compiled into Boost
    bundled_name = "boost_bzip2" + env.get("boost_suffix", "")
    for candidate in ("", "bz2", bundled_name):
        env.Append(LIBS = [candidate])
        if candidate:
            source = (" //Trying to link against '%s'.\n" % candidate) + test_program
        else:
            source = test_program
        if context.TryLink(source, ".cpp"):
            context.Result("yes")
            return True
        env.Replace(**saved_env)
    context.Result("no")
    return False
# Registry of the custom configure checks defined above; presumably passed to
# an SCons Configure() context (custom_tests= / conf.AddTests) -- confirm at call site.
config_checks = { "CheckBoost" : CheckBoost, "CheckBoostIostreamsGZip" : CheckBoostIostreamsGZip, "CheckBoostIostreamsBZip2" : CheckBoostIostreamsBZip2 }
|
jreback/pandas | pandas/tests/arrays/sparse/test_arithmetics.py | Python | bsd-3-clause | 20,217 | 0.000544 | import operator
import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p20
import pandas as pd
import pandas._testing as tm
from pandas.core import ops
from pandas.core.arrays.sparse import SparseArray, SparseDtype
@pytest.fixture(params=["integer", "block"])
def kind(request):
    """kind kwarg to pass to SparseArray/SparseSeries (parametrized over both
    sparse index kinds)."""
    return request.param
@pytest.fixture(params=[True, False])
def mix(request):
    """Parametrized flag for mixed-operand tests."""
    # whether to operate op(sparse, dense) instead of op(sparse, sparse)
    return request.param
class TestSparseArrayArithmetics:
_base = np.array
_klass = SparseArray
    def _assert(self, a, b):
        # Compare two dense ndarrays exactly (dtype and values).
        tm.assert_numpy_array_equal(a, b)
    def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op):
        """Apply `op` to the sparse operands (sparse/dense when `mix`) and
        compare the densified result with the same op on dense operands."""
        with np.errstate(invalid="ignore", divide="ignore"):
            if mix:
                result = op(a, b_dense).to_dense()
            else:
                result = op(a, b).to_dense()
            if op in [operator.truediv, ops.rtruediv]:
                # pandas uses future division
                expected = op(a_dense * 1.0, b_dense)
            else:
                expected = op(a_dense, b_dense)
            if op in [operator.floordiv, ops.rfloordiv]:
                # Series sets 1//0 to np.inf, which SparseArray does not do (yet)
                mask = np.isinf(expected)
                if mask.any():
                    expected[mask] = np.nan
            self._assert(result, expected)
    def _check_bool_result(self, res):
        # Comparison ops must return a boolean-subtype SparseArray whose
        # fill value is a plain Python bool.
        assert isinstance(res, self._klass)
        assert isinstance(res.dtype, SparseDtype)
        assert res.dtype.subtype == np.bool_
        assert isinstance(res.fill_value, bool)
def _check_comparison_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid="ignore"):
# Unfortunately, trying to wrap the computation of each expected
# value is with np.errstate() is too tedious.
#
# sparse & sparse
self._check_bool_result(a == b)
self._assert((a == b).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b)
self._assert((a != b).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b)
self._assert((a >= b).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b)
self._assert((a <= b).to_dense(), a_dense <= b_dense)
| self._check_bool_result(a > | b)
self._assert((a > b).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b)
self._assert((a < b).to_dense(), a_dense < b_dense)
# sparse & dense
self._check_bool_result(a == b_dense)
self._assert((a == b_dense).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b_dense)
self._assert((a != b_dense).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b_dense)
self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b_dense)
self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b_dense)
self._assert((a > b_dense).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b_dense)
self._assert((a < b_dense).to_dense(), a_dense < b_dense)
    def _check_logical_ops(self, a, b, a_dense, b_dense):
        """Check & and | for sparse/sparse and sparse/dense operand pairs
        against the equivalent dense logical op."""
        # sparse & sparse
        self._check_bool_result(a & b)
        self._assert((a & b).to_dense(), a_dense & b_dense)
        self._check_bool_result(a | b)
        self._assert((a | b).to_dense(), a_dense | b_dense)
        # sparse & dense
        self._check_bool_result(a & b_dense)
        self._assert((a & b_dense).to_dense(), a_dense & b_dense)
        self._check_bool_result(a | b_dense)
        self._assert((a | b_dense).to_dense(), a_dense | b_dense)
    @pytest.mark.parametrize("scalar", [0, 1, 3])
    @pytest.mark.parametrize("fill_value", [None, 0, 2])
    def test_float_scalar(
        self, kind, mix, all_arithmetic_functions, fill_value, scalar, request
    ):
        """Arithmetic between a float SparseArray and a scalar."""
        op = all_arithmetic_functions
        # floordiv against sparse arrays regressed on NumPy >= 1.20 (GH#38172).
        if not _np_version_under1p20:
            if op in [operator.floordiv, ops.rfloordiv]:
                mark = pytest.mark.xfail(strict=False, reason="GH#38172")
                request.node.add_marker(mark)
        values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
        a = self._klass(values, kind=kind, fill_value=fill_value)
        self._check_numeric_ops(a, scalar, values, scalar, mix, op)
def test_float_scalar_comparison(self, kind):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = self._klass(values, kind=kind)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
    def test_float_same_index_without_nans(
        self, kind, mix, all_arithmetic_functions, request
    ):
        # when sp_index are the same
        """Arithmetic between two NaN-free arrays sharing a sparse index."""
        op = all_arithmetic_functions
        values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
        rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
        a = self._klass(values, kind=kind, fill_value=0)
        b = self._klass(rvalues, kind=kind, fill_value=0)
        self._check_numeric_ops(a, b, values, rvalues, mix, op)
    def test_float_same_index_with_nans(
        self, kind, mix, all_arithmetic_functions, request
    ):
        # when sp_index are the same
        """Arithmetic between two NaN-bearing arrays sharing a sparse index."""
        op = all_arithmetic_functions
        # floordiv against sparse arrays regressed on NumPy >= 1.20 (GH#38172).
        if not _np_version_under1p20:
            if op in [operator.floordiv, ops.rfloordiv]:
                mark = pytest.mark.xfail(strict=False, reason="GH#38172")
                request.node.add_marker(mark)
        values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
        rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
        a = self._klass(values, kind=kind)
        b = self._klass(rvalues, kind=kind)
        self._check_numeric_ops(a, b, values, rvalues, mix, op)
    def test_float_same_index_comparison(self, kind):
        # when sp_index are the same
        """Comparison ops between two arrays sharing a sparse index, with the
        default NaN fill value and with fill_value=0."""
        values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
        rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
        a = self._klass(values, kind=kind)
        b = self._klass(rvalues, kind=kind)
        self._check_comparison_ops(a, b, values, rvalues)
        values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
        rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
        a = self._klass(values, kind=kind, fill_value=0)
        b = self._klass(rvalues, kind=kind, fill_value=0)
        self._check_comparison_ops(a, b, values, rvalues)
def test_float_array(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op) |
nedlowe/amaas-core-sdk-python | amaascore/transactions/enums.py | Python | apache-2.0 | 767 | 0.006519 | from __future__ import absolute_import, division, print_function, unicode_literals
CASH_TRANSACTION_TYPES = {'Cashflow', 'Coupon', 'Dividend', 'Payment'}
TRANSACTION_TYPES = {'Allocation', 'Block', 'Exercise', 'Expiry', | 'Journal', 'Maturity', 'Net',
'Novation', 'Split', 'Trade', 'Transfer'} | CASH_TRANSACTION_TYPES
TRANSACTION_INVESTOR_ACTIONS = {'Subscription', 'Rede | mption'}
TRANSACTION_LIFECYCLE_ACTIONS = {'Acquire', 'Remove'}
TRANSACTION_ACTIONS = {'Buy', 'Sell', 'Short Sell', 'Deliver', 'Receive'} | TRANSACTION_LIFECYCLE_ACTIONS | \
TRANSACTION_INVESTOR_ACTIONS
TRANSACTION_CANCEL_STATUSES = {'Cancelled', 'Netted', 'Novated'}
TRANSACTION_STATUSES = {'New', 'Amended', 'Superseded'} | TRANSACTION_CANCEL_STATUSES
|
lukefi/missamustikka | backend/generateGeotiff.py | Python | mit | 2,298 | 0.012185 | # This script generates GeoTiff files based Corine land cover data
# Usage: python generateGeotiff.py berryName
# berryName is optional. If not provided all output layers are generated.
# Licensed under the MIT license
from osgeo import gdal, ogr, gdalconst
import sys
gdal.UseExceptions()
gdal.AllRegister()
# Paths for input and output. These may be adjusted as needed.
src_filename = "../../aineisto/Clc2012_FI20m.tif"
dstPath = "../../output"
berries = ["mustikka", "puolukka", "karpalo", "vadelma"]
if len(sys.argv) > 1:
berries = [sys.argv[1]]
# WARNING: these values are not based on scientific research.
corineToBerryIndex = dict()
corineToBerryIndex["mustikka"] = dict()
corineToBerryIndex["mustikka"][24] = 70
corineToBerryIndex["mustikka"][25] = 80
corineToBerryIndex["mustikka"][27] = 50
corineToBerryIndex["mustikka"][28] = 60
corineToBerryIndex["puolukka"] = dict()
corineToBerryIndex["puolukka"][24] = 80
corineToBerryIndex["puolukka"][25] = 60
corineToBerryIndex["karpalo"] = dict()
corineToBerryIndex["karpalo"][40] = 50
corineToBerryIndex["karpalo"][42] = 80
corineToBerryIndex["vadelma"] = dict()
corineToBerryIndex["vadelma | "][36] = 80
corineToBerryIndex["vadelma"][35] = 60
# Normalize values so that the highest value in output is always 100
normalizationFactor = 100.0 / 80.0
srcDs = gdal.Open(src_filename)
corineBand = | srcDs.GetRasterBand(1)
xSize = corineBand.XSize
ySize = corineBand.YSize
print "Input raster size is ", xSize, ySize
for berry in berries:
driver = srcDs.GetDriver()
dstDs = driver.Create(dstPath + "/" + berry + ".tif", xSize, ySize, 1, gdal.GDT_UInt16, options = ['COMPRESS=LZW'])
dstDs.SetGeoTransform(srcDs.GetGeoTransform())
dstDs.SetProjection(srcDs.GetProjection())
array = corineBand.ReadAsArray(0, 0, xSize, ySize)
for x in range(0, xSize):
indexes = corineToBerryIndex[berry]
if x % 500 == 0:
print `round(100.0 * x / xSize)` + " % of " + berry + " done"
for y in range(0, ySize):
origVal = array[y,x]
if origVal in indexes:
finalVal = int(indexes[origVal] * normalizationFactor)
else:
finalVal = 0
array[y,x] = finalVal
dstBand = dstDs.GetRasterBand(1)
dstBand.WriteArray(array, 0, 0)
# Once we're done, close properly the dataset
dstBand = None
dstDs = None
corineBand = None
srcDs = None
|
matejd11/birthdayNotify | birthdayNotify/group.py | Python | mit | 1,058 | 0.009452 | import json
class Group(object):
    """A named group of events, each carrying per-channel notification flags.

    `eventsAtr` maps an event key to an object whose __dict__ contains the
    boolean flags named in `order` (index 1 onward) plus an `event` attribute
    exposing `shortcut`.
    """

    # Column order for the exported dict; "name" (index 0) is handled
    # separately, the rest are boolean channel flags on each event's settings.
    order = ["name",
             "facebook",
             "sms",
             "mail",
             "show"]

    def __init__(self, name, eventsAtr):
        self.name = name
        self.eventsAtr = eventsAtr

    def convert(self):
        """Flatten this group into a dict mapping each channel to a
        comma-separated string of enabled event shortcuts.

        Fix: two stray '|' dataset artifacts corrupted the inner conditional
        ("| if ..." and "even | t.shortcut"), making the method a syntax error.
        """
        dictionary = {}
        dictionary["name"] = self.name
        for eventAtr in self.eventsAtr:
            markDict = self.eventsAtr[eventAtr].__dict__
            for atr in range(1, len(self.order)):
                if self.order[atr] not in dictionary:
                    dictionary[self.order[atr]] = ""
                if markDict[self.order[atr]] is True:
                    if dictionary[self.order[atr]] != "":
                        dictionary[self.order[atr]] += ", "+self.eventsAtr[eventAtr].event.shortcut
                    else:
                        dictionary[self.order[atr]] = self.eventsAtr[eventAtr].event.shortcut
        return dictionary

    def __str__(self):
        me = self.convert()
        return str(json.dumps(me, sort_keys = True, indent = 4))
|
geimer/easybuild-easyblocks | easybuild/easyblocks/g/geant4.py | Python | gpl-2.0 | 25,334 | 0.006237 | ##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Geant4 support, implemented as an easyblock.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
import re
from distutils.version import LooseVersion
import easybuild.tools.environment as env
from easybuild.framework.easyconfig import CUSTOM
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.tools.filetools import run_cmd, run_cmd_qa
from easybuild.tools.modules import get_software_root
from easybuild.tools.filetools import mkdir
class EB_Geant4(CMakeMake):
"""
Support for building Geant4.
Note: Geant4 moved to a CMAKE like build system as of version 9.5.
"""
@staticmethod
def extra_options():
"""
Define extra options needed by Geant4
"""
extra_vars = {
'G4ABLAVersion': [None, "G4ABLA version", CUSTOM],
'G4NDLVersion': [None, "G4NDL version", CUSTOM],
'G4EMLOWVersion': [None, "G4EMLOW version", CUSTOM],
'PhotonEvaporationVersion': [None, "PhotonEvaporation version", CUSTOM],
'G4RadioactiveDecayVersion': [None, "G4RadioactiveDecay version", CUSTOM],
}
return CMakeMake.extra_options(extra_vars)
def configure_step(self):
"""
Configure Geant4 build, either via CMake for versions more recent than 9.5,
or using an interactive configuration procedure otherwise.
"""
# Geant4 switched to a cmake build system in 9.5
if LooseVersion(self.version) >= LooseVersion("9.5"):
mkdir('configdir')
os.chdir('configdir')
super(EB_Geant4, self).configure_step(srcdir="..")
else:
pwd = self.cfg['start_dir']
dst = self.installdir
clhepdir = get_software_root('CLHEP')
cmd = "%s/Configure -E -build" % pwd
self.qanda = {# questions and answers for version 9.1.p03
"There exists a config.sh file. Shall I use it to set the defaults? [y]": "n",
"Would you like to see the instructions? [n]": "",
"[Type carriage return to continue]": "",
"Definition of G4SYSTEM variable is Linux-g++. That stands for: 1) OS : Linux" \
"2) Compiler : g++ To modify default settings, select number above (e.g. 2) " \
"[Press [Enter] for default settings]": "2",
"Which C++ compiler? [g++]": "$(GPP)",
"Confirm your selection or set recommended 'g++'! [*]": "",
"Definition of G4SYSTEM variable is Linux-icc. That stands for: 1) OS : Linux 2)" \
"Compiler : icc To modify default settings, select number above (e.g. 2) " \
"[Press [Enter] for default settings]": "",
"Do you expect to run these scripts and binaries on multiple machines? [n]": "y",
"Where is Geant4 source installed? [%s]" % pwd: "",
"Specify the path where Geant4 libraries and source files should be installed." \
" [%s]" % pwd: dst,
"Do you want to copy all Geant4 headers in one directory? [n]": "y",
"Please, specify default directory where ALL the Geant4 data is installed:" \
"G4LEVELGAMMADATA: %(pwd)s/data/PhotonEvaporation2.0 G4RADIOACTIVEDATA: " \
"%(pwd)s/data/RadioactiveDecay3.2 G4LEDATA: %(pwd)s/data/G4EMLOW5.1 G4NEUTRONHPDATA: " \
"%(pwd)s/data/G4NDL3.12 G4ABLADATA: %(pwd)s/data/G4ABLA3.0 You will be asked about " \
"customizing these next. [%(pwd)s/data]" % {'pwd': pwd}: "%s/data" % dst,
"Directory %s/data doesn't exist. Use that name anyway? [n]" % dst: "y",
"Please, specify default directory where the Geant4 data is installed: " \
"1) G4LEVELGAMMADATA: %(dst)s/data/PhotonEvaporation2.0 2) G4RADIOACTIVEDATA: " \
"%(dst)s/data/RadioactiveDecay3.2 3) G4LEDATA: %(dst)s/data/G4EMLOW5.1 4) G4NEUTRONHPDATA: " \
"%(dst)s/data/G4NDL3.12 5) G4ABLADATA: %(dst)s/data/G4ABLA3.0 To modify default settings, " \
"select number above (e.g. 2) [Press [Enter] for default settings]" % {'dst': dst}: "",
"Please, specify where CLHEP is installed: CLHEP_BASE_DIR: ": clhepdir,
"Please, specify where CLHEP is installed: CLHEP_BASE_DIR: [%s]" % clhepdir: "",
"You can customize paths and library name of you CLHEP installation: 1) CLHEP_INCLUDE_DIR: " \
"%(clhepdir)s/include 2) CLHEP_LIB_DIR: %(clhepdir)s/lib 3) CLHEP_LIB: CLHEP To modify " \
"default settings, select number above (e.g. 2) [Press [Enter] for default settings]" %
{'clhepdir': clhepdir}: "",
"By default 'static' (.a) libraries are built. Do you want to build 'shared' (.so) " \
"libraries? [n]": "y",
"You selected to build 'shared' (.so) libraries. Do you want to build 'static' (.a) " \
"libraries too? [n]": "y",
"Do you want to build 'global' compound libraries? [n]": "",
"Do you want to compile libraries in DEBUG mode (-g)? [n]": "",
"G4UI_NONE If this variable is set, no UI sessions nor any UI libraries are built. " \
"This can be useful when running a pure batch job or in a user framework having its own " \
"UI system. Do you want to set this variable ? [n]": "",
"G4UI_BUILD_XAW_SESSION G4UI_USE_XAW Specifies to include and use the XAW interfaces in " \
"the application to be built. The XAW (X11 Athena Widget set) extensions are required to " \
"activate and build this driver. [n]": "",
"G4UI_BUILD_XM_SESSION G4UI_USE_XM Specifies to include and use the XM Motif based user " \
"interfaces. The XM Motif extensions are required to activate and build this driver. [n]": "",
"G4VIS_NONE If this variable is set, no visualization drivers will be built or used. Do " \
"you want to set this variable ? [n]": "n",
"G4VIS_BUILD_OPENGLX_DRIVER G4VIS_USE_OPENGLX It is an interface to the de facto standard " \
"3D graphics library, OpenGL. It is well suited for real-time fast visualizatio |
lazlolazlolazlo/onionshare | test/test_onionshare_settings.py | Python | gpl-3.0 | 5,868 | 0 | """
OnionShare | https://onionshare.org/
Copyright (C) 2017 Micah Lee <micah@micahflee.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import os
import tempfile
import pytest
from onionshare import common, settings, strings
@pytest.fixture
def custom_version(monkeypatch):
    # Pin common.get_version() to a known string so settings defaults are predictable.
    monkeypatch.setattr(common, 'get_version', lambda: 'DUMMY_VERSION_1.2.3')
@pytest.fixture
def os_path_expanduser(monkeypatch):
    # Make expanduser a no-op so '~'-based paths can be asserted literally.
    monkeypatch.setattr('os.path.expanduser', lambda path: path)
@pytest.fixture
def settings_obj(custom_version, sys_onionshare_dev_mode, platform_linux):
    # A fresh Settings instance built under pinned version/platform fixtures.
    return settings.Settings()
class TestSettings:
    """Unit tests for onionshare.settings.Settings.

    Fixes: two stray '|' dataset artifacts corrupted `settings.Settings()`
    in test_filename_darwin and the `os_path_expanduser` fixture name in
    test_filename_linux; both were syntax errors.
    """

    def test_init(self, settings_obj):
        # A fresh Settings object exposes the documented defaults verbatim.
        assert settings_obj._settings == settings_obj.default_settings == {
            'version': 'DUMMY_VERSION_1.2.3',
            'connection_type': 'bundled',
            'control_port_address': '127.0.0.1',
            'control_port_port': 9051,
            'socks_address': '127.0.0.1',
            'socks_port': 9050,
            'socket_file_path': '/var/run/tor/control',
            'auth_type': 'no_auth',
            'auth_password': '',
            'close_after_first_download': True,
            'systray_notifications': True,
            'use_stealth': False,
            'use_autoupdate': True,
            'autoupdate_timestamp': None
        }

    def test_fill_in_defaults(self, settings_obj):
        # Missing keys are re-populated from the defaults.
        del settings_obj._settings['version']
        settings_obj.fill_in_defaults()
        assert settings_obj._settings['version'] == 'DUMMY_VERSION_1.2.3'

    def test_load(self, settings_obj):
        # Values from a JSON settings file override the defaults.
        custom_settings = {
            'version': 'CUSTOM_VERSION',
            'socks_port': 9999,
            'use_stealth': True
        }
        tmp_file, tmp_file_path = tempfile.mkstemp()
        with open(tmp_file, 'w') as f:
            json.dump(custom_settings, f)
        settings_obj.filename = tmp_file_path
        settings_obj.load()
        assert settings_obj._settings['version'] == 'CUSTOM_VERSION'
        assert settings_obj._settings['socks_port'] == 9999
        assert settings_obj._settings['use_stealth'] is True
        os.remove(tmp_file_path)
        assert os.path.exists(tmp_file_path) is False

    def test_save(self, monkeypatch, settings_obj):
        # save() round-trips: the file on disk equals the in-memory settings.
        monkeypatch.setattr(strings, '_', lambda _: '')
        settings_filename = 'default_settings.json'
        tmp_dir = tempfile.gettempdir()
        settings_path = os.path.join(tmp_dir, settings_filename)
        settings_obj.filename = settings_path
        settings_obj.save()
        with open(settings_path, 'r') as f:
            settings = json.load(f)
        assert settings_obj._settings == settings
        os.remove(settings_path)
        assert os.path.exists(settings_path) is False

    def test_get(self, settings_obj):
        assert settings_obj.get('version') == 'DUMMY_VERSION_1.2.3'
        assert settings_obj.get('connection_type') == 'bundled'
        assert settings_obj.get('control_port_address') == '127.0.0.1'
        assert settings_obj.get('control_port_port') == 9051
        assert settings_obj.get('socks_address') == '127.0.0.1'
        assert settings_obj.get('socks_port') == 9050
        assert settings_obj.get('socket_file_path') == '/var/run/tor/control'
        assert settings_obj.get('auth_type') == 'no_auth'
        assert settings_obj.get('auth_password') == ''
        assert settings_obj.get('close_after_first_download') is True
        assert settings_obj.get('systray_notifications') is True
        assert settings_obj.get('use_stealth') is False
        assert settings_obj.get('use_autoupdate') is True
        assert settings_obj.get('autoupdate_timestamp') is None

    def test_set_version(self, settings_obj):
        settings_obj.set('version', 'CUSTOM_VERSION')
        assert settings_obj._settings['version'] == 'CUSTOM_VERSION'

    def test_set_control_port_port(self, settings_obj):
        settings_obj.set('control_port_port', 999)
        assert settings_obj._settings['control_port_port'] == 999
        # Non-integer input falls back to the default port.
        settings_obj.set('control_port_port', 'NON_INTEGER')
        assert settings_obj._settings['control_port_port'] == 9051

    def test_set_socks_port(self, settings_obj):
        settings_obj.set('socks_port', 888)
        assert settings_obj._settings['socks_port'] == 888
        # Non-integer input falls back to the default port.
        settings_obj.set('socks_port', 'NON_INTEGER')
        assert settings_obj._settings['socks_port'] == 9050

    def test_filename_darwin(
            self,
            custom_version,
            monkeypatch,
            os_path_expanduser,
            platform_darwin):
        obj = settings.Settings()
        assert (obj.filename ==
                '~/Library/Application Support/OnionShare/onionshare.json')

    def test_filename_linux(
            self,
            custom_version,
            monkeypatch,
            os_path_expanduser,
            platform_linux):
        obj = settings.Settings()
        assert obj.filename == '~/.config/onionshare/onionshare.json'

    def test_filename_windows(
            self,
            custom_version,
            monkeypatch,
            platform_windows):
        monkeypatch.setenv('APPDATA', 'C:')
        obj = settings.Settings()
        assert obj.filename == 'C:\\OnionShare\\onionshare.json'
|
msifuentes/pynet_ansible | test_python/netmiko_wk4_ex7.py | Python | apache-2.0 | 1,955 | 0.01023 | #!/usr/bin/env python
#Import libraries
from netmiko import ConnectHandler
from getpass import getpass
#define variables for the connection to network devices
ip_addr = '50.76.53.27'
username = 'pyclass'
password = getpass()
portpy2 = 8022
portsrx = 9822
cisco = 'cisco_ios'
juniper = 'juniper'
#create a dictionary of the devices you are going to make a connections with
pynetrtr1 = {
    'device_type': cisco,
    'ip': ip_addr,
    'username': username,
    'password': password,
}
pynetrtr2 = {
    'device_type': cisco,
    'ip': ip_addr,
    'username': username,
    'password': password,
    'port': portpy2,
}
juniper_srx = {
    'device_type': juniper,
    'ip': ip_addr,
    'username': username,
    'password': password,
    'secret': '',
    'port': portsrx,
}
#This tests that the mapping of the dictionary to the variables is working
# print pynetrtr1
print pynetrtr2
# print juniper_srx
#This makes the connection to the network devices defined.
#the ** is used to help pass all the dictionary information along
# rtr1 = ConnectHandler(**pynetrtr1)
rtr2 = ConnectHandler(**pynetrtr2)
# srx = ConnectHandler(**juniper_srx)
#this output will confirm that the connection was made with netmiko and the ssh information used to make the connection
# print rtr1
print rtr2
# print srx
#this will place rtr2 into config mode and will display the output to confirm we are in that mode.
rtr2.config_mode()
outp_show = rtr2.find_prompt()
print outp_show
#then we will set the logging buffer to 11100
rtr2.send_command("logging buffer 11100")
rtr2.exit_config_mode()
outp_show = rtr2.send_command("show run | i logging")
print outp_show
#this closes the connection. Without this command the connection stays open until the vty idle timer kicks in.
# rtr1.disconnect()
rtr2.disconnect()
#On the srx the connection is not a clean disconnect. the connection from the server sits in fin_wait
# srx.disconnect()
|
xiskoct/Meraki | python-2.7/get-vlans.py | Python | gpl-3.0 | 3,070 | 0.011401 | #!/usr/bin/env python2.7
#
# Aaron Blair
# RIoT Solutions
#
# aaron@aaronpb.me
#
# get-vlans.py - A simple Python script to get VLANs from Appliance Networks
# 15/03/2017
import argparse
import csv
import pprint
import requests
import signal
import sys
import json
count = 0
checknet = 0
paramslist = []
vlanoutput = 0
api_key = 'xxxxxxxxxxxxxxxxx' # <--- add your API key here
baseurl = 'https://dashboard.meraki.com/api/v0/'
orgurl = 'https://dashboard.meraki.com/api/v0/organizations/'
headers = {'X-Cisco-Meraki-API-Key': api_key,'Content-Type': 'application/json'}
#use argparse to handle command line arguments
parser = argparse.ArgumentParser(description="a script for getting the VLAN settings of a list of provided networks")
parser.add_argument('filename',
type=argparse.FileType('r'),
help='input txt file containing network IDs' )
parser.add_argument("ORGName",help='name of the | ORG where you would like to get the VLANs for')
args = parser.parse_args()
#get the organization-name where the networks have to be updated as an argument
org=args.ORGName
#exit cleanly for SIGINT/Ctrl+C
def signal_handler(signal, frame):
print("\n***Interrupt!*** ---> Exiting ...")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
#read the txt file
csvFile = args.filename
cs | vReader = csv.reader(csvFile)
#loop through the rows in the csvReader object and create a new list (paramslist) of JSON formatted attributes
for row in csvReader:
NetworkId = str(row[0])
params = NetworkId
paramslist.append(params)
#get all of the orgs this API key has access to
orgsjson = requests.get(orgurl, headers=headers).text
output = json.loads(orgsjson)
#check whether the organization exists under the administered organization
if any(d['name'] == org for d in output):
for row in output:
orgname = row['name']
#get the ID of the ORG to create the networks URL
if org == orgname:
orgid = row['id']
networks_url = (str(orgurl) + str(orgid) + '/networks')
break
#get the list of networks in the ORG
networksjson = requests.get(networks_url, headers=headers).text
output = json.loads(networksjson)
#go through every network in the input file
for nwid in paramslist:
checknet=0
vlans_URL = (str(networks_url) + '/' + nwid + '/vlans')
#if network in csv matches network in dashboard, get the vlan-list
vlansinnetwork = requests.get(vlans_URL, headers=headers).text
vlanoutput = json.loads(vlansinnetwork)
print json.dumps(vlanoutput, indent=4)
#check whether networks in there are networks in the CSV that do not exist in this ORG
for nwid in paramslist:
netexist=0
for row in output:
if str(row['id']) == nwid:
netexist+=1
if netexist==0:
print "The network " + nwid + " mentioned in the input file does not exist in this organization."
else:
print "This ORG does not exist for your admin account"
csvFile.close(); |
benglard/Rhetorical-Analysis | AnaphoraFinder.py | Python | mit | 1,934 | 0.007756 | from nltk import word_tokenize
class AnaphoraFinder:
    """Counts anaphora -- consecutive sentences opening with the same
    word -- in a plain-text file (tokenized with nltk)."""

    def __init__(self):
        self.f = ""
        self.counter = 0
        self.first_word = ""
        self.b = False

    # Path is inputted from AIPController.
    # Returns the anaphora counter for the file (counter resets afterwards).
    def sendFile(self, path):
        self.f = open(path)
        try:
            for line in self.f:
                try:
                    if self.b:
                        self.counter += self.get_all_anaphora(line)
                    else:
                        try:
                            self.first_word = word_tokenize(line)[0].lower()
                        except:
                            continue
                        self.counter += self.get_all_anaphora(line)
                except:
                    continue
        finally:
            # BUGFIX: the input file handle was previously never closed.
            self.f.close()
        c = self.counter
        self.counter = 0
        return c

    # Returns the length of an array which contains all instances of anaphora
    # found within a single line of text.
    def get_all_anaphora(self, line):
        ana = []
        for w in word_tokenize(line)[1:]:
            try:
                new_word = self.get_next_word(line, w).lower()
            except:
                pass
            # A sentence boundary: compare the next sentence's first word
            # with the previous sentence's first word.
            if w.find('.') != -1 or w.find('!') != -1 or w.find('?') != -1:
                if new_word == self.first_word:
                    ana.append([self.first_word, new_word])
                    self.first_word = new_word
                elif new_word == False:
                    # End of line reached; continue on the next line.
                    self.b = True
                else:
                    self.first_word = new_word
        return len(ana)

    # Gets the next word after the period for anaphora comparison.
    # If end-of-line (would be IndexError), return False.
    def get_next_word(self, line, target):
        tokens = word_tokenize(line)[1:]
        for w in range(len(tokens)):
            if tokens[w] == target:
                try:
                    return tokens[w+1].lower()
                except:
                    return False
|
mbakthav/artos | PyARTOS/GUI/CameraSampler.py | Python | gpl-3.0 | 16,231 | 0.016881 | """Provides the CameraSampleDialog class which creates a window for capturing samples with a camera."""
try:
# Python 3
import tkinter as Tkinter
from tkinter import N, E, S, W
from tkinter import ttk
from tkinter import messagebox as tkMessageBox
except:
# Python 2
import Tkinter
from Tkinter import N, E, S, W
import ttk
import tkMessageBox
import os.path
try:
from PIL import Image, ImageTk
except:
import Image, ImageTk
from . import gui_utils
from .. import utils
from ..detecting import BoundingBox
from ..Camera.Capture import Capture
class CameraSampleDialog(gui_utils.Dialog):
"""A toplevel window that guides the user to capture samples of an object with a camera.
The number of samples to be taken can be controlled by passing an argument to the constructor of this class.
After the first snapshot has been taken, the user will be asked to draw a bounding box around the object on
that image. That bounding box is then used for all further samples too.
After the user has finished taking snapshots, those will be available in the `samples` attribute
of the CameraSampleDialog instance. That is a list of 2-tuples whose first component is the actual sample
as PIL.Image.Image instance and whose second component is the bounding box around the object on that
image as detecting.BoundingBox instance.
If the user has pressed the 'Cancel' button, the `samples` list will be empty, even if samples have
been captured before.
"""
def __init__(self, master = None, numSamples = 0, device = 0, parent = None):
    """Creates a new CameraSampleDialog.

    master - The parent widget.
    numSamples - Number of samples to be taken. Set this to 0 to let the user take an arbitrary
                 number of samples until he clicks the 'Finish' button.
    device - The ID of the video device to be used. If you want to let the user select a device,
             use the createWithDeviceSelection() static method to create a CameraSampleDialog instance.
    parent - If set to a widget, this window will turn into a modal dialog and `parent` will
             be it's parent window.
    """
    gui_utils.Dialog.__init__(self, master, parent, gui_utils.Dialog.CENTER_ON_SCREEN)
    # Intialize window
    self.title('Take sample snapshots')
    self.resizable(False, False)
    self.protocol('WM_DELETE_WINDOW', self.cancel)
    self.bind('<Destroy>', self.onDestroy, True)
    # Initialize member variables
    self.numSamples = numSamples
    self.device = Capture(device)
    self.samples = []            # list of (PIL.Image.Image, BoundingBox) tuples
    self.boundingBox = None      # box drawn on the first snapshot, reused afterwards
    self.drawingMode = False     # True while the user is drawing the bounding box
    self.capturePaused = False   # suspends the pollVideoFrame() loop
    # Create slave widgets
    self.instructions = Tkinter.StringVar(master = self, value = 'Please take a first snapshot of the object of interest.')
    self.status = Tkinter.StringVar(master = self, value = '0 of {}'.format(numSamples) if numSamples > 0 else '0 samples captured')
    self._createWidgets()
    # Start capture
    self.pollVideoFrame()
def _createWidgets(self):
    """Builds and lays out the dialog's child widgets (instructions,
    video label, capture/cancel/finish controls)."""
    # Instructions
    # BUGFIX: repaired two corrupted lines (split assignment and
    # "b ackground" keyword).
    self.lblInstructions = Tkinter.Message(self, textvariable = self.instructions, justify = 'center')
    self.lblInstructions.grid(column = 0, row = 0, columnspan = 3, padx = 20)
    # Label for video stream
    self.lblVideo = ttk.Label(self, background = '#000000')
    self.lblVideo.grid(column = 0, row = 1, columnspan = 3, padx = 20)
    # Controls at the bottom
    self.btnCancel = ttk.Button(self, text = 'Cancel', command = self.cancel)
    self.btnCancel.grid(column = 0, row = 2, sticky = W, padx = 20)
    self.btnCapture = ttk.Button(self, text = 'OK', compound = 'image', command = self.capture)
    self.btnCapture._img = ImageTk.PhotoImage(Image.open(os.path.join(utils.basedir, 'GUI', 'gfx', 'shutter.png')))
    self.btnCapture['image'] = self.btnCapture._img
    self.btnCapture.grid(column = 1, row = 2)
    self.frmStatus = ttk.Frame(self)
    self.frmStatus.grid(column = 2, row = 2, sticky = E, padx = 20)
    self.lblStatus = ttk.Label(self.frmStatus, textvariable = self.status)
    self.lblStatus.pack(side = 'top', anchor = E)
    if self.numSamples <= 0:
        # Unlimited mode: the user ends the session with 'Finish'.
        self.btnFinish = ttk.Button(self.frmStatus, text = 'Finish', state = 'disabled', command = self.destroy)
        self.btnFinish.pack(side = 'top', anchor = E, pady = (10, 0))
    self.columnconfigure(1, weight = 1)
    self.columnconfigure((0,2), uniform = 1)
    self.rowconfigure(0, minsize = 60)
    self.rowconfigure(1, weight = 1)
    self.rowconfigure(2, minsize = 120)
def onDestroy(self, evt):
    # Run cleanup only when the dialog itself (not a child widget) is destroyed.
    if (evt.widget is self):
        # Stop running capture
        self.capturePaused = True
        self.after_cancel(self.pollId)
        del self.device
        try:
            # Break reference cycles of TCL variables, because their
            # __del__ method prevents the garbage collector from freeing them:
            del self.instructions
            del self.status
            del self.lblVideo._img
        except:
            pass
def pollVideoFrame(self):
    """Periodically grabs a snapshot from the video device and displays it."""
    try:
        frame = self.device.grabFrame()
        if not (frame is None):
            # Keep the full-resolution frame for sampling.
            self.frame = frame.copy()
            if self.lblInstructions['width'] <= 0:
                self.lblInstructions['width'] = frame.size[0]
            # Scale down for displaying if camera input is too large for screen
            screenSize = self.winfo_screenwidth(), self.winfo_screenheight()
            maxSize = (screenSize[0] - 100, screenSize[1] - 300)
            if (frame.size[0] > maxSize[0]) or (frame.size[1] > maxSize[1]):
                frame.thumbnail(maxSize, Image.BILINEAR)
            self.frameThumb = frame.copy()
            # Draw bounding box
            if self.boundingBox:
                frame = self.boundingBox.drawToImage(frame)
            # Update image on video label
            self.lblVideo._img = ImageTk.PhotoImage(frame)
            self.lblVideo["image"] = self.lblVideo._img
    finally:
        # Re-schedule itself unless capturing has been paused.
        if not self.capturePaused:
            self.pollId = self.after(1, self.pollVideoFrame)
def capture(self):
"""Captures a snapshot from the camera stream."""
if (self.drawingMode) and ((not self.boundingBox) or (self.boundingBox.width <= 1) or (self.boundingBox.height <= 1)):
return
if self.boundingBox:
# Capture sample
self.samples.append((self.frame.copy(), BoundingBox(self.boundingBox.scale(float(self.frame.size[0]) / float(self.frameThumb.size[0])))))
self.status.set('{} of {}'.format(len(self.samples), self.numSamples) if self.numSamples > 0 else '{} samples captured'.format(len(self.samples)))
if (self.numSamples > 0) and (len(self.samples) >= self.numSamples):
self.destroy()
return
elif self.numSamples <= 0:
self.btnFinish['state'] = 'normal'
self.btnFinish['default'] = 'active'
if self.drawingMode:
self.instructions.set('Place object in the bounding box to capture further images.')
self.drawingMode = False
self.capturePaused = False
self.btnCapture['compound'] = 'image'
self.lblVideo.unbind('<ButtonPress>')
self.lblVideo.unbind('<ButtonRelease>')
self.pollVideoFrame()
else:
# Enter drawing mode to let the user specify a bounding b |
halbbob/dff | api/events/__init__.py | Python | gpl-2.0 | 617 | 0.003241 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2011 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
#  Frederic Baguelin <fba@digital-forensic.org>

# Version placeholder substituted at build time.
__version__ = "${API_EVENT_VERSION}"
# Public submodules of this package.
__all__ = ["libevents"]
|
thunderhoser/GewitterGefahr | gewittergefahr/gg_utils/conus_boundary_test.py | Python | mit | 2,046 | 0.000489 | """Unit tests for conus_boundary.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import conus_boundary
# 20 query points (deg N, deg E in [0, 360)) paired with whether each point
# lies inside the continental United States.
# BUGFIX: repaired stray "|" artifacts corrupting the array literals.
QUERY_LATITUDES_DEG = numpy.array([
    33.7, 42.6, 39.7, 34.9, 40.2, 33.6, 36.4, 35.1, 30.8, 47.4, 44.2, 45.1,
    49.6, 38.9, 35.0, 38.1, 40.7, 47.1, 30.2, 39.2
])
QUERY_LONGITUDES_DEG = numpy.array([
    276.3, 282.7, 286.6, 287.5, 271.0, 266.4, 258.3, 257.3, 286.8, 235.0, 273.5,
    262.5, 277.2, 255.3, 271.8, 254.3, 262.1, 247.8, 262.9, 251.6
])
IN_CONUS_FLAGS = numpy.array(
    [1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1], dtype=bool
)
class ConusBoundaryTests(unittest.TestCase):
    """Each method is a unit test for conus_boundary.py."""

    def _check_find_points_in_conus(self, use_shortcuts):
        """Shared body for both tests (removes the former duplication).

        Runs find_points_in_conus on the module-level query points and
        verifies the returned flags against IN_CONUS_FLAGS.
        """
        conus_latitudes_deg, conus_longitudes_deg = (
            conus_boundary.read_from_netcdf()
        )

        these_flags = conus_boundary.find_points_in_conus(
            conus_latitudes_deg=conus_latitudes_deg,
            conus_longitudes_deg=conus_longitudes_deg,
            query_latitudes_deg=QUERY_LATITUDES_DEG,
            query_longitudes_deg=QUERY_LONGITUDES_DEG,
            use_shortcuts=use_shortcuts)

        self.assertTrue(numpy.array_equal(these_flags, IN_CONUS_FLAGS))

    def test_find_points_in_conus_no_shortcuts(self):
        """Ensures correct output from find_points_in_conus without shortcuts."""
        self._check_find_points_in_conus(use_shortcuts=False)

    def test_find_points_in_conus_with_shortcuts(self):
        """Ensures correct output from find_points_in_conus with shortcuts."""
        self._check_find_points_in_conus(use_shortcuts=True)
if __name__ == '__main__':
unittest.main()
|
hfp/tensorflow-xsmm | tensorflow/contrib/tpu/python/tpu/session_support.py | Python | apache-2.0 | 15,206 | 0.005393 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2 | .0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Operations for | handling session logging and shutdown notifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from google.protobuf import text_format
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
_WATCHDOG = None
class CoordinatorShutdownException(Exception):
  """Raised when the coordinator needs to shutdown.

  NOTE(review): raised/handled by code outside this chunk; kept as a plain
  Exception subclass with no extra state.
  """
  pass
def _clone_session(session, graph=None):
  """Returns a new Session connected to the same target/config as `session`,
  optionally bound to a different `graph`."""
  return session_lib.Session(
      target=session.sess_str,
      config=session._config,  # pylint: disable=protected-access
      graph=graph if graph else session.graph)
def _make_heartbeat_op(session, device, request_ph):
  """Return a heartbeat op or None if heartbeats are not supported by device."""
  try:
    # Test if we can connect in a isolated graph + session, so that probing
    # an unsupported device does not pollute the caller's graph.
    with ops.Graph().as_default():
      with _clone_session(session) as temp_session:
        with ops.device(device):
          heartbeat_op = tpu_ops.worker_heartbeat('')
          options = config_pb2.RunOptions(timeout_in_ms=5000)
          temp_session.run(heartbeat_op, options=options)
  except errors.InvalidArgumentError as _:
    logging.warning('Error running heartbeat on %s', device)
    return None
  except errors.DeadlineExceededError as _:
    logging.warning('Timeout connecting to %s when testing heartbeat', device)
    return None

  # If we successfully connected and pinged the worker, go ahead and construct
  # the operation.
  with ops.device(device):
    return tpu_ops.worker_heartbeat(request_ph)
class WorkerHeartbeatManager(object):
  """Manages the status/heartbeat monitor for a set of workers."""

  def __init__(self, session, devices, heartbeat_ops, request_placeholder):
    """Construct a new WorkerHeartbeatManager.

    (Prefer using `WorkerHeartbeatManager.from_devices` when possible.)

    Args:
      session: `tf.Session`, session to use for heartbeat operations.
      devices: `list[string]` Set of devices to connect to.
      heartbeat_ops: `list[tf.Operation]` Heartbeat operations.
      request_placeholder: `tf.Placeholder[String]` Placeholder used to specify
        the WorkerHeartbeatRequest protocol buffer.
    """
    self._session = session
    self._devices = devices
    self._ops = heartbeat_ops
    self._request_placeholder = request_placeholder

  @staticmethod
  def from_devices(session, devices):
    """Construct a heartbeat manager for the given devices."""
    if not devices:
      logging.error('Trying to create heartbeat manager with no devices?')

    logging.info('Creating heartbeat manager for %s', devices)
    request_placeholder = array_ops.placeholder(
        name='worker_heartbeat_request', dtype=dtypes.string)

    # Keep only the devices that actually support heartbeats.
    heartbeat_ops = []
    kept_devices = []
    for device in devices:
      heartbeat_op = _make_heartbeat_op(session, device, request_placeholder)
      if heartbeat_op is not None:
        kept_devices.append(device)
        heartbeat_ops.append(heartbeat_op)
      else:
        logging.warning('Heartbeat support not available for %s', device)

    return WorkerHeartbeatManager(session, kept_devices, heartbeat_ops,
                                  request_placeholder)

  def num_workers(self):
    # Number of devices that passed the heartbeat probe.
    return len(self._devices)

  def configure(self, message):
    """Configure heartbeat manager for all devices.

    Args:
      message: `event_pb2.WorkerHeartbeatRequest`
    Returns: `None`
    """
    logging.info('Configuring worker heartbeat: %s',
                 text_format.MessageToString(message))
    self._session.run(self._ops,
                      {self._request_placeholder: message.SerializeToString()})

  def ping(self, request=None, timeout_in_ms=5000):
    """Ping all workers, returning the parsed status results."""
    if request is None:
      request = event_pb2.WorkerHeartbeatRequest()

    options = config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
    results = self._session.run(
        self._ops,
        feed_dict={self._request_placeholder: request.SerializeToString()},
        options=options)
    parsed_results = [
        event_pb2.WorkerHeartbeatResponse.FromString(res_pb)
        for res_pb in results
    ]
    logging.debug('Ping results: %s', parsed_results)
    return parsed_results

  def lame_workers(self):
    """Ping all workers, returning manager containing lame workers (or None)."""
    ping_results = self.ping()
    lame_workers = []

    for ping_response, device, op in zip(ping_results, self._devices,
                                         self._ops):
      if ping_response.health_status != event_pb2.OK:
        lame_workers.append((device, op))

    if not lame_workers:
      return None

    bad_devices, bad_ops = zip(*lame_workers)
    return WorkerHeartbeatManager(self._session, bad_devices, bad_ops,
                                  self._request_placeholder)

  def __repr__(self):
    return 'HeartbeatManager(%s)' % ','.join(self._devices)

  def shutdown(self, timeout_ms=10000):
    """Shutdown all workers after `shutdown_timeout_secs`."""
    logging.info('Shutting down %s.', self)
    req = event_pb2.WorkerHeartbeatRequest(
        watchdog_config=event_pb2.WatchdogConfig(timeout_ms=timeout_ms))
    self.configure(req)

    # Wait for workers to shutdown. This isn't strictly required
    # but it avoids triggering multiple checkpoints with the same lame worker.
    logging.info('Waiting %dms for worker shutdown.', timeout_ms)
    time.sleep(timeout_ms / 1000)
def all_worker_devices(session):
  """Return a list of devices for each worker in the system."""
  worker_devices = []
  for device in session.list_devices():
    name = device.name
    # Keep only per-worker CPU devices; the coordinator is excluded.
    if ':CPU:' in name and 'coordinator' not in name:
      worker_devices.append(name)
  return worker_devices
class WatchdogManager(threading.Thread):
"""Configures worker watchdog timer and handles periodic pings.
Usage:
# Ping workers every minute, shutting down workers if they haven't received
# a ping after 1 hour.
watchdog_manager = WatchdogManager(
ping_interval=60, shutdown_timeout=3600
)
# Use as a context manager, resetting watchdog on context exit:
with watchdog_manager:
session.run(...)
# Or setup globally; watchdog will remain active until program exit.
watchdog_manager.configure_and_run()
"""
def __init__(self,
session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Initialize a watchdog manager.
Args:
session: Session connected to worker devices. A cloned session and graph
will be created for managing worker pings.
devices: Set of devices to monitor. If none, all workers will be
monitored.
ping_interval: Time, in seconds, between watchdog pings.
shutdown_timeout: Time, in seconds, before watchdog timeout.
"""
threading.Thread.__init__(self)
self.ping_interval = ping_interval
self.shutdown_timeout = shutdown_timeout
sel |
cookinrelaxin/stupendousML | multiple_regression.py | Python | mit | 6,194 | 0.029706 | from matplotlib import pyplot as plt
import random
from math import sqrt, log
def read_csv(file_name):
    """Parse a comma-separated file into a list of row dicts keyed by the
    header row's column names. All values are kept as strings."""
    with open(file_name, 'r') as f:
        header = f.readline().strip().split(',')
        rows = []
        for line in f:
            values = line.strip().split(',')
            rows.append(dict(zip(header, values)))
    return rows
def simple_regression(input_feature, output):
    """Closed-form ordinary least squares fit of a single feature.

    Returns (intercept, slope) for the line y = intercept + slope * x.
    """
    n = len(output)
    sum_x = sum(input_feature)
    sum_y = sum(output)
    sum_xy = sum(x * y for x, y in zip(input_feature, output))
    sum_xx = sum(x * x for x in input_feature)
    slope = (sum_xy - ((sum_x * sum_y) / n)) / (sum_xx - (sum_x ** 2 / n))
    intercept = (sum_y / n) - (slope * (sum_x / n))
    return (intercept, slope)
def get_regression_predictions(input_feature, intercept, slope):
    """Apply the line y = intercept + slope * x to each feature value."""
    predictions = []
    for x in input_feature:
        predictions.append(intercept + (slope * x))
    return predictions
def get_residual_sum_of_squares(feature_matrix, output, weights):
    """RSS = sum over observations of (y_i - H_i . w)^2."""
    rss = 0
    for row, y in zip(feature_matrix, output):
        residual = y - sum(h * wt for h, wt in zip(row, weights))
        rss += residual * residual
    return rss
def inverse_regression_predictions(output, intercept, slope):
    """Invert y = intercept + slope * x, recovering x from a given y."""
    residual = output - intercept
    return residual / slope
# Load the King County housing data and coerce each CSV string field
# into its proper numeric/string type.
house_data = read_csv('kc_house_data.csv')
for point in house_data:
    point['bathrooms'] = float(point['bathrooms'])
    point['waterfront'] = int(point['waterfront'])
    point['sqft_above'] = int(point['sqft_above'])
    point['sqft_living15'] = float(point['sqft_living15'])
    point['grade'] = int(point['grade'])
    point['yr_renovated'] = int(point['yr_renovated'])
    point['price'] = float(point['price'])
    point['bedrooms'] = float(point['bedrooms'])
    point['zipcode'] = str(point['zipcode'])
    point['long'] = float(point['long'])
    point['sqft_lot15'] = float(point['sqft_lot15'])
    point['sqft_living'] = float(point['sqft_living'])
    point['floors'] = str(point['floors'])
    point['condition'] = int(point['condition'])
    point['lat'] = float(point['lat'])
    point['sqft_basement'] = int(point['sqft_basement'])
    point['yr_built'] = int(point['yr_built'])
    point['id'] = str(point['id'])
    point['sqft_lot'] = int(point['sqft_lot'])
    point['view'] = int(point['view'])
# Derived features used by the regression exercises.
for point in house_data:
    point['bedrooms_squared'] = point['bedrooms'] ** 2
    point['bed_bath_rooms'] = point['bedrooms'] * point['bathrooms']
    point['log_sqft_living'] = log(point['sqft_living'])
    point['lat_plus_long'] = point['lat'] + point['long']
def predict_outcome(feature_matrix, weights):
    """Predicted outputs H . w, one prediction per feature row."""
    return [sum(h * wt for h, wt in zip(row, weights)) for row in feature_matrix]
def dot(v, w):
    """Inner (scalar) product of two same-length vectors."""
    acc = 0
    for a, b in zip(v, w):
        acc += a * b
    return acc
def magnitude(v):
    """Euclidean (L2) norm of v."""
    return sqrt(sum(component ** 2 for component in v))
def vector_add(v, w):
    """Element-wise sum of two vectors."""
    return [a + b for a, b in zip(v, w)]
def vector_subtract(v, w):
    """Element-wise difference v - w."""
    return [a - b for a, b in zip(v, w)]
def matrix_vector_product(A, v):
    """Matrix-vector product A . v, computed row by row."""
    result = []
    for row in A:
        result.append(sum(a * x for a, x in zip(row, v)))
    return result
def transpose(A):
    """Transpose of a rectangular 2-D list (rows become columns)."""
    n_cols = len(A[0])
    return [[row[j] for row in A] for j in range(n_cols)]
def scalar_vector_product(c, v):
    """Scale every component of v by the scalar c."""
    return [c * component for component in v]
def regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance):
    """Batch gradient descent for least-squares linear regression.

    Iterates w <- w - eta * grad(RSS) until the gradient magnitude drops
    below `tolerance`.  NOTE: Python-2 `print` statement kept as-is.
    """
    y = output
    w = initial_weights
    H = feature_matrix
    H_T = transpose(H)
    eta = step_size
    gradient_magnitude = float('inf')
    while tolerance < gradient_magnitude:
        # grad RSS(w) = -2 * H^T (y - H w)
        RSS_gradient = scalar_vector_product(-2, matrix_vector_product(H_T, vector_subtract(y, matrix_vector_product(H,w))))
        # print w
        w = vector_subtract(w, scalar_vector_product(eta, RSS_gradient))
        gradient_magnitude = magnitude(RSS_gradient)
        # print RSS_gradient
        print gradient_magnitude
    return w
#def regression(feature_matrix, output):
# Deterministic shuffle, then an 80/20 train/test split.
random.seed(0)
random.shuffle(house_data)
train_data,test_data = (house_data[:int(len(house_data) * .8)],house_data[(int(len(house_data) * .8)):])
def simple_weights():
    # Fit price ~ sqft_living via gradient descent and return test-set RSS.
    simple_feature_matrix = [[1.0, point['sqft_living']] for point in train_data]
    output = [point['price'] for point in train_data]
    initial_weights = [-47000, 1.0]
    # step_size = 7e-12
    step_size = 7 * (10 ** -12)
    tolerance = 2.5e7
    simple_weights = regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, tolerance)
    #print simple_weights
    # NOTE(review): RSS is computed against the TRAINING output but the
    # TEST feature matrix -- looks inconsistent; confirm intent.
    return get_residual_sum_of_squares([[1.0, point['sqft_living']] for point in test_data], output, simple_weights)
def less_simple_weights():
    # Fit a degree-5 polynomial of `variable` against price, then plot the
    # fitted curve over a scatter of the training data.
    variable = 'bedrooms'
    degree = 5
    simple_feature_matrix = [[point[variable]**i for i in range(degree)] for point in train_data]
    output = [point['price'] for point in train_data]
    initial_weights = [1.0 for i in range(degree)]
    step_size = 1 * (10 ** -11)
    tolerance = 3e11
    simple_weights = regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, tolerance)
    print simple_weights
    # rss = get_residual_sum_of_squares([[1.0, point['bedrooms']] for point in test_data], output, simple_weights)
    plt.scatter([point[variable] for point in train_data], [point['price'] for point in train_data], s=1, alpha=.01)
    max_x = max([point[variable] for point in train_data])
    max_y = max([point['price'] for point in train_data])
    xs = []
    ys = []
    # Sample the fitted polynomial on [0, 10) for plotting.
    segment_count = 1000
    for i in range(segment_count):
        x = i*(10.0 / segment_count)
        xs.append(x)
        y = dot([x**i for i in range(degree)], simple_weights)
        #print x,y
        ys.append(y)
    # ys.append(500000.0)
    plt.plot(xs, ys, 'k-')
    plt.xlim(0,max_x)
    plt.ylim(0,max_y)
    plt.xlabel(variable)
    plt.ylabel('price USD')
    plt.title(variable+' vs. price')
    plt.show()
#print simple_weights()
# Drop a pathological outlier (a listing with 33 bedrooms) before fitting.
train_data = [point for point in train_data if point['bedrooms'] != 33]
less_simple_weights()
# intercept,slope = simple_regression(sqft_vals, price_vals)
# print 'attributes: ', [attr for attr in train_data[0].keys()]
# print intercept,slope
# print get_regression_predictions([2650], intercept, slope)
#
# plt.scatter([point['sqft_living'] for point in train_data], output)
# plt.plot([0, 14000], [simple_weights[0], simple_weights[1] * 14000], 'k-')
#
# plt.ylim(0,max(output))
# # plt.xlim(0,max(sqft_vals))
#
# plt.xlabel('sqft')
# plt.ylabel('price USD')
# plt.title('sqft vs. price')
# plt.show()
|
ericflo/awesomestream | awesomestream/repl.py | Python | bsd-3-clause | 391 | 0.007673 | import code
import sys
from awesomestream.jsonrpc import Client
def main():
    """Open an interactive shell with an AwesomeStream Client bound to `c`.

    The JSON-RPC endpoint may be given as argv[1]; defaults to localhost.
    """
    # BUGFIX: repaired a corrupted keyword ("ex cept IndexError").
    try:
        host = sys.argv[1]
    except IndexError:
        host = 'http://localhost:9997/'
    banner = """>>> from awesomestream.jsonrpc import Client
>>> c = Client('%s')""" % (host,)
    c = Client(host)
    code.interact(banner, local={'Client': Client, 'c': c})
# BUGFIX: repaired a corrupted dunder ("__ name__").
if __name__ == '__main__':
    main()
twisted/mantissa | xmantissa/test/historic/test_developerapplication1to2.py | Python | mit | 449 | 0.004454 | fr | om axiom.test.historic import stubloader
from xmantissa.webadmin import DeveloperApplication
from xmantissa.webapp import PrivateApplication
class DATestCase(stubloader.StubbedTest):
    def testUpgrade(self):
        """
        Ensure upgraded fields refer to correct items.
        """
        # BUGFIX: rejoined an assertion line that had been split by a
        # stray "|" artifact.
        self.assertEqual(
            self.store.findUnique(DeveloperApplication).privateApplication,
            self.store.findUnique(PrivateApplication))
|
arielj/danceinstitute | models/installment.py | Python | gpl-2.0 | 8,146 | 0.021728 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
import re
from datetime import datetime, date
from decimal import Decimal
from translations import _t, _a
from model import Model
from lib.query_builder import Query
from lib.money import Money
import payment
import membership
import package
import settings
class Installment(Model):
table = 'installments'
fields_for_save = ['year','month','membership_id','amount', 'status']
default_order = 'year ASC, month ASC'
def __init__(self, data=None):
    """Create an installment with default state, then let the Model base
    apply the given attribute dict.

    BUGFIX: replaced the mutable default argument `data={}` (shared across
    calls) with the None-sentinel idiom; callers see identical behavior.
    """
    if data is None:
        data = {}
    self._year = datetime.today().year
    self.month = 0                       # 0-based month index
    self.membership_id = None
    self._membership = None              # lazily-loaded Membership cache
    self._amount = 0                     # stored in cents
    self._payments = None                # lazily-loaded payments cache
    self._status = 'waiting'
    self.ignore_recharge = False
    self.ignore_second_recharge = False
    Model.__init__(self, data)
@property
def year(self):
    # Billing year as an int.
    return self._year

@year.setter
def year(self, value):
    # Coerce to int; any invalid value silently becomes 0 (legacy behavior).
    try:
        self._year = int(value)
    except:
        self._year = 0
def to_label(self):
    """Human-readable label: '<translated month name> <year>'."""
    return ' '.join([self.month_name(), str(self.year)])
@property
def amount(self):
    # Stored internally in cents; exposed in currency units.
    return self._amount/100

@amount.setter
def amount(self,value):
    # Store as integer cents; invalid values silently become 0.
    try:
        v = int(Decimal(value)*100)
    except:
        v = 0
    self._amount = v
def description(self):
    # "<class/package name> <month name> <year>".
    return self.membership.klass_or_package.name + ' ' + self.month_name() + ' ' + str(self.year)
def paid(self):
    """Total amount already paid across this installment's payments."""
    total_paid = 0
    for p in self.payments:
        total_paid += p.amount
    return total_paid
def is_paid(self):
    # Any status other than 'waiting' counts as paid.
    return self._status != 'waiting'
def total(self, ignore_recharge = None, ignore_second_recharge = None):
    # Base amount plus any late-payment recharge.
    # NOTE: passing the ignore_* flags persistently mutates instance state.
    if ignore_recharge is not None: self.ignore_recharge = ignore_recharge
    if ignore_second_recharge is not None: self.ignore_second_recharge = ignore_second_recharge
    return self.amount+self.get_recharge()
def get_recharge(self, after_day = None, recharge_value = None, second_recharge_value = None):
    """Late-payment surcharge for this installment.

    A recharge value of the form 'N%' means N percent of the base amount;
    a plain digit string means a flat surcharge. Returns 0 when paid or
    when the corresponding ignore_* flag is set.
    """
    sets = settings.Settings.get_settings()
    if after_day is None: after_day = sets.recharge_after
    if recharge_value is None: recharge_value = sets.recharge_value
    if second_recharge_value is None: second_recharge_value = sets.second_recharge_value
    recharge = 0
    sets = settings.Settings.get_settings()
    today = self.__class__._today().date()
    beginning_of_month = date(today.year, today.month, 1)
    if self._status != 'paid':
        rv = ''
        # Second (harsher) recharge once a full month has started past the
        # due date; otherwise the first recharge after `after_day`.
        if second_recharge_value != '' and self.date() < beginning_of_month and not self.ignore_second_recharge:
            rv = second_recharge_value
        elif recharge_value != '' and self.date(after_day) < today and not self.ignore_recharge:
            rv = recharge_value
        if rv != '':
            if re.match('^\d+%$',rv):
                recharge = self.amount*(int(rv[0:-1]))/100
            elif re.match('^\d+$',rv):
                recharge = int(rv)
    return recharge
def date(self, after_day = None):
    # Due date: day `after_day` of this installment's month (self.month is
    # 0-based, hence the +1 when formatting).
    if after_day is None: after_day = settings.Settings.get_settings().recharge_after
    return datetime.strptime(str(self.year)+"-"+str(self.month+1)+"-"+str(after_day),'%Y-%m-%d').date()
def detailed_total(self):
    # Display string "$<amount>(+<recharge>)"; for installments paid with
    # interests, show the surplus actually paid over the base amount.
    if self._status != 'paid_with_interests':
        recharge = self.get_recharge()
        recharge = '(+'+str(recharge)+')' if recharge > 0 else ''
    else:
        recharge = '(+'+str(self.paid() - self.amount)+')'
    return '$'+str(self.amount)+recharge
def detailed_to_pay(self):
    """Outstanding balance formatted as '$<n>'."""
    balance = self.to_pay()
    return '$' + str(balance)
def to_pay(self, ignore_recharge=None, ignore_second_recharge=None):
    """Outstanding balance (total minus payments already made)."""
    if ignore_recharge is not None:
        self.ignore_recharge = ignore_recharge
    if ignore_second_recharge is not None:
        self.ignore_second_recharge = ignore_second_recharge
    return self.total() - self.paid()
def month_name(self):
    """Localized month name for the zero-based `self.month` index."""
    months = _t('months')
    return months[self.month]
@property
def status(self):
    # Display form of the raw status code, translated via the _a helper
    # using this model's class name as the namespace.
    return _a(self.cls_name(), self._status)
@status.setter
def status(self, value):
    # Stores the raw status code; translation happens in the getter.
    self._status = value
@property
def membership(self):
    """Lazily fetch and cache the owning Membership record."""
    if self._membership is None and self.membership_id:
        self._membership = membership.Membership.find(self.membership_id)
    return self._membership
@membership.setter
def membership(self, value):
    """Set the owning membership, keeping the id and cached object in sync."""
    self.membership_id = None if value is None else value.id
    self._membership = value
@property
def payments(self):
    """Payments attached to this installment, loaded on first access."""
    if self._payments is None:
        self._payments = payment.Payment.for_installment(self.id).do_get()
    return self._payments
def to_db(self):
    """Dict of the persisted columns for this installment."""
    return {
        'year': self.year,
        'month': self.month,
        'membership_id': self.membership_id,
        'amount': self.amount,
        'status': self._status,
    }
def _is_valid(self):
    """Declare validation rules: month in 0..11, amount non-negative."""
    # NOTE(review): 'numericallity'/'great_than_or_equal' mirror the
    # validation framework's own keyword names -- do not "fix" the
    # spelling here without changing the framework.
    self.validate_numericallity_of('month', great_than_or_equal = 0, less_than_or_equal = 11)
    self.validate_numericallity_of('amount', great_than_or_equal = 0, only_integer = False)
def add_payment(self, data = None):
    """Attach a new payment to this installment.

    Returns the saved Payment on success, its validation errors when
    saving fails, or an error string when the amount exceeds the
    outstanding balance.
    """
    if data is None: data = {}
    if 'ignore_recharge' in data: self.ignore_recharge = data['ignore_recharge']
    # Default to paying off the full outstanding balance.
    if 'amount' not in data: data['amount'] = self.to_pay()
    amount = Money(data['amount'])
    if amount <= self.to_pay():
        data['installment_id'] = self.id
        p = payment.Payment(data)
        p.user = self.get_student()
        if p.save():
            # Re-derive waiting/paid/paid_with_interests from the new balance.
            self.update_status()
            return p
        else:
            return p.full_errors()
    else:
        # User-facing message (Spanish): cannot pay more than the balance.
        return "No se puede agregar un pago con mayor valor que el resto a pagar. Saldo: " + str(self.to_pay()) + ", Ingresado: " + str(amount)
def update_status(self):
    """Recompute `_status` from the attached payments and persist it.

    Clears the cached payments list so the balance is re-read, then marks
    the installment 'paid' (or 'paid_with_interests' when a recharge
    applied) once the outstanding balance reaches zero.
    """
    self._status = 'waiting'
    self._payments = None
    if int(self.to_pay()) == 0:
        if self.get_recharge() > 0 and self.ignore_recharge is False:
            self._status = 'paid_with_interests'
        else:
            self._status = 'paid'
    self.save()
def get_student_id(self):
    """Id of the owning student, or None when no student is resolvable."""
    student = self.get_student()
    return student.id if student else None
def get_student(self):
    # The student is reached through the membership relation.
    return self.membership.student
def payments_details(self):
    """One line per attached payment, newline separated."""
    return "\n".join(p.to_s() for p in self.payments)
def build_payment(self, data=None):
    """Create a Payment linked to this installment (not yet saved).

    The mutable default argument ``data={}`` was replaced with None: the
    dict is handed to Payment, so a shared default could be mutated and
    leak state between calls.
    """
    if data is None:
        data = {}
    p = payment.Payment(data)
    p.installment = self
    self.payments.append(p)
    return p
@classmethod
def for_membership(cls,membership_id):
    """Query installments belonging to the given membership."""
    return cls.where('membership_id', membership_id)
def before_delete(self):
    """Detach payments before this installment is deleted."""
    for p in self.payments:
        # NOTE(review): the self-assignment presumably forces the payment's
        # description to be materialized before the installment link is
        # cleared -- confirm against Payment.description's getter.
        p.description = p.description
        p.installment = None
        # Skip validation: the payment may be invalid without its installment.
        p.save(validate=False)
    return True
@classmethod
def for_klass(cls, klass, q = None):
    """Query installments of a class, whether bought directly or through a
    package that includes the class."""
    if q is None: q = Query(cls)
    where = 'memberships.for_id = :klass_id AND memberships.for_type = "Klass"'
    args = {'klass_id': klass.id}
    packages = package.Package.with_klass(klass)
    if packages.anything():
        # Also match memberships purchased via any package containing it.
        p_ids = ','.join(map(lambda p: str(p.id), packages))
        where = '('+where+') OR (memberships.for_id IN ({0}) AND memberships.for_type = "Package")'.format(p_ids)
    return q.set_join('LEFT JOIN memberships ON memberships.id = installments.membership_id').where(where,args)
@classmethod
def only_active_users(cls, q=None):
    """Restrict `q` to installments whose student is not inactive."""
    if q is None:
        q = Query(cls)
    join = ('LEFT JOIN memberships ON memberships.id = installments.membership_id '
            'LEFT JOIN users ON memberships.student_id = users.id')
    return q.set_join(join).where('users.inactive = 0')
@classmethod
def overdues(cls, recharge_after = None, q = None):
    """Waiting installments due on or before the last chargeable month.

    An installment for month M only becomes overdue once the current date
    has passed day `recharge_after` of the following month.
    """
    if q is None: q = Query(cls)
    today = cls._today()
    if recharge_after is None: recharge_after = settings.Settings.get_settings().recharge_after
    # Months are stored zero-based (0..11).
    month = today.month-1
    year = today.year
    # Grace day not reached yet: the previous month is not overdue either.
    if today.day <= recharge_after: month = month-1
    if month == -1:
        # Wrap from January back to December of the previous year.
        month = 11
        year = year-1
    return q.where('status = "waiting" AND ((year = :year AND month <= :month) OR year < :year)', {'year': year, 'month': month})
@classmethod
def to_pay_for(cls,user):
    """All waiting installments for a student or anyone in their family,
    oldest first."""
    today = cls._today()  # NOTE(review): unused in this method -- confirm before removing
    w = 'status = "waiting" AND (memberships.student_id = :student_id OR users.family = :family)'
    args = {'student_id': user.id, 'family': user.family}
    return cls.where(w,args).set_join('LEFT JOIN memberships ON memberships.id = installments.membership_id LEFT JOIN users ON memberships.student_id = users.id').order_by('year ASC, month ASC')
@classmethod
def _today(cls):
    # Single seam for "now"; presumably overridden in tests to freeze
    # time -- confirm against the test suite.
    return datetime.today()
|
ivanromakh/openprocurement.tender.limited | setup.py | Python | apache-2.0 | 1,687 | 0.000593 | from setuptools import setup, find_packages
import os
version = '2.3.14'

# Runtime dependencies.
requires = [
    'setuptools',
    'openprocurement.api>=2.3',
    'openprocurement.tender.openua',
]
# Extra dependencies for running the test suite.
test_requires = requires + [
    'webtest',
    'python-coveralls',
]
# Extra dependencies for building the documentation.
docs_requires = requires + [
    'sphinxcontrib-httpdomain',
]
# Tender-procedure plugins registered with openprocurement.api.
entry_points = {
    'openprocurement.api.plugins': [
        'reporting = openprocurement.tender.limited:includeme',
        'negotiation = openprocurement.tender.limited:includeme_negotiation',
        'negotiation.quick = openprocurement.tender.limited:includeme_negotiation_quick'
    ]
}
# Use the README as the long description on PyPI.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
    README = f.read()

setup(name='openprocurement.tender.limited',
      version=version,
      description="",
      long_description=README,
      # Get more strings from
      # http://pypi.python.org/pypi?:action=list_classifiers
      classifiers=[
          "License :: OSI Approved :: Apache Software License",
          "Programming Language :: Python",
      ],
      keywords="web services",
      author='Quintagroup, Ltd.',
      author_email='info@quintagroup.com',
      url='https://github.com/openprocurement/openprocurement.tender.limited',
      license='Apache License 2.0',
      packages=find_packages(exclude=['ez_setup']),
      namespace_packages=['openprocurement', 'openprocurement.tender'],
      include_package_data=True,
      zip_safe=False,
      install_requires=requires,
      extras_require={'test': test_requires, 'docs': docs_requires},
      test_suite="openprocurement.tender.limited.tests.main.suite",
      entry_points=entry_points)
|
rohitranjan1991/home-assistant | homeassistant/components/broadlink/const.py | Python | mit | 728 | 0 | """Constants."""
from homeassistant.const import Platform
DOMAIN = "broadlink"

# Supported device model codes, grouped by the Home Assistant platform
# they are exposed on.  A model may appear under several platforms.
DOMAINS_AND_TYPES = {
    Platform.REMOTE: {"RM4MINI", "RM4PRO", "RMMINI", "RMMINIB", "RMPRO"},
    Platform.SENSOR: {
        "A1",
        "RM4MINI",
        "RM4PRO",
        "RMPRO",
        "SP2S",
        "SP3S",
        "SP4",
        "SP4B",
    },
    Platform.SWITCH: {
        "BG1",
        "MP1",
        "RM4MINI",
        "RM4PRO",
        "RMMINI",
        "RMMINIB",
        "RMPRO",
        "SP1",
        "SP2",
        "SP2S",
        "SP3",
        "SP3S",
        "SP4",
        "SP4B",
    },
    Platform.LIGHT: {"LB1", "LB2"},
}

# Every model code supported by at least one platform.
DEVICE_TYPES = set.union(*DOMAINS_AND_TYPES.values())

# Default network port and timeout (seconds) for device connections.
DEFAULT_PORT = 80
DEFAULT_TIMEOUT = 5
|
brettchien/PyBLEWrapper | pyble/osx/IOBluetooth.py | Python | mit | 2,261 | 0.000885 | import objc
# Load IOBluetooth
# Load IOBluetooth classes into this module's namespace via PyObjC.
objc.loadBundle('IOBluetooth', globals(),
                bundle_path=objc.pathForFramework("IOBluetooth.framework"))

# IOBluetooth CBCentralManager state CONSTS
CBCentralManagerStateUnkown = 0
CBCentralManagerStateResetting = 1
CBCentralManagerStateUnsupported = 2
CBCentralManagerStateUnauthorized = 3
CBCentralManagerStatePoweredOff = 4
CBCentralManagerStatePoweredOn = 5

# CBCentralManager option keys
CBCentralManagerScanOptionAllowDuplicatesKey = u"kCBScanOptionAllowDuplicates"
CBConnectPeripheralOptionNotifyOnDisconnectionKey = u"kCBConnectOptionNotifyOnDisconnection"

# CBCharacteristicWriteType CONSTS
CBCharacteristicWriteWithResponse = 0
CBCharacteristicWriteWithoutResponse = 1

# Advertisement Data Retrieval Keys
CBAdvertisementDataLocalNameKey = u"kCBAdvDataLocalName"
CBAdvertisementDataManufacturerDataKey = u"kCBAdvDataManufacturerData"
CBAdvertisementDataServiceDataKey = u"kCBAdvDataServiceData"
CBAdvertisementDataServiceUUIDsKey = u"kCBAdvDataServiceUUIDs"
CBAdvertisementDataOverflowServiceUUIDsKey = u"kCBAdvDataOverflowService"
CBAdvertisementDataTxPowerLevelKey = u"kCBAdvDataTxPowerLevel"
CBAdvertisementDataIsConnectable = u"kCBAdvDataIsConnectable"
CBAdvertisementDataSolicitedServiceUUIDsKey = u"kCBAdvDataSolicitedServiceUUIDs"

# CBError Constants
CBErrorUnknown = 0
CBErrorInvalidParameters = 1
CBErrorInvalidHandle = 2
CBErrorNotConnected = 3
CBErrorOutOfSpace = 4
CBErrorOperationCancelled = 5
CBErrorConnectionTimeout = 6
CBErrorPeripheralDisconnected = 7
CBErrorUUIDNotAllowed = 8
CBErrorAlreadyAdvertising = 9

# CBATTError Constants (Bluetooth ATT protocol error codes)
CBATTErrorSuccess = 0x00
CBATTErrorInvalidHandle = 0x01
CBATTErrorReadNotPermitted = 0x02
CBATTErrorWriteNotPermitted = 0x03
CBATTErrorInvalidPdu = 0x04
CBATTErrorInsufficientAuthentication = 0x05
CBATTErrorRequestNotSupported = 0x06
CBATTErrorInvalidOffset = 0x07
CBATTErrorInsufficientAuthorization = 0x08
CBATTErrorPrepareQueueFull = 0x09
CBATTErrorAttributeNotFound = 0x0A
CBATTErrorAttributeNotLong = 0x0B
CBATTErrorInsufficientEncryptionKeySize = 0x0C
CBATTErrorInvalidAttributeValueLength = 0x0D
CBATTErrorUnlikelyError = 0x0E
CBATTErrorInsufficientEncription = 0x0F
CBATTErrorUnsupportedGroupType = 0x10
CBATTErrorInsufficientResources = 0x11
|
Joble/CumulusCI | cumulusci/core/keychain.py | Python | bsd-3-clause | 12,877 | 0.002485 | import base64
import json
import os
import pickle
from Crypto import Random
from Crypto.Cipher import AES
from cumulusci.core.config import BaseConfig
from cumulusci.core.config import ConnectedAppOAuthConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.config import ScratchOrgConfig
from cumulusci.core.config import ServiceConfig
from cumulusci.core.exceptions import OrgNotFound
from cumulusci.core.exceptions import ServiceNotConfigured
from cumulusci.core.exceptions import ServiceNotValid
from cumulusci.core.exceptions import KeychainConnectedAppNotFound
class BaseProjectKeychain(BaseConfig):
    """In-memory keychain storing org, service and connected-app configs.

    Subclasses override ``_load_keychain`` (and the ``_set_*``/``_get_*``
    hooks) to add persistence and/or encryption.
    """
    encrypted = False

    def __init__(self, project_config, key):
        super(BaseProjectKeychain, self).__init__()
        self.config = {
            'orgs': {},
            'app': None,
            'services': {},
        }
        self.project_config = project_config
        self.key = key
        self._load_keychain()

    def _load_keychain(self):
        """ Subclasses can override to implement logic to load the keychain """
        pass

    def change_key(self, key):
        """ re-encrypt stored services, orgs, and the connected_app
        with the new key """
        # Decrypt everything with the old key first...
        connected_app = self.get_connected_app()
        services = {}
        for service_name in self.list_services():
            services[service_name] = self.get_service(service_name)
        orgs = {}
        for org_name in self.list_orgs():
            orgs[org_name] = self.get_org(org_name)
        # ...then swap the key and store everything again.
        self.key = key
        if connected_app:
            self.set_connected_app(connected_app)
        if orgs:
            for org_name, org_config in orgs.items():
                self.set_org(org_name, org_config)
        if services:
            for service_name, service_config in services.items():
                self.set_service(service_name, service_config)

    def set_connected_app(self, app_config, project=False):
        """ store a connected_app configuration """
        self._set_connected_app(app_config, project)
        self._load_keychain()

    def _set_connected_app(self, app_config, project):
        self.app = app_config

    def get_connected_app(self):
        """ retrieve the connected app configuration """
        return self._get_connected_app()

    def _get_connected_app(self):
        return self.app

    def set_org(self, name, org_config, global_org=False):
        """ store an org configuration under the given name """
        if isinstance(org_config, ScratchOrgConfig):
            # Flag scratch orgs so they round-trip to the right config class.
            org_config.config['scratch'] = True
        self._set_org(name, org_config, global_org)
        self._load_keychain()

    def _set_org(self, name, org_config, global_org):
        self.orgs[name] = org_config

    def get_default_org(self):
        """ retrieve the name and configuration of the default org """
        for org in self.list_orgs():
            org_config = self.get_org(org)
            if org_config.default:
                return org, org_config
        return None, None

    def set_default_org(self, name):
        """ set the default org for tasks by name key """
        org = self.get_org(name)
        self.unset_default_org()
        org.config['default'] = True
        self.set_org(name, org)

    def unset_default_org(self):
        """ unset the default orgs for tasks """
        for org in self.list_orgs():
            org_config = self.get_org(org)
            if org_config.default:
                del org_config.config['default']
                self.set_org(org, org_config)

    def get_org(self, name):
        """ retrieve an org configuration by name key """
        if name not in self.orgs:
            self._raise_org_not_found(name)
        return self._get_org(name)

    def _get_org(self, name):
        return self.orgs.get(name)

    def _raise_org_not_found(self, name):
        raise OrgNotFound('Org named {} was not found in keychain'.format(name))

    def list_orgs(self):
        """ list the orgs configured in the keychain, sorted by name """
        # sorted() works on both Python 2 and 3, unlike keys().sort().
        return sorted(self.orgs.keys())

    def set_service(self, name, service_config, project=False):
        """ Store a ServiceConfig in the keychain """
        if name not in self.project_config.services:
            self._raise_service_not_valid(name)
        self._validate_service(name, service_config)
        self._set_service(name, service_config, project)
        self._load_keychain()

    def _set_service(self, name, service_config, project):
        self.services[name] = service_config

    def get_service(self, name):
        """ Retrieve a stored ServiceConfig from the keychain or exception

        :param name: the service name to retrieve
        :type name: str

        :rtype ServiceConfig
        :return the configured Service
        """
        if name not in self.project_config.services:
            self._raise_service_not_valid(name)
        if name not in self.services:
            self._raise_service_not_configured(name)
        return self._get_service(name)

    def _get_service(self, name):
        return self.services.get(name)

    def _validate_service(self, name, service_config):
        # Collect required attributes declared in the project config that
        # the given service_config does not provide.
        missing_required = []
        attr_key = 'services__{0}__attributes'.format(name)
        for atr, config in getattr(self.project_config, attr_key).iteritems():
            if config.get('required') is True and not getattr(service_config, atr):
                missing_required.append(atr)
        if missing_required:
            self._raise_service_not_valid(name)

    def _raise_service_not_configured(self, name):
        raise ServiceNotConfigured(
            'Service named {} is not configured for this project'.format(name)
        )

    def _raise_service_not_valid(self, name):
        raise ServiceNotValid('Service named {} is not valid for this project'.format(name))

    def list_services(self):
        """ list the services configured in the keychain, sorted by name """
        return sorted(self.services.keys())
class EnvironmentProjectKeychain(BaseProjectKeychain):
    """ A project keychain that stores org credentials in environment variables """
    encrypted = False
    # Environment variable prefixes/keys that hold the JSON payloads.
    org_var_prefix = 'CUMULUSCI_ORG_'
    app_var = 'CUMULUSCI_CONNECTED_APP'
    service_var_prefix = 'CUMULUSCI_SERVICE_'

    def _load_keychain(self):
        # Populate app, orgs and services from the process environment.
        self._load_keychain_app()
        self._load_keychain_orgs()
        self._load_keychain_services()

    def _load_keychain_app(self):
        # The connected app config is one JSON blob in CUMULUSCI_CONNECTED_APP.
        app = os.environ.get(self.app_var)
        if app:
            self.app = ConnectedAppOAuthConfig(json.loads(app))

    def _load_keychain_orgs(self):
        # Each CUMULUSCI_ORG_<name> variable holds one org's JSON config.
        for key, value in os.environ.items():
            if key.startswith(self.org_var_prefix):
                org_config = json.loads(value)
                # Scratch orgs get a dedicated config class.
                if org_config.get('scratch'):
                    self.orgs[key[len(self.org_var_prefix):]] = ScratchOrgConfig(json.loads(value))
                else:
                    self.orgs[key[len(self.org_var_prefix):]] = OrgConfig(json.loads(value))

    def _load_keychain_services(self):
        # Each CUMULUSCI_SERVICE_<name> variable holds one service's JSON config.
        for key, value in os.environ.items():
            if key.startswith(self.service_var_prefix):
                self.services[key[len(self.service_var_prefix):]] = ServiceConfig(json.loads(value))
# AES block size in bytes.
BS = 16


def pad(s):
    """PKCS#7-pad *s* to a multiple of the AES block size.

    A full block of padding is added when *s* is already block-aligned,
    so the padding is always removable.
    """
    n = BS - len(s) % BS
    return s + n * chr(n)


def unpad(s):
    """Strip the PKCS#7 padding added by :func:`pad`."""
    return s[0:-ord(s[-1])]
class BaseEncryptedProjectKeychain(BaseProjectKeychain):
""" Base class for building project keychains that use AES encryption for securing stored org credentials """
encrypted = True
def _set_connected_app(self, app_config, project):
encrypted = self._encrypt_config(app_config)
self._set_encrypted_connected_app(encrypted, project)
def _set_encrypted_connected_app(self, encrypted, project):
self.app = encrypted
def _get_connected_app(self):
if self.app:
return self._decrypt_config(ConnectedAppOAuthConfig, self.app)
def _get_service(self, name):
return self._decrypt_config(ServiceConfig, self.services[name])
def _set_service(self, service, service_config, project):
encrypted = se |
sesh/flexx | setup.py | Python | bsd-2-clause | 2,298 | 0.000435 | # -*- coding: utf-8 -*-
""" Flexx setup script.
"""
import os
from os import path as op
try:
# use setuptools namespace, allows for "develop"
import setuptools # noqa, analysis:ignore
except ImportError:
pass # it's not essential for installation
from distutils.core import setup
name = 'flexx'
description = "Pure Python toolkit for creating GUI's using web technology."
# Get version and docstring from flexx/__init__.py without importing the
# package (importing could pull in dependencies not yet installed).
__version__ = None
__doc__ = ''
docStatus = 0  # Not started, in progress, done
initFile = os.path.join(os.path.dirname(__file__), name, '__init__.py')
for line in open(initFile).readlines():
    if (line.startswith('version_info') or line.startswith('__version__')):
        # Execute the assignment line to bind __version__/version_info here.
        exec(line.strip())
    elif line.startswith('"""'):
        if docStatus == 0:
            # Opening triple quote: start collecting the module docstring.
            docStatus = 1
            line = line.lstrip('"')
        elif docStatus == 1:
            # Closing triple quote: stop collecting.
            docStatus = 2
    if docStatus == 1:
        __doc__ += line
def package_tree(pkgroot):
    """Return dotted names of all packages under *pkgroot*.

    A directory counts as a package when it contains an ``__init__.py``.
    Paths are made relative to this file's directory and joined with dots.
    """
    base = os.path.dirname(__file__)
    names = []
    for dirpath, dirnames, filenames in os.walk(os.path.join(base, pkgroot)):
        if '__init__.py' in filenames:
            names.append(os.path.relpath(dirpath, base).replace(os.path.sep, '.'))
    return names
setup(
    name=name,
    version=__version__,
    author='Flexx contributors',
    author_email='almar.klein@gmail.com',
    license='(new) BSD',
    url='http://flexx.readthedocs.org',
    download_url='https://pypi.python.org/pypi/flexx',
    keywords="ui design, web runtime, pyscript, reactive programming, FRP",
    description=description,
    long_description=__doc__,
    platforms='any',
    provides=[name],
    install_requires=[],
    packages=package_tree(name),
    package_dir={name: name},
    package_data={},
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Education',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        # 'Programming Language :: Python :: 2.7',  # not yet supported
        'Programming Language :: Python :: 3.4',
    ],
)
|
AndKe/MAVProxy | MAVProxy/modules/mavproxy_link.py | Python | gpl-3.0 | 44,637 | 0.003562 | #!/usr/bin/env python
'''enable run-time addition and removal of master link, just like --master on the cnd line'''
''' TO USE:
link add 10.11.12.13:14550
link list
link remove 3 # to remove 3rd output
'''
from pymavlink import mavutil
import time, struct, math, sys, fnmatch, traceback, json, os
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import mp_util
if mp_util.has_wxpython:
from MAVProxy.modules.lib.mp_menu import *
from MAVProxy.modules.lib.wx_addlink import MPMenulinkAddDialog
# Message types that carry bulk/raw data rather than telemetry state.
dataPackets = frozenset(['BAD_DATA','LOG_DATA'])
# Message types whose handling may be deferred without losing state.
delayedPackets = frozenset([ 'MISSION_CURRENT', 'SYS_STATUS', 'VFR_HUD',
                             'GPS_RAW_INT', 'SCALED_PRESSURE', 'GLOBAL_POSITION_INT',
                             'NAV_CONTROLLER_OUTPUT' ])
# Message types that indicate a link is alive.
activityPackets = frozenset([ 'HEARTBEAT', 'GPS_RAW_INT', 'GPS_RAW', 'GLOBAL_POSITION_INT', 'SYS_STATUS' ])
# Radio link quality report messages.
radioStatusPackets = frozenset([ 'RADIO', 'RADIO_STATUS'])

# Glob patterns for USB serial ports likely to be autopilots, in order of
# preference for auto-detection.
preferred_ports = [
    '*FTDI*',
    "*Arduino_Mega_2560*",
    "*3D*",
    "*USB_to_UART*",
    '*Ardu*',
    '*PX4*',
    '*Hex_*',
    '*Holybro_*',
    '*mRo*',
    '*FMU*',
    '*Swift-Flyer*',
]
class LinkModule(mp_module.MPModule):
def __init__(self, mpstate):
    """Register the link/vehicle/alllinks commands, completions and menu."""
    super(LinkModule, self).__init__(mpstate, "link", "link control", public=True, multi_vehicle=True)
    self.add_command('link', self.cmd_link, "link control",
                     ["<list|ports|resetstats>",
                      'add (SERIALPORT)',
                      'attributes (LINK) (ATTRIBUTES)',
                      'remove (LINKS)',
                      'dataratelogging (DLSTATE)',
                      'hl (HLSTATE)'])
    self.add_command('vehicle', self.cmd_vehicle, "vehicle control")
    self.add_command('alllinks', self.cmd_alllinks, "send command on all links", ["(COMMAND)"])
    # Message types never forwarded to other outputs.
    self.no_fwd_types = set()
    self.no_fwd_types.add("BAD_DATA")
    self.add_completion_function('(SERIALPORT)', self.complete_serial_ports)
    self.add_completion_function('(LINKS)', self.complete_links)
    self.add_completion_function('(LINK)', self.complete_links)
    self.add_completion_function('(HLSTATE)', self.complete_hl)
    self.add_completion_function('(DLSTATE)', self.complete_dl)
    self.last_altitude_announce = 0.0
    self.vehicle_list = set()        # sysids seen on the links
    self.high_latency = False        # high-latency (HL) mode toggle
    self.datarate_logging = False    # CSV path when datarate logging is on
    self.datarate_logging_timer = mavutil.periodic_event(1)  # 1 Hz
    self.old_streamrate = 0
    self.old_streamrate2 = 0
    self.menu_added_console = False
    if mp_util.has_wxpython:
        # GUI menu entries (only when wx is available).
        self.menu_rm = MPMenuSubMenu('Remove', items=[])
        self.menu = MPMenuSubMenu('Link',
                                  items=[MPMenuItem('Add...', 'Add...', '# link add ', handler=MPMenulinkAddDialog()),
                                         self.menu_rm,
                                         MPMenuItem('Ports', 'Ports', '# link ports'),
                                         MPMenuItem('List', 'List', '# link list'),
                                         MPMenuItem('Status', 'Status', '# link')])
    self.last_menu_update = 0
def idle_task(self):
    '''called on idle'''
    if mp_util.has_wxpython:
        if self.module('console') is not None:
            if not self.menu_added_console:
                self.menu_added_console = True
                # we don't dynamically update these yet due to a wx bug
                self.menu_rm.items = [ MPMenuItem(p, p, '# link remove %s' % p) for p in self.complete_links('') ]
                self.module('console').add_menu(self.menu)
        else:
            self.menu_added_console = False
    # Keep each master link's source system/component in sync with settings.
    for m in self.mpstate.mav_master:
        m.source_system = self.settings.source_system
        m.mav.srcSystem = m.source_system
        m.mav.srcComponent = self.settings.source_component
    # don't let pending statustext wait forever for last chunk:
    for src in self.status.statustexts_by_sysidcompid:
        msgids = list(self.status.statustexts_by_sysidcompid[src].keys())
        for msgid in msgids:
            pending = self.status.statustexts_by_sysidcompid[src][msgid]
            if time.time() - pending.last_chunk_time > 1:
                self.emit_accumulated_statustext(src, msgid, pending)
    # datarate logging if enabled, at 1 Hz
    if self.datarate_logging_timer.trigger() and self.datarate_logging:
        with open(self.datarate_logging, 'a') as logfile:
            # CSV columns: time, label, link number, packet count,
            # byte count, link delay (s), packet loss (%).
            for master in self.mpstate.mav_master:
                highest_msec_key = (self.target_system, self.target_component)
                linkdelay = (self.status.highest_msec.get(highest_msec_key, 0) - master.highest_msec.get(highest_msec_key, 0))*1.0e-3
                logfile.write(str(time.strftime("%H:%M:%S")) + "," +
                              str(self.link_label(master)) + "," +
                              str(master.linknum) + "," +
                              str(self.status.counters['MasterIn'][master.linknum]) + "," +
                              str(self.status.bytecounters['MasterIn'][master.linknum].total()) + "," +
                              str(linkdelay) + "," +
                              str(100 * round(master.packet_loss(), 3)) + "\n")
def complete_serial_ports(self, text):
    '''return list of serial ports'''
    detected = mavutil.auto_detect_serial(preferred_list=preferred_ports)
    return [port.device for port in detected]
def complete_hl(self, text):
    '''return list of hl options'''
    return ['on', 'off']
def complete_dl(self, text):
    '''return list of datarate_logging options'''
    return ['on', 'off']
def complete_links(self, text):
    '''return list of link addresses and labels for completion'''
    try:
        # The original built the address list twice (a comprehension plus a
        # second append loop), so every address appeared twice in the
        # completion list.  Build it once, adding the label when present.
        ret = []
        for m in self.mpstate.mav_master:
            ret.append(m.address)
            if hasattr(m, 'label'):
                ret.append(m.label)
        return ret
    except Exception as e:
        print("Caught exception: %s" % str(e))
def cmd_link(self, args):
    '''handle link commands'''
    # Dispatch on the first word of the sub-command; no args shows status.
    if len(args) < 1:
        self.show_link()
    elif args[0] == "list":
        self.cmd_link_list()
    elif args[0] == "hl":
        self.cmd_hl(args[1:])
    elif args[0] == "dataratelogging":
        self.cmd_dl(args[1:])
    elif args[0] == "add":
        if len(args) != 2:
            print("Usage: link add LINK")
            print('Usage: e.g. link add 127.0.0.1:9876')
            print('Usage: e.g. link add 127.0.0.1:9876:{"label":"rfd900"}')
            return
        self.cmd_link_add(args[1:])
    elif args[0] == "attributes":
        if len(args) != 3:
            print("Usage: link attributes LINK ATTRIBUTES")
            print('Usage: e.g. link attributes rfd900 {"label":"bob"}')
            return
        self.cmd_link_attributes(args[1:])
    elif args[0] == "ports":
        self.cmd_link_ports()
    elif args[0] == "remove":
        if len(args) != 2:
            print("Usage: link remove LINK")
            return
        self.cmd_link_remove(args[1:])
    elif args[0] == "resetstats":
        self.reset_link_stats()
    else:
        print("usage: link <list|add|remove|attributes|hl|dataratelogging|resetstats>")
def cmd_dl(self, args):
'''Toggle datarate logging'''
if len(args) < 1:
print("Datarate logging is " + ("on" if self.datarate_logging else "off"))
return
elif args[0] == "on":
self.datarate_logging = os.path.join(self.logdir, "dataratelog.csv")
print("Datarate Logging ON, logfile: " + self.datarate_logging)
# Open a new file handle (don't append) for logging
with open(self.datarate_loggi |
olivierdalang/stdm | third_party/sqlalchemy/engine/url.py | Python | gpl-2.0 | 8,113 | 0.000123 | # engine/url.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates
information about a database connection specification.
The URL object is created automatically when
:func:`~sqlalchemy.engine.create_engine` is called with a string
argument; alternatively, the URL is a public-facing construct which can
be used directly and is also accepted directly by ``create_engine()``.
"""
import re
from .. import exc, util
from . import Dialect
from ..dialects import registry
class URL(object):
    """
    Represent the components of a URL used to connect to a database.

    This object is suitable to be passed directly to a
    :func:`~sqlalchemy.create_engine` call.  The fields of the URL are parsed
    from a string by the :func:`.make_url` function.  the string
    format of the URL is an RFC-1738-style string.

    All initialization parameters are available as public attributes.

    :param drivername: the name of the database backend.
      This name will correspond to a module in sqlalchemy/databases
      or a third party plug-in.

    :param username: The user name.

    :param password: database password.

    :param host: The name of the host.

    :param port: The port number.

    :param database: The database name.

    :param query: A dictionary of options to be passed to the
      dialect and/or the DBAPI upon connect.

    """

    def __init__(self, drivername, username=None, password=None,
                 host=None, port=None, database=None, query=None):
        self.drivername = drivername
        self.username = username
        self.password = password
        self.host = host
        if port is not None:
            self.port = int(port)
        else:
            self.port = None
        self.database = database
        self.query = query or {}

    def __to_string__(self, hide_password=True):
        # Assemble an RFC 1738 string, masking the password by default.
        s = self.drivername + "://"
        if self.username is not None:
            s += _rfc_1738_quote(self.username)
            if self.password is not None:
                s += ':' + ('***' if hide_password
                            else _rfc_1738_quote(self.password))
            s += "@"
        if self.host is not None:
            if ':' in self.host:
                # IPv6 literal hosts are bracketed.
                s += "[%s]" % self.host
            else:
                s += self.host
        if self.port is not None:
            s += ':' + str(self.port)
        if self.database is not None:
            s += '/' + self.database
        if self.query:
            keys = list(self.query)
            keys.sort()
            s += '?' + "&".join("%s=%s" % (k, self.query[k]) for k in keys)
        return s

    def __str__(self):
        return self.__to_string__(hide_password=False)

    def __repr__(self):
        return self.__to_string__()

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        # NOTE(review): ``port`` is not part of the equality comparison,
        # so URLs differing only by port compare equal -- confirm whether
        # callers rely on this before changing it.
        return \
            isinstance(other, URL) and \
            self.drivername == other.drivername and \
            self.username == other.username and \
            self.password == other.password and \
            self.host == other.host and \
            self.database == other.database and \
            self.query == other.query

    def get_backend_name(self):
        """Return the backend portion of the drivername (before '+')."""
        if '+' not in self.drivername:
            return self.drivername
        else:
            return self.drivername.split('+')[0]

    def get_driver_name(self):
        """Return the DBAPI driver name (after '+', or the dialect default)."""
        if '+' not in self.drivername:
            return self.get_dialect().driver
        else:
            return self.drivername.split('+')[1]

    def get_dialect(self):
        """Return the SQLAlchemy database dialect class corresponding
        to this URL's driver name.
        """
        if '+' not in self.drivername:
            name = self.drivername
        else:
            name = self.drivername.replace('+', '.')
        cls = registry.load(name)
        # check for legacy dialects that
        # would return a module with 'dialect' as the
        # actual class
        if hasattr(cls, 'dialect') and \
                isinstance(cls.dialect, type) and \
                issubclass(cls.dialect, Dialect):
            return cls.dialect
        else:
            return cls

    def translate_connect_args(self, names=[], **kw):
        """Translate url attributes into a dictionary of connection arguments.

        Returns attributes of this url (`host`, `database`, `username`,
        `password`, `port`) as a plain dictionary.  The attribute names are
        used as the keys by default.  Unset or false attributes are omitted
        from the final dictionary.

        :param \**kw: Optional, alternate key names for url attributes.

        :param names: Deprecated.  Same purpose as the keyword-based alternate
          names, but correlates the name to the original positionally.
        """
        translated = {}
        attribute_names = ['host', 'database', 'username', 'password', 'port']
        for sname in attribute_names:
            if names:
                name = names.pop(0)
            elif sname in kw:
                name = kw[sname]
            else:
                name = sname
            if name is not None and getattr(self, sname, False):
                translated[name] = getattr(self, sname)
        return translated
def make_url(name_or_url):
    """Given a string or unicode instance, produce a new URL instance.

    The given string is parsed according to the RFC 1738 spec.  If an
    existing URL object is passed, just returns the object.
    """
    # Strings are parsed; URL instances pass through unchanged.
    if isinstance(name_or_url, util.string_types):
        return _parse_rfc1738_args(name_or_url)
    else:
        return name_or_url
def _parse_rfc1738_args(name):
    """Parse an RFC 1738 style URL string into a :class:`URL` instance.

    Raises ArgumentError when the string does not match the expected form.
    """
    pattern = re.compile(r'''
            (?P<name>[\w\+]+)://
            (?:
                (?P<username>[^:/]*)
                (?::(?P<password>.*))?
            @)?
            (?:
                (?:
                    \[(?P<ipv6host>[^/]+)\] |
                    (?P<ipv4host>[^/:]+)
                )?
                (?::(?P<port>[^/]*))?
            )?
            (?:/(?P<database>.*))?
            ''', re.X)

    m = pattern.match(name)
    if m is not None:
        components = m.groupdict()
        if components['database'] is not None:
            # Split off the query string from the database name.
            tokens = components['database'].split('?', 2)
            components['database'] = tokens[0]
            query = (
                len(tokens) > 1 and dict(util.parse_qsl(tokens[1]))) or None
            if util.py2k and query is not None:
                # On Python 2, keys must be bytes for use as keyword args.
                query = dict((k.encode('ascii'), query[k]) for k in query)
        else:
            query = None
        components['query'] = query

        if components['username'] is not None:
            components['username'] = _rfc_1738_unquote(components['username'])

        if components['password'] is not None:
            components['password'] = _rfc_1738_unquote(components['password'])

        # Collapse the two host alternatives into a single 'host' key.
        ipv4host = components.pop('ipv4host')
        ipv6host = components.pop('ipv6host')
        components['host'] = ipv4host or ipv6host
        name = components.pop('name')
        return URL(name, **components)
    else:
        raise exc.ArgumentError(
            "Could not parse rfc1738 URL from string '%s'" % name)
def _rfc_1738_quote(text):
return re.sub(r'[:@/]', lambda m: "%%%X" % ord(m.group(0)), text)
def _rfc_1738_unquote(text):
    # Reverse of _rfc_1738_quote; delegates to the compat unquote helper.
    return util.unquote(text)
def _parse_keyvalue_args(name):
    """Parse a 'driver://key=value&...' style string into a URL.

    Returns None when *name* does not match the driver:// form.
    """
    m = re.match(r'(\w+)://(.*)', name)
    if m is not None:
        (name, args) = m.group(1, 2)
        opts = dict(util.parse_qsl(args))
        # Pass the options as keyword arguments; the previous
        # ``URL(name, *opts)`` unpacked only the dict's *keys* positionally,
        # silently discarding the values.
        return URL(name, **opts)
    else:
        return None
|
tekton/django_react | django_react/urls.py | Python | mit | 924 | 0 | """django_react URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Ad | d an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url | , include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
import views
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Landing page served by the local views module.
    url(r"^$", views.index),
    # Routes delegated to the comments app.
    url(r"^comments", include("comments.urls"))
]
|
jokeregistry/jokeregistryweb | jokeregistryweb/claims/models.py | Python | gpl-2.0 | 1,263 | 0.000792 | from django.db i | mport models
from jokeregistryweb.jokes.models import Joke
class Claim(models.Model):
    '''A claim of prior art (or infringement)'''
    # Workflow states for a claim.
    FILED = 0
    APPROVED = 1
    REJECTED = 2
    STATUS_CHOICES = (
        (FILED, 'Filed'),
        (APPROVED, 'Approved'),
        (REJECTED, 'Rejected')
    )
    infringing_joke = models.ForeignKey(
        'jokes.Joke',
        related_name='infringing_claim',
        help_text='The infringing joke')
    infringed_joke = models.ForeignKey(
        'jokes.Joke',
        related_name='infringed_claim',
        help_text='The original joke')
    text = models.TextField(help_text='additional detail', null=True, blank=True)
    status = models.IntegerField(choices=STATUS_CHOICES, default=FILED)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        # Newest claims first.
        ordering = ('-created', '-updated')

    def approve(self):
        """Mark the claim approved; reparent the infringing joke when the
        claimed original predates it."""
        if self.infringed_joke.created < self.infringing_joke.created:
            self.infringing_joke.parent = self.infringed_joke
            self.infringing_joke.save()
        # Note: the claim is approved even when no reparenting occurred.
        self.status = Claim.APPROVED
        self.save()

    def reject(self):
        """Mark the claim rejected."""
        self.status = Claim.REJECTED
        self.save()
|
Comunitea/CMNT_004_15 | project-addons/purchase_order_import_ubl/tests/test_ubl_order_import.py | Python | agpl-3.0 | 1,222 | 0 | # © 2016-2017 Akretion (Alexis de Lattre <alexis.delattre@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
from odoo.tools import file_open
i | mport base64
class TestUblOrderImport(TransactionCase):
    """Exercise the UBL RFQ import wizard against an existing purchase order."""
    def test_ubl_order_import(self):
        """Import a quote PDF and verify the PO is updated in place.

        Each entry maps a fixture filename to the PO it should update and
        the incoterm the import is expected to set on it.
        """
        tests = {
            'quote-PO00004.pdf': {
                'po_to_update': self.env.ref('purchase.purchase_order_4'),
                'incoterm': self.env.ref('stock.incoterm_DDU'),
            },
        }
        poio = self.env['purchase.order.import']
        # BUG FIX: dict.iteritems() is Python-2-only; dict.items() behaves
        # identically here and also runs on Python 3 Odoo versions.
        for filename, res in tests.items():
            po = res['po_to_update']
            f = file_open(
                'purchase_order_import_ubl/tests/files/' + filename, 'rb')
            quote_file = f.read()
            # Run the wizard in the context of the PO being updated.
            wiz = poio.with_context(
                active_model='purchase.order', active_id=po.id).create({
                'quote_file': base64.b64encode(quote_file),
                'quote_filename': filename,
            })
            f.close()
            self.assertEqual(wiz.purchase_id, po)
            wiz.update_rfq_button()
            self.assertEqual(po.incoterm_id, res['incoterm'])
|
msreis/SigNetSim | signetsim/tests/test_project.py | Python | agpl-3.0 | 4,292 | 0.018406 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" test_project.py
This file tests the creation, copy, deletion, modification and sharing of projects
"""
from django.test import TestCase, Client
from django.conf import settings
from signetsim.models import User, Project
from json import loads
class TestProjects(TestCase):
	"""End-to-end test of the project CRUD / sharing HTTP endpoints.

	Walks one client session through: create, copy, delete, send to
	another user, and toggling a project public/private, asserting the
	database and the JSON API after each step.
	"""
	fixtures = ["users.json"]
	def testCreateProject(self):
		# Start from a clean slate for the first fixture user.
		user = User.objects.filter(username='test_user')[0]
		self.assertEqual(len(Project.objects.filter(user=user)), 0)
		c = Client()
		self.assertTrue(c.login(username='test_user', password='password'))
		# --- create a project via the form endpoint ---
		response_create_project = c.post('/', {
			'action': 'save_project',
			'modal_project_name': 'Project 1'
		})
		self.assertEqual(response_create_project.status_code, 200)
		self.assertEqual(len(Project.objects.filter(user=user)), 1)
		project = Project.objects.filter(user=user)[0]
		self.assertEqual(project.name, "Project 1")
		# --- copy the project; the copy gets a "(Copy)" suffix ---
		response_copy_project = c.post('/', {
			'action': 'copy_folder',
			'id': project.id
		})
		self.assertEqual(response_copy_project.status_code, 200)
		self.assertEqual(len(Project.objects.filter(user=user)), 2)
		other_project = None
		for t_project in Project.objects.filter(user=user):
			if t_project != project:
				other_project = t_project
		self.assertEqual(other_project.name, u"Project 1 (Copy)")
		# --- delete the copy; only the original remains ---
		response_delete_project = c.post('/', {
			'action': 'delete_folder',
			'id': other_project.id
		})
		self.assertEqual(response_delete_project.status_code, 200)
		self.assertEqual(len(Project.objects.filter(user=user)), 1)
		self.assertEqual(Project.objects.filter(user=user)[0], project)
		# --- send (share a copy of) the project to a second user ---
		user_2 = User.objects.filter(username='test_user_2')[0]
		self.assertEqual(len(Project.objects.filter(user=user_2)), 0)
		response_send_project = c.post('/', {
			'action': 'send_folder',
			'modal_send_project_id': project.id,
			'modal_send_project_username': 'test_user_2'
		})
		self.assertEqual(response_send_project.status_code, 200)
		self.assertEqual(len(Project.objects.filter(user=user)), 1)
		self.assertEqual(len(Project.objects.filter(user=user_2)), 1)
		# --- JSON API reflects name and (initially private) visibility ---
		response_get_project = c.post('/json/get_project/', {
			'id': project.id
		})
		self.assertEqual(response_get_project.status_code, 200)
		json_response = loads(response_get_project.content.decode('utf-8'))
		self.assertEqual(json_response['name'], u'Project 1')
		self.assertEqual(json_response['public'], 0)
		# --- rename and mark public ('on' mimics the checkbox value) ---
		response_set_project_public = c.post('/', {
			'action': 'save_project',
			'modal_project_id': project.id,
			'modal_project_name': "Public project",
			'modal_project_access': 'on',
		})
		self.assertEqual(response_set_project_public.status_code, 200)
		response_get_project = c.post('/json/get_project/', {
			'id': project.id
		})
		self.assertEqual(response_get_project.status_code, 200)
		json_response = loads(response_get_project.content.decode('utf-8'))
		self.assertEqual(json_response['name'], u'Public project')
		self.assertEqual(json_response['public'], 1)
		# --- saving without the access flag makes it private again ---
		response_set_project_private = c.post('/', {
			'action': 'save_project',
			'modal_project_id': project.id,
			'modal_project_name': "Private project",
		})
		self.assertEqual(response_set_project_private.status_code, 200)
		response_get_project = c.post('/json/get_project/', {
			'id': project.id
		})
		self.assertEqual(response_get_project.status_code, 200)
		json_response = loads(response_get_project.content.decode('utf-8'))
		self.assertEqual(json_response['name'], u'Private project')
		self.assertEqual(json_response['public'], 0)
funkotron/django-lean | setup.py | Python | bsd-3-clause | 2,630 | 0.001901 | # -*- coding: utf-8 -*-
"""A/B Testing for Django
django-lean allows you to perform split-test experiments on your users.
In brief, this involves exposing 50% of your users to one implementation
and 50% to another, then comparing the performance of these two groups
with regards to certain metrics.
"""
from distutils.core import setup
# NOTE(review): these two names are computed from the module docstring but
# never passed to setup() below, which uses its own literal strings instead.
description, long_description = __doc__.split('\n\n', 1)
# Package metadata for django-lean (distutils-based; no setuptools extras).
setup(
    name='django-lean',
    version='0.15',
    author='Akoha, Inc.',
    author_email='django-lean@akoha.com',
    description=('A framework for performing and analyzing split-test ' +
                 'experiments in Django applications.'),
    long_description=('django-lean aims to be a collection of tools for ' +
                      'Lean Startups using the Django platform. Currently ' +
                      'it provides a framework for implementing split-test ' +
                      'experiments in JavaScript, Python, or Django template ' +
                      'code along with administrative views for analyzing ' +
                      'the results of those experiments.'),
    license='BSD',
    platforms=['any'],
    url='http://bitbucket.org/akoha/django-lean/wiki/Home',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    # Source lives under src/; packages must be listed explicitly
    # because distutils has no find_packages().
    package_dir = {'': 'src'},
    packages=[
        'django_lean',
        'django_lean.experiments',
        'django_lean.experiments.management',
        'django_lean.experiments.management.commands',
        'django_lean.experiments.migrations',
        'django_lean.experiments.templatetags',
        'django_lean.experiments.tests',
        'django_lean.lean_analytics',
        'django_lean.lean_retention',
        'django_lean.lean_retention.migrations',
        'django_lean.lean_retention.tests',
        'django_lean.lean_segments',
        'django_lean.lean_segments.management',
        'django_lean.lean_segments.management.commands',
    ],
    # Non-Python assets (JS, templates, test fixtures) shipped with the package.
    package_data={
        'django_lean.experiments': ['media/experiments/*.js',
                                    'templates/experiments/*.html',
                                    'templates/experiments/include/*.html'],
        'django_lean.experiments.tests': ['data/*.json'],
    },
    install_requires=['django >= 1.0'],
    tests_require=['BeautifulSoup', 'mox'],
)
|
FredrikAppelros/ransac | test.py | Python | mit | 1,575 | 0.006984 | #! /usr/bin/env python
import numpy as np
import gc
import matplotlib.pyplot as plt
from random import seed, sample, randint
from ransac import LineModel, ransac
from time import time
random_seed = 0
num_iterations = 100
num_samples = 1000
noise_ratio = 0.8
num_noise = int(noise_ratio * num_samples)
def setup():
    """Build the shared synthetic data set: y = 2x with noise injected.

    Publishes the module globals ``data`` (N x 2 array of points) and
    ``model`` (a fresh LineModel) for run()/summary(), and plots the raw
    points.  The RNG is seeded so every run sees the same noise.
    """
    global data, model
    seed(random_seed)
    X = np.asarray(range(num_samples))
    Y = 2 * X
    # Replace a random subset of Y values with uniform noise over the
    # same range as the clean data (Python 2: xrange).
    noise = [randint(0, 2 * (num_samples - 1)) for i in xrange(num_noise)]
    Y[sample(xrange(len(Y)), num_noise)] = noise
    data = np.asarray([X, Y]).T
    model = LineModel()
    plt.plot(X, Y, 'bx')
def run():
    """Benchmark num_iterations RANSAC fits over the shared data set.

    Publishes the module globals ``params``, ``residual`` (best fit of
    the final iteration, or None if every attempt failed) and
    ``mean_time`` (seconds per call) for summary() to report.
    """
    global params, residual, mean_time
    # BUG FIX: start from a defined state -- if every iteration raises,
    # the original left `params` unbound and summary()'s `if params:`
    # crashed with a NameError.
    params = None
    residual = None
    gc.disable()  # keep collector pauses out of the timing loop
    try:
        start_time = time()
        for i in xrange(num_iterations):
            try:
                (params, inliers, residual) = ransac(data, model, 2, (1 - noise_ratio) * num_samples)
            except ValueError:
                # ransac() found no acceptable fit this round; keep the
                # last successful result (if any).
                pass
        end_time = time()
        mean_time = (end_time - start_time) / num_iterations
    finally:
        # Re-enable GC even if the timed section raised.
        gc.enable()
def summary():
    """Print the last fit's parameters/residual and the mean call time,
    overlay the fitted line on the scatter plot, and show the figure.

    Reads the module globals set by run(); prints a failure message when
    no fit was found (``params`` falsy).
    """
    if params:
        print ' Parameters '.center(40, '=')
        print params
        print ' Residual '.center(40, '=')
        print residual
        print ' Time '.center(40, '=')
        print '%.1f msecs mean time spent per call' % (1000 * mean_time)
        # Draw the fitted line y = params[0]*x + params[1] across the
        # full x-range of the data.
        X = np.asarray([0, num_samples - 1])
        Y = params[0] * X + params[1]
        plt.plot(X, Y, 'k-')
    else:
        print 'RANSAC failed to find a sufficiently good fit for the data.'
    plt.show()
if __name__ == '__main__':
    # Script entry point: build the synthetic data set, benchmark RANSAC
    # over it, then report and plot the best fit.
    setup()
    run()
    summary()
|
pol3waf/svgEd | svged/test/random_draw_test.py | Python | gpl-2.0 | 213 | 0.032864 | ##
# This | test shall | simply spawn random shapes and print them to a svg file.
##
from svged import shapes
def run():
    """Print a minimal SVG document (header, one line shape, footer)."""
    for fragment in (shapes.writeHeader(),
                     shapes.line(10, 10, 200, 200),
                     shapes.writeEnd()):
        print(fragment)
|
vorushin/FamilyFeed | settings.py | Python | mit | 5,785 | 0.001556 | # Django settings for FamilyFeed project.
import os.path
# All filesystem paths below are resolved relative to this settings file.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# MySQL database; the empty credentials are expected to be overridden by
# local_settings.LOCAL_DATABASES (merged at the bottom of this file).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'familyfeed',
        'USER': 'familyfeed',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Local memcached instance on the default port.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Output directory (relative to static storage) for compiled CoffeeScript.
COFFEESCRIPT_OUTPUT_DIR = 'coffee'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/admin/media/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this secret key is committed to source control; it should
# be moved into local_settings.py and rotated.
SECRET_KEY = '(n^q2(t-x4^n_p2hftw43c57n9@!u&c3@aoj=5%&*!q*mucs)u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
# NOTE(review): 'django.contrib.auth.context_processors.auth' appears
# twice in this tuple (first and last entry).
TEMPLATE_CONTEXT_PROCESSORS = ('django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'django.contrib.auth.context_processors.auth',
    )
ROOT_URLCONF = 'FamilyFeed.urls'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, 'templates'),
)
# Core Django apps, third-party tools (south migrations, django_extensions,
# coffeescript compilation) and this project's own apps.
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    'south',
    'coffeescript',
    'sources',
    'profiles',
    'timeline',
)
SKIP_SOUTH_TESTS = True
SOUTH_TESTS_MIGRATE = False
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Site-local overrides: local_settings.py may redefine any name above and
# may provide LOCAL_DATABASES / ADDITIONAL_APPS / ADDITIONAL_MIDDLEWARE
# for merging below.
try:
    from local_settings import *
except ImportError:
    import sys
    sys.stderr.write('Unable to read local_settings.py\n')
    # Convenient defaults
    DEBUG = False
try:
    DATABASES.update(LOCAL_DATABASES)
except NameError:
    pass
try:
    INSTALLED_APPS += ADDITIONAL_APPS
except NameError:
    pass
try:
    MIDDLEWARE_CLASSES += ADDITIONAL_MIDDLEWARE
except NameError:
    pass
# Fall back to DEBUG only when these were not set (e.g. by local_settings).
# BUG FIX: the original used hasattr(globals(), name); globals() returns a
# plain dict, so hasattr() checked for *dict attributes* and was always
# False, unconditionally clobbering any value local_settings provided.
if 'TEMPLATE_DEBUG' not in globals():
    TEMPLATE_DEBUG = DEBUG
if 'THUMBNAIL_DEBUG' not in globals():
    THUMBNAIL_DEBUG = DEBUG
|
rosskarchner/wendy_lucy | wendy_lucy/storybits/urls.py | Python | gpl-3.0 | 332 | 0.003012 | from django.conf.urls import url
from django.views.generic.base import RedirectView
from .views import SceneView, PoiView
urlpatterns = [
    # Bare site root redirects to the first scene.
    url(r'^$', RedirectView.as_view(url='/1')),
    # /<pk>/ shows a single scene.
    url(r'^(?P<pk>\d+)/$', SceneView.as_view(), name='detail'),
    # /<scene_pk>/<tag>/ shows one point of interest within a scene.
    url(r'^(?P<scene_pk>\d+)/(?P<tag>.+)/$', PoiView.as_view(), name='poi'),
]
|
Cnlouds/pentest | sshbrute/Util.py | Python | mit | 1,119 | 0.01966 | '''
Created on Aug 25, 2011
@author: r4stl1n
'''
def fileContentsToList(fileName):
    """Return the lines of *fileName* as a list with newlines stripped.

    On any error opening the file, report it and return an empty list.
    (BUG FIX: the original fell through after the except clauses with the
    file handle unbound, crashing with a NameError; it also never closed
    the file and used a bare except.)
    """
    lineList = []
    try:
        fileParser = open(fileName, 'r')
    except IOError:
        print("[!] Could not open file %s " % fileName)
        return lineList
    except Exception:
        print("[!] Could not access file %s" % fileName)
        return lineList
    try:
        for line in fileParser.readlines():
            newLine = line.replace('\n', '')
            lineList.append(newLine)
    finally:
        # Always release the handle, even if reading fails mid-way.
        fileParser.close()
    return lineList
def fileContentsToTuple(fileName):
    """Return a list of (key, value) tuples from "key:value" lines.

    Splits each newline-stripped line at the FIRST colon.  On any error
    opening the file, report it and return an empty list.  (BUG FIX: the
    original fell through after the except clauses with the file handle
    unbound, crashing with a NameError, and never closed the file.)
    """
    tupleList = []
    try:
        fileParser = open(fileName, 'r')
    except IOError:
        print("[!] Could not open file %s " % fileName)
        return tupleList
    except Exception:
        print("[!] Could not access file %s" % fileName)
        return tupleList
    try:
        for line in fileParser.readlines():
            newLine = line.replace('\n', '')
            # Same split index for key and value (the original mixed the
            # raw and stripped line, which happened to work only because
            # the newline sits after the colon).
            sep = newLine.find(':')
            newTuple = (newLine[:sep], newLine[sep + 1:])
            tupleList.append(newTuple)
    finally:
        fileParser.close()
    return tupleList
def appendLineToFile(line, filename):
    """Append *line* plus a trailing newline to *filename*.

    The file is created if it does not yet exist ("a+" mode).
    """
    with open(filename, "a+") as output:
        output.write(line + "\n")
|
dya2/python-for-android | python3-alpha/python3-src/Lib/profile.py | Python | apache-2.0 | 22,537 | 0.001686 | #! /usr/bin/env python3
#
# Class for profiling python code. rev 1.0 6/2/94
#
# Written by James Roskind
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
"""Class for profiling Python code."""
# Copyright Disney Enterprises, Inc. | All Rights Reserved.
# Licensed to PSF under a Contributor Agreement
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied | . See the License for the specific language
# governing permissions and limitations under the License.
import sys
import os
import time
import marshal
from optparse import OptionParser
__all__ = ["run", "runctx", "Profile"]
# Sample timer for use with
#i_count = 0
#def integer_timer():
# global i_count
# i_count = i_count + 1
# return i_count
#itimes = integer_timer # replace with C coded timer returning integers
#**************************************************************************
# The following are the static member functions for the profiler class
# Note that an instance of Profile() is *not* needed to call them.
#**************************************************************************
def run(statement, filename=None, sort=-1):
    """Profile *statement* and either dump or print the statistics.

    The statement is anything acceptable to "exec"; it runs under a
    fresh Profile instance.  When *filename* is given the raw stats are
    dumped to that file; otherwise a simple report sorted by *sort*
    (default: standard file/line/function-name string) is printed and
    its return value propagated.
    """
    profiler = Profile()
    try:
        profiler = profiler.run(statement)
    except SystemExit:
        # The profiled code called sys.exit(); report what we gathered.
        pass
    if filename is None:
        return profiler.print_stats(sort)
    profiler.dump_stats(filename)
def runctx(statement, globals, locals, filename=None, sort=-1):
    """Profile *statement* in caller-supplied namespaces.

    Behaves like run(), except the statement executes with the given
    *globals* and *locals* dictionaries; *statement* and *filename*
    have the same semantics as in run().
    """
    profiler = Profile()
    try:
        profiler = profiler.runctx(statement, globals, locals)
    except SystemExit:
        # The profiled code called sys.exit(); report what we gathered.
        pass
    if filename is None:
        return profiler.print_stats(sort)
    profiler.dump_stats(filename)
if hasattr(os, "times"):
def _get_time_times(timer=os.times):
t = timer()
return t[0] + t[1]
# Using getrusage(3) is better than clock(3) if available:
# on some systems (e.g. FreeBSD), getrusage has a higher resolution
# Furthermore, on a POSIX system, returns microseconds, which
# wrap around after 36min.
_has_res = 0  # set to 1 below when the resource-based timer is usable
try:
    import resource
    # Snapshot of this process's resource usage (user time, system
    # time, ...) as returned by getrusage(RUSAGE_SELF).
    resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF)
    def _get_time_resource(timer=resgetrusage):
        # Total CPU time = user time + system time.
        t = timer()
        return t[0] + t[1]
    _has_res = 1
except ImportError:
    # No resource module (e.g. non-POSIX platforms); Profile.__init__
    # falls back to the other timers.
    pass
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
used to write into the frames local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact (frame and previous tuple). In case an internal error is
detected, the -3 element is used as the function name.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions (this latter is tallied in cur[2]).
[ 2] = Total time spent in subfunctions, excluding time executing the
frame's function (this latter is tallied in cur[1]).
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling).
[-1] = Our parent 6-tuple (corresponds to frame.f_back).
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[-3].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[4] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
bias = 0 # calibration constant
    def __init__(self, timer=None, bias=None):
        """Set up per-function timing state and pick a timer/dispatcher pair.

        *timer*, if given, is a callable returning either a scalar time or
        a sequence of times to be summed; the matching dispatch routine is
        chosen by probing its return value.  *bias* is the calibration
        constant subtracted from each measurement (defaults to the class
        attribute).
        """
        self.timings = {}
        self.cur = None
        self.cmd = ""
        self.c_func_name = ""
        if bias is None:
            bias = self.bias
        self.bias = bias     # Materialize in local dict for lookup speed.
        if not timer:
            # No timer supplied: prefer getrusage, then time.clock,
            # then os.times, finally time.time -- each paired with the
            # dispatch routine that matches its return shape.
            if _has_res:
                self.timer = resgetrusage
                self.dispatcher = self.trace_dispatch
                self.get_time = _get_time_resource
            elif hasattr(time, 'clock'):
                self.timer = self.get_time = time.clock
                self.dispatcher = self.trace_dispatch_i
            elif hasattr(os, 'times'):
                self.timer = os.times
                self.dispatcher = self.trace_dispatch
                self.get_time = _get_time_times
            else:
                self.timer = self.get_time = time.time
                self.dispatcher = self.trace_dispatch_i
        else:
            self.timer = timer
            t = self.timer() # test out timer function
            try:
                # A sequence-returning timer needs its elements summed;
                # a scalar timer raises TypeError on len().
                length = len(t)
            except TypeError:
                self.get_time = timer
                self.dispatcher = self.trace_dispatch_i
            else:
                if length == 2:
                    self.dispatcher = self.trace_dispatch
                else:
                    self.dispatcher = self.trace_dispatch_l
                # This get_time() implementation needs to be defined
                # here to capture the passed-in timer in the parameter
                # list (for performance).  Note that we can't assume
                # the timer() result contains two values in all
                # cases.
                def get_time_timer(timer=timer, sum=sum):
                    return sum(timer())
                self.get_time = get_time_timer
        self.t = self.get_time()
        # Seed the simulated call stack with a synthetic 'profiler' frame.
        self.simulate_call('profiler')
# Heavily optimized dispatch routine for os.times() timer
    def trace_dispatch(self, frame, event, arg):
        """Trace callback for two-element timers such as os.times().

        Converts the (user, system) pair to elapsed CPU time since the
        last event (minus the calibration bias), routes the event to the
        matching handler in self.dispatch, and re-reads the timer so the
        profiler's own bookkeeping is not charged to the profiled code.
        """
        timer = self.timer
        t = timer()
        t = t[0] + t[1] - self.t - self.bias
        if event == "c_call":
            # C functions carry their name on the arg, not the frame.
            self.c_func_name = arg.__name__
        if self.dispatch[event](self, frame,t):
            # Handler recorded the time: restart the clock from "now".
            t = timer()
            self.t = t[0] + t[1]
        else:
            r = timer()
            self.t = r[0] + r[1] - t # put back unrecorded delta
# Dispatch routine for best timer program (return = scalar, fastest if
# an integer but float works too -- and time.clock() relies on that).
def trace_dispatch_i(self, frame, event, arg):
timer = self.timer
t = timer() - self.t - self.b |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.