Dataset columns:
- repo_name: string (length 5 to 92)
- path: string (length 4 to 221)
- copies: string (19 classes)
- size: string (length 4 to 6)
- content: string (length 766 to 896k)
- license: string (15 classes)
- hash: int64 (-9,223,277,421,539,062,000 to 9,223,102,107B)
- line_mean: float64 (6.51 to 99.9)
- line_max: int64 (32 to 997)
- alpha_frac: float64 (0.25 to 0.96)
- autogenerated: bool (1 class)
- ratio: float64 (1.5 to 13.6)
- config_test: bool (2 classes)
- has_no_keywords: bool (2 classes)
- few_assignments: bool (1 class)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
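Each row below follows this schema, with the cell values listed in the same column order and separated by `|` markers. As a minimal, hedged sketch of filtering on these fields, assuming the rows have already been loaded as plain Python dicts keyed by the column names (the loading step itself is hypothetical):

def keep(row):
    # keep permissively-licensed, human-written, non-test files
    return (row["license"] in {"mit", "bsd-3-clause", "apache-2.0"}
            and not row["autogenerated"]
            and not row["config_test"])

rows = [{"repo_name": "example/repo", "license": "mit",
         "autogenerated": False, "config_test": False}]
print([r["repo_name"] for r in rows if keep(r)])  # -> ['example/repo']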
lmjohns3/downhill
|
test/base_test.py
|
1
|
3100
|
import downhill
import numpy as np
import util
class TestBuild:
def test_sgd(self):
assert isinstance(util.build_rosen('sgd')[0], downhill.SGD)
assert isinstance(util.build_factor('sgd')[0], downhill.SGD)
def test_nag(self):
assert isinstance(util.build_rosen('nag')[0], downhill.NAG)
def test_rprop(self):
assert isinstance(util.build_rosen('RProp')[0], downhill.RProp)
def test_rmsprop(self):
assert isinstance(util.build_rosen('RmsProp')[0], downhill.RMSProp)
def test_adadelta(self):
assert isinstance(util.build_rosen('ADADELTA')[0], downhill.ADADELTA)
def test_esgd(self):
assert isinstance(util.build_rosen('EsGd')[0], downhill.ESGD)
def test_adam(self):
assert isinstance(util.build_rosen('Adam')[0], downhill.Adam)
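# A deliberately trivial optimizer used only by these tests: every update just
# adds a constant (1.1) to each parameter, so monitored values change predictably.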
class Straight(downhill.Optimizer):
def _get_updates_for(self, param, grad):
yield (param, param + 1.1)
class TestOptimizer:
def test_rosen(self):
opt, train = util.build_rosen('straight')
assert isinstance(opt, Straight)
# run the optimizer for three iterations. check that the x and y values
# (being monitored) increase at each iteration.
for i, (tm, vm) in enumerate(opt.iterate(train, max_updates=3)):
assert tm['x'] >= vm['x']
assert tm['y'] >= vm['y']
assert i < 3
def test_rosen_unnamed(self):
opt, train = util.build_rosen('straight', name=False, monitor_gradients=True)
assert isinstance(opt, Straight)
# run the optimizer for three iterations. check that the x and y values
# (being monitored) increase at each iteration.
for i, (tm, vm) in enumerate(opt.iterate(train, max_updates=3)):
assert tm['x'] >= vm['x']
assert tm['y'] >= vm['y']
# check that exactly one automatically-named ('unnamed') parameter shows up in the monitors.
assert 1 == sum(1 for k in tm if 'unnamed' in k), tm
assert i < 3
def test_factor(self):
opt, train = util.build_factor('straight')
assert isinstance(opt, Straight)
# run the optimizer for two iterations. check that the u and v values
# (being monitored) are reasonable at the start.
for i, (tm, vm) in enumerate(opt.iterate(train)):
assert abs(vm['u<1'] - 0.001) < 1e-5
assert vm['u<-1'] == 0
assert vm['v<1'] == 1
assert vm['v<-1'] == 0
if i == 2:
break
def test_gradient_clip(self):
opt, data = util.build_rosen('straight')
for _ in opt.iterate(data, max_gradient_elem=3):
assert opt.max_gradient_elem == 3
break
def test_set_params(self):
opt, _ = util.build_rosen('straight')
opt.set_params([[1, 2]])
assert np.allclose(opt._params[0].get_value(), [1, 2])
def test_set_best_params(self):
opt, _ = util.build_rosen('straight')
opt._best_params = [[1, 2]]
opt.set_params('best')
assert np.allclose(opt._params[0].get_value(), [1, 2])
|
mit
| -4,127,588,063,821,869,600
| 33.444444
| 85
| 0.590323
| false
| 3.421634
| true
| false
| false
|
kevthehermit/viper
|
viper/modules/jar.py
|
2
|
2038
|
# -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import hashlib
import zipfile
from viper.common.abstracts import Module
from viper.core.session import __sessions__
class Jar(Module):
cmd = 'jar'
description = 'Parse Java JAR archives'
authors = ['Kevin Breen']
def __init__(self):
super(Jar, self).__init__()
self.parser.add_argument('-d ', '--dump', metavar='dump_path', help='Extract all items from jar')
def run(self):
def read_manifest(manifest):
rows = []
lines = manifest.split(b'\r\n')
for line in lines:
if len(line) > 1:
item, value = line.split(b':')
rows.append([item.decode(), value.decode()])
self.log('info', "Manifest File:")
self.log('table', dict(header=['Item', 'Value'], rows=rows))
super(Jar, self).run()
if self.args is None:
return
arg_dump = self.args.dump
if not __sessions__.is_set():
self.log('error', "No open session")
return
if not zipfile.is_zipfile(__sessions__.current.file.path):
self.log('error', "Doesn't Appear to be a valid jar archive")
return
with zipfile.ZipFile(__sessions__.current.file.path, 'r') as archive:
jar_tree = []
for name in archive.namelist():
item_data = archive.read(name)
if name == 'META-INF/MANIFEST.MF':
read_manifest(item_data)
item_md5 = hashlib.md5(item_data).hexdigest()
jar_tree.append([name, item_md5])
self.log('info', "Jar Tree:")
self.log('table', dict(header=['Java File', 'MD5'], rows=jar_tree))
if arg_dump:
archive.extractall(arg_dump)
self.log('info', "Archive content extracted to {0}".format(arg_dump))
|
bsd-3-clause
| 3,725,401,441,174,335,500
| 30.353846
| 105
| 0.541217
| false
| 3.949612
| false
| false
| false
|
compas-dev/compas
|
src/compas/datastructures/network/duality.py
|
1
|
5940
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import pi
from compas.utilities import pairwise
from compas.geometry import angle_vectors
from compas.geometry import is_ccw_xy
__all__ = [
'network_find_cycles',
]
PI2 = 2.0 * pi
def network_find_cycles(network, breakpoints=None):
"""Find the faces of a network.
Parameters
----------
network : compas.datastructures.Network
The network object.
breakpoints : list, optional
The vertices at which to break the found faces.
Default is ``None``.
Notes
-----
``breakpoints`` are primarily used to break up the outside face in between
specific vertices. For example, in structural applications involving dual
diagrams, any vertices where external forces are applied (loads or reactions)
should be input as breakpoints.
Warnings
--------
This algorithm is essentially a wall follower (a type of maze-solving algorithm).
It relies on the geometry of the network being represented as a planar,
straight-line embedding. It determines an ordering of the neighboring vertices
around each vertex, and then follows the *walls* of the network, always
taking turns in the same direction.
Examples
--------
>>>
"""
if not breakpoints:
breakpoints = []
for u, v in network.edges():
network.adjacency[u][v] = None
network.adjacency[v][u] = None
network_sort_neighbors(network)
leaves = list(network.leaves())
if leaves:
u = sorted([(key, network.node_coordinates(key, 'xy')) for key in leaves], key=lambda x: (x[1][1], x[1][0]))[0][0]
else:
u = sorted(network.nodes(True), key=lambda x: (x[1]['y'], x[1]['x']))[0][0]
cycles = {}
found = {}
ckey = 0
v = network_node_find_first_neighbor(network, u)
cycle = network_find_edge_cycle(network, u, v)
frozen = frozenset(cycle)
found[frozen] = ckey
cycles[ckey] = cycle
for a, b in pairwise(cycle + cycle[:1]):
network.adjacency[a][b] = ckey
ckey += 1
for u, v in network.edges():
if network.adjacency[u][v] is None:
cycle = network_find_edge_cycle(network, u, v)
frozen = frozenset(cycle)
if frozen not in found:
found[frozen] = ckey
cycles[ckey] = cycle
ckey += 1
for a, b in pairwise(cycle + cycle[:1]):
network.adjacency[a][b] = found[frozen]
if network.adjacency[v][u] is None:
cycle = network_find_edge_cycle(network, v, u)
frozen = frozenset(cycle)
if frozen not in found:
found[frozen] = ckey
cycles[ckey] = cycle
ckey += 1
for a, b in pairwise(cycle + cycle[:1]):
network.adjacency[a][b] = found[frozen]
cycles = _break_cycles(cycles, breakpoints)
return cycles
def network_node_find_first_neighbor(network, key):
nbrs = network.neighbors(key)
if len(nbrs) == 1:
return nbrs[0]
ab = [-1.0, -1.0, 0.0]
a = network.node_coordinates(key, 'xyz')
b = [a[0] + ab[0], a[1] + ab[1], 0]
angles = []
for nbr in nbrs:
c = network.node_coordinates(nbr, 'xyz')
ac = [c[0] - a[0], c[1] - a[1], 0]
alpha = angle_vectors(ab, ac)
if is_ccw_xy(a, b, c, True):
alpha = PI2 - alpha
angles.append(alpha)
return nbrs[angles.index(min(angles))]
def network_sort_neighbors(network, ccw=True):
sorted_neighbors = {}
xyz = {key: network.node_coordinates(key) for key in network.nodes()}
for key in network.nodes():
nbrs = network.neighbors(key)
sorted_neighbors[key] = node_sort_neighbors(key, nbrs, xyz, ccw=ccw)
for key, nbrs in sorted_neighbors.items():
network.node_attribute(key, 'neighbors', nbrs[::-1])
return sorted_neighbors
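# Sort the neighbors of a single node counterclockwise around it using only
# is_ccw_xy tests: each neighbor is inserted into 'ordered' at the position
# where the ccw condition flips.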
def node_sort_neighbors(key, nbrs, xyz, ccw=True):
if len(nbrs) == 1:
return nbrs
ordered = nbrs[0:1]
a = xyz[key]
for i, nbr in enumerate(nbrs[1:]):
c = xyz[nbr]
pos = 0
b = xyz[ordered[pos]]
while not is_ccw_xy(a, b, c):
pos += 1
if pos > i:
break
b = xyz[ordered[pos]]
if pos == 0:
pos = -1
b = xyz[ordered[pos]]
while is_ccw_xy(a, b, c):
pos -= 1
if pos < -len(ordered):
break
b = xyz[ordered[pos]]
pos += 1
ordered.insert(pos, nbr)
if not ccw:
return ordered[::-1]
return ordered
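# Follow a "wall" of the network starting from the directed edge (u, v): at each
# node, turn to the previous neighbor in its ccw ordering, and stop once the walk
# returns to the starting node, yielding one closed cycle (face).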
def network_find_edge_cycle(network, u, v):
cycle = [u]
while True:
cycle.append(v)
nbrs = network.node_attribute(v, 'neighbors')
nbr = nbrs[nbrs.index(u) - 1]
u, v = v, nbr
if v == cycle[0]:
break
return cycle
def _break_cycles(cycles, breakpoints):
breakpoints = set(breakpoints)
broken = []
for fkey in cycles:
vertices = cycles[fkey]
faces = []
faces.append([vertices[0]])
for i in range(1, len(vertices) - 1):
key = vertices[i]
faces[-1].append(key)
if key in breakpoints:
faces.append([key])
faces[-1].append(vertices[-1])
faces[-1].append(vertices[0])
if len(faces) == 1:
broken.append(faces[0])
continue
if faces[0][0] not in breakpoints and faces[-1][-1] not in breakpoints:
if faces[0][0] == faces[-1][-1]:
faces[:] = [faces[-1] + faces[0][1:]] + faces[1:-1]
if len(faces) == 1:
broken.append(faces[0])
continue
for vertices in faces:
broken.append(vertices)
return broken
|
mit
| 5,137,100,386,426,476,000
| 27.834951
| 122
| 0.557407
| false
| 3.673469
| false
| false
| false
|
rafamanzo/colab
|
colab/plugins/trac/migrations/0001_initial.py
|
1
|
4776
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, connections
def create_views(apps, schema_editor):
connection = connections['trac']
cursor = connection.cursor()
# revision_view
cursor.execute('''
CREATE OR REPLACE VIEW revision_view AS SELECT
revision.rev,
revision.author,
revision.message,
repository.value AS repository_name,
TIMESTAMP WITH TIME ZONE 'epoch' + (revision.time/1000000) * INTERVAL '1s' AS created,
CONCAT(revision.repos, '-', revision.rev) AS key
FROM revision
INNER JOIN repository ON(
repository.id = revision.repos
AND repository.name = 'name'
AND repository.value != ''
);
''')
# attachment_view
cursor.execute('''
CREATE OR REPLACE VIEW attachment_view AS SELECT
CONCAT(attachment.type, '/' , attachment.id, '/', attachment.filename) AS url,
attachment.type AS used_by,
attachment.filename AS filename,
attachment.id as attach_id,
(SELECT LOWER(SUBSTRING(attachment.filename FROM '\.(\w+)$'))) AS mimetype,
attachment.author AS author,
attachment.description AS description,
attachment.size AS size,
TIMESTAMP WITH TIME ZONE 'epoch' + (attachment.time/1000000)* INTERVAL '1s' AS created
FROM attachment;
''')
# wiki_view
cursor.execute('''
CREATE OR REPLACE VIEW wiki_view AS SELECT
wiki.name AS name,
(SELECT wiki2.text FROM wiki AS wiki2 WHERE wiki2.name = wiki.name
AND wiki2.version = MAX(wiki.version)) AS wiki_text,
(SELECT wiki3.author FROM wiki AS wiki3 WHERE wiki3.name = wiki.name
AND wiki3.version = 1) AS author,
string_agg(DISTINCT wiki.author, ', ') AS collaborators,
TIMESTAMP WITH TIME ZONE 'epoch' + (MIN(wiki.time)/1000000) * INTERVAL '1s' AS created,
TIMESTAMP WITH TIME ZONE 'epoch' + (MAX(wiki.time)/1000000) * INTERVAL '1s' AS modified,
(SELECT wiki4.author FROM wiki AS wiki4 WHERE wiki4.name = wiki.name
AND wiki4.version = MAX(wiki.version)) AS modified_by
FROM wiki
GROUP BY wiki.name;
''')
# ticket_view
cursor.execute('''
CREATE OR REPLACE VIEW ticket_view AS SELECT
ticket.id AS id,
ticket.summary as summary,
ticket.description as description,
ticket.milestone as milestone,
ticket.priority as priority,
ticket.component as component,
ticket.version as version,
ticket.severity as severity,
ticket.reporter as reporter,
ticket.reporter as author,
ticket.status as status,
ticket.keywords as keywords,
(SELECT
string_agg(DISTINCT ticket_change.author, ', ')
FROM ticket_change WHERE ticket_change.ticket = ticket.id
GROUP BY ticket_change.ticket) as collaborators,
TIMESTAMP WITH TIME ZONE 'epoch' + (time/1000000)* INTERVAL '1s' AS created,
TIMESTAMP WITH TIME ZONE 'epoch' + (changetime/1000000) * INTERVAL '1s' AS modified,
(SELECT
ticket_change.author
FROM ticket_change
WHERE ticket_change.ticket = ticket.id
AND ticket_change.time = ticket.changetime
LIMIT 1
) AS modified_by
FROM ticket;
''')
# ticket_collab_count_view
cursor.execute('''
CREATE OR REPLACE VIEW ticket_collab_count_view AS
SELECT
COALESCE (t1.author, t2.author) as author,
(COALESCE(t1.count, 0) + COALESCE(t2.count, 0)) as count
FROM
(SELECT author, count(*) as count
FROM ticket_change
GROUP BY author
ORDER BY author
) AS t1
FULL OUTER JOIN
(SELECT reporter as author, count(*) as count
FROM ticket
GROUP BY reporter
ORDER BY reporter
) AS t2
ON t1.author = t2.author;
''')
# wiki_collab_count_view
cursor.execute('''
CREATE OR REPLACE VIEW wiki_collab_count_view AS
SELECT author, count(*) from wiki GROUP BY author;
''')
def drop_views(apps, schema_editor):
connection = connections['trac']
cursor = connection.cursor()
cursor.execute('''
DROP VIEW IF EXISTS revision_view;
DROP VIEW IF EXISTS ticket_view;
DROP VIEW IF EXISTS wiki_view;
DROP VIEW IF EXISTS ticket_collab_count_view;
DROP VIEW IF EXISTS wiki_collab_count_view;
DROP VIEW IF EXISTS attachment_view;
''')
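# The Migration below wires these two functions together: create_views runs when
# the migration is applied, drop_views when it is reversed.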
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.RunPython(code=create_views, reverse_code=drop_views)
]
|
gpl-2.0
| -4,734,781,128,982,950,000
| 32.87234
| 96
| 0.621022
| false
| 4.174825
| false
| false
| false
|
rembo10/headphones
|
lib/pygazelle/api.py
|
1
|
16383
|
#!/usr/bin/env python
#
# PyGazelle - https://github.com/cohena/pygazelle
# A Python implementation of the What.cd Gazelle JSON API
#
# Loosely based on the API implementation from 'whatbetter', by Zachary Denton
# See https://github.com/zacharydenton/whatbetter
from HTMLParser import HTMLParser
import sys
import json
import time
import requests as requests
import headphones
from .user import User
from .artist import Artist
from .tag import Tag
from .request import Request
from .torrent_group import TorrentGroup
from .torrent import Torrent
from .category import Category
from .inbox import Mailbox
class LoginException(Exception):
pass
class RequestException(Exception):
pass
class GazelleAPI(object):
last_request = time.time() # share amongst all api objects
default_headers = {
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'User-Agent': 'Headphones/%s' % headphones.CURRENT_VERSION,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9'\
',*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'}
def __init__(self, username=None, password=None, url=None):
self.session = requests.session()
self.session.headers = self.default_headers
self.username = username
self.password = password
self.authkey = None
self.passkey = None
self.userid = None
self.logged_in_user = None
self.default_timeout = 30
self.cached_users = {}
self.cached_artists = {}
self.cached_tags = {}
self.cached_torrent_groups = {}
self.cached_torrents = {}
self.cached_requests = {}
self.cached_categories = {}
self.site = url + "/"
self.past_request_timestamps = []
def wait_for_rate_limit(self):
# maximum is 5 requests within 10 secs
time_frame = 10
max_reqs = 5
slice_point = 0
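# Sliding-window rate limit: drop request timestamps older than the 10 s window;
# if the window is still full after purging, sleep briefly and check again.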
while len(self.past_request_timestamps) >= max_reqs:
for i, timestamp in enumerate(self.past_request_timestamps):
if timestamp < time.time() - time_frame:
slice_point = i + 1
else:
break
if slice_point:
self.past_request_timestamps = self.past_request_timestamps[slice_point:]
else:
time.sleep(0.1)
def logged_in(self):
return self.logged_in_user is not None and self.logged_in_user.id == self.userid
def _login(self):
"""
Private method.
Logs in user and gets authkey from server.
"""
if self.logged_in():
return
self.wait_for_rate_limit()
loginpage = self.site + 'login.php'
data = {'username': self.username,
'password': self.password,
'keeplogged': '1'}
r = self.session.post(loginpage, data=data, timeout=self.default_timeout, headers=self.default_headers)
self.past_request_timestamps.append(time.time())
if r.status_code != 200:
raise LoginException("Login returned status code %s" % r.status_code)
try:
accountinfo = self.request('index', autologin=False)
except RequestException as e:
raise LoginException("Login probably incorrect")
if not accountinfo or 'id' not in accountinfo:
raise LoginException("Login probably incorrect")
self.userid = accountinfo['id']
self.authkey = accountinfo['authkey']
self.passkey = accountinfo['passkey']
self.logged_in_user = User(self.userid, self)
self.logged_in_user.set_index_data(accountinfo)
def request(self, action, autologin=True, **kwargs):
"""
Makes an AJAX request at a given action.
Pass an action and relevant arguments for that action.
"""
def make_request(action, **kwargs):
ajaxpage = 'ajax.php'
content = self.unparsed_request(ajaxpage, action, **kwargs)
try:
if not isinstance(content, text_type):
content = content.decode('utf-8')
parsed = json.loads(content)
if parsed['status'] != 'success':
raise RequestException
return parsed['response']
except ValueError:
raise RequestException
try:
return make_request(action, **kwargs)
except Exception as e:
if autologin and not self.logged_in():
self._login()
return make_request(action, **kwargs)
else:
raise e
def unparsed_request(self, sitepage, action, **kwargs):
"""
Makes a generic HTTP request at a given page with a given action.
Also pass relevant arguments for that action.
"""
self.wait_for_rate_limit()
url = "%s%s" % (self.site, sitepage)
params = {'action': action}
if self.authkey:
params['auth'] = self.authkey
params.update(kwargs)
r = self.session.get(url, params=params, allow_redirects=False, timeout=self.default_timeout)
if r.status_code == 302 and r.raw.headers['location'] == 'login.php':
self.logged_in_user = None
raise LoginException("User login expired")
self.past_request_timestamps.append(time.time())
return r.content
def get_user(self, id):
"""
Returns a User for the passed ID, associated with this API object. If the ID references the currently logged in
user, the user returned will be pre-populated with the information from an 'index' API call. Otherwise, you'll
need to call User.update_user_data(). This is done on demand to reduce unnecessary API calls.
"""
id = int(id)
if id == self.userid:
return self.logged_in_user
elif id in self.cached_users.keys():
return self.cached_users[id]
else:
return User(id, self)
def search_users(self, search_query):
"""
Returns a list of users returned for the search query. You can search by name, part of name, and ID number. If
one of the returned users is the currently logged-in user, that user object will be pre-populated with the
information from an 'index' API call. Otherwise only the limited info returned by the search will be pre-pop'd.
You can query more information with User.update_user_data(). This is done on demand to reduce unnecessary API calls.
"""
response = self.request(action='usersearch', search=search_query)
results = response['results']
found_users = []
for result in results:
user = self.get_user(result['userId'])
user.set_search_result_data(result)
found_users.append(user)
return found_users
def get_inbox(self, page='1', sort='unread'):
"""
Returns the inbox Mailbox for the logged in user
"""
return Mailbox(self, 'inbox', page, sort)
def get_sentbox(self, page='1', sort='unread'):
"""
Returns the sentbox Mailbox for the logged in user
"""
return Mailbox(self, 'sentbox', page, sort)
def get_artist(self, id=None, name=None):
"""
Returns an Artist for the passed ID, associated with this API object. You'll need to call Artist.update_data()
if the artist hasn't already been cached. This is done on demand to reduce unnecessary API calls.
"""
if id:
id = int(id)
if id in self.cached_artists.keys():
artist = self.cached_artists[id]
else:
artist = Artist(id, self)
if name:
artist.name = HTMLParser().unescape(name)
elif name:
artist = Artist(-1, self)
artist.name = HTMLParser().unescape(name)
else:
raise Exception("You must specify either an ID or a Name to get an artist.")
return artist
def get_tag(self, name):
"""
Returns a Tag for the passed name, associated with this API object. If you know the count value for this tag,
pass it to update the object. There is no way to query the count directly from the API, but it can be retrieved
from other calls such as 'artist', however.
"""
if name in self.cached_tags.keys():
return self.cached_tags[name]
else:
return Tag(name, self)
def get_request(self, id):
"""
Returns a Request for the passed ID, associated with this API object. You'll need to call Request.update_data()
if the request hasn't already been cached. This is done on demand to reduce unnecessary API calls.
"""
id = int(id)
if id in self.cached_requests.keys():
return self.cached_requests[id]
else:
return Request(id, self)
def get_torrent_group(self, id):
"""
Returns a TorrentGroup for the passed ID, associated with this API object.
"""
id = int(id)
if id in self.cached_torrent_groups.keys():
return self.cached_torrent_groups[id]
else:
return TorrentGroup(id, self)
def get_torrent(self, id):
"""
Returns a Torrent for the passed ID, associated with this API object.
"""
id = int(id)
if id in self.cached_torrents.keys():
return self.cached_torrents[id]
else:
return Torrent(id, self)
def get_torrent_from_info_hash(self, info_hash):
"""
Returns a Torrent for the passed info hash (if one exists), associated with this API object.
"""
try:
response = self.request(action='torrent', hash=info_hash.upper())
except RequestException:
return None
id = int(response['torrent']['id'])
if id in self.cached_torrents.keys():
torrent = self.cached_torrents[id]
else:
torrent = Torrent(id, self)
torrent.set_torrent_complete_data(response)
return torrent
def get_category(self, id, name=None):
"""
Returns a Category for the passed ID, associated with this API object.
"""
id = int(id)
if id in self.cached_categories.keys():
cat = self.cached_categories[id]
else:
cat = Category(id, self)
if name:
cat.name = name
return cat
def get_top_10(self, type="torrents", limit=25):
"""
Lists the top <limit> items of <type>. Type can be "torrents", "tags", or "users". Limit MUST be
10, 25, or 100...it can't just be an arbitrary number (unfortunately). Results are organized into a list of hashes.
Each hash contains the results for a specific time frame, like 'day', or 'week'. In the hash, the 'results' key
contains a list of objects appropriate to the passed <type>.
"""
response = self.request(action='top10', type=type, limit=limit)
top_items = []
if not response:
raise RequestException
for category in response:
results = []
if type == "torrents":
for item in category['results']:
torrent = self.get_torrent(item['torrentId'])
torrent.set_torrent_top_10_data(item)
results.append(torrent)
elif type == "tags":
for item in category['results']:
tag = self.get_tag(item['name'])
results.append(tag)
elif type == "users":
for item in category['results']:
user = self.get_user(item['id'])
results.append(user)
else:
raise Exception("%s is an invalid type argument for GazelleAPI.get_top_ten()" % type)
top_items.append({
"caption": category['caption'],
"tag": category['tag'],
"limit": category['limit'],
"results": results
})
return top_items
def search_torrents(self, **kwargs):
"""
Searches based on the args you pass and returns torrent groups filled with torrents.
Pass strings unless otherwise specified.
Valid search args:
searchstr (any arbitrary string to search for)
page (page to display -- default: 1)
artistname (self explanatory)
groupname (torrent group name, equivalent to album)
recordlabel (self explanatory)
cataloguenumber (self explanatory)
year (self explanatory)
remastertitle (self explanatory)
remasteryear (self explanatory)
remasterrecordlabel (self explanatory)
remastercataloguenumber (self explanatory)
filelist (can search for filenames found in torrent...unsure of formatting for multiple files)
encoding (use constants in pygazelle.Encoding module)
format (use constants in pygazelle.Format module)
media (use constants in pygazelle.Media module)
releasetype (use constants in pygazelle.ReleaseType module)
haslog (int 1 or 0 to represent boolean, 100 for 100% only, -1 for < 100% / unscored)
hascue (int 1 or 0 to represent boolean)
scene (int 1 or 0 to represent boolean)
vanityhouse (int 1 or 0 to represent boolean)
freetorrent (int 1 or 0 to represent boolean)
taglist (comma separated tag names)
tags_type (0 for 'any' matching, 1 for 'all' matching)
order_by (use constants in pygazelle.order module that start with by_ in their name)
order_way (use way_ascending or way_descending constants in pygazelle.order)
filter_cat (for each category you want to search, the param name must be filter_cat[catnum] and the value 1)
ex. filter_cat[1]=1 turns on Music.
filter_cat[1]=1, filter_cat[2]=1 turns on music and applications. (two separate params and vals!)
Category object ids return the correct int value for these. (verify?)
Returns a dict containing keys 'curr_page', 'pages', and 'results'. Results contains a matching list of Torrents
(they have a reference to their parent TorrentGroup).
"""
response = self.request(action='browse', **kwargs)
results = response['results']
if len(results):
curr_page = response['currentPage']
pages = response['pages']
else:
curr_page = 1
pages = 1
matching_torrents = []
for torrent_group_dict in results:
torrent_group = self.get_torrent_group(torrent_group_dict['groupId'])
torrent_group.set_torrent_search_data(torrent_group_dict)
for torrent_dict in torrent_group_dict['torrents']:
torrent_dict['groupId'] = torrent_group.id
torrent = self.get_torrent(torrent_dict['torrentId'])
torrent.set_torrent_search_data(torrent_dict)
matching_torrents.append(torrent)
return {'curr_page': curr_page, 'pages': pages, 'results': matching_torrents}
def generate_torrent_link(self, id, use_token=False):
url = "%storrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s&usetoken=%d" %\
(self.site, id, self.logged_in_user.authkey, self.logged_in_user.passkey, use_token)
return url
def save_torrent_file(self, id, dest, use_token=False):
file_data = self.unparsed_request("torrents.php", 'download',
id=id, authkey=self.logged_in_user.authkey, torrent_pass=self.logged_in_user.passkey,
usetoken=int(use_token))
with open(dest, 'w+') as dest_file:
dest_file.write(file_data)
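# Python 2/3 compatibility shim: text_type is referenced by request() above to
# decode raw response bytes before JSON parsing.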
if sys.version_info[0] == 3:
text_type = str
else:
text_type = unicode
|
gpl-3.0
| 1,708,872,479,942,694,700
| 38.100239
| 124
| 0.591101
| false
| 4.175076
| false
| false
| false
|
reidlindsay/wins
|
sandbox/experiments/aloha/infocom/parse-per.py
|
1
|
6546
|
#! /usr/bin/env python
"""
Parse PER vs. SINR data from trace files.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-10-19 17:04:02 -0500 (Wed, 19 Oct 2011) $
* $LastChangedRevision: 5220 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009-2011 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from wins import *
from wins.ieee80211 import *
from optparse import OptionParser
import sys
from copy import copy
from numpy import array
def read_trace(options, tracefile):
# load trace from file
tr = Trace()
tr.read(tracefile)
# return trace
return tr
DETECTFAIL1 = "not detected in LISTEN"
HEADERFAIL1 = "header parameters failed"
HEADERFAIL2 = "header decoding failed"
IGNOREFAIL1 = "ignore rxdata in DECODE"
IGNOREFAIL2 = "ignore detect in DECODE"
def parse_per_info(options, trace, fmt='bo', usemodel=False):
# initialize parameters
param, data = {}, []
mcs, rmsdelay = None, []
ncollision = options.ncollision
# parse trace
for e in trace.events:
obj, evt = e['obj'], e['event']
# check for MCS parameter
if ('phy-rate' in e):
rate = int(e['phy-rate'])
hparamfail = ('drop' in e) and (e['drop']==HEADERFAIL1)
if not hparamfail:
if mcs is None: mcs = rate
else: assert (mcs == rate)
# check for 802.11n RCV & DRP events
if (obj=="80211N"):
rcv, drp = (evt=="RCV"), (evt=="DRP")
x, y = None, None
if drp:
drop = e['drop']
notdetected = (drop==DETECTFAIL1)
hparamfail = (drop==HEADERFAIL1)
headerfail = (drop==HEADERFAIL2)
ignorefail = (drop==IGNOREFAIL1) or (drop==IGNOREFAIL2)
assert (notdetected or hparamfail or headerfail or ignorefail), "%s"%(e)
#sinr = float(e['dot11n-sinr'].lower().replace("db","") )
#x, y = sinr, 1.0 # log header drop as a packet error also
elif rcv:
sinr = float(e['dot11n-sinr'].lower().replace("db","") )
err = e['crc']
haserror = (err=="FAIL")
noerror = (err=="OK")
assert (haserror or noerror)
if usemodel:
per = float(e['dot11n-model-per'])
else:
if haserror: per = 1.0
else: per = 0.0
# check if ncollision matches
keepdata = True
if (ncollision is not None):
keepdata = False
if 'cif-collision' in e:
coll = eval(e['cif-collision'])
assert isinstance(coll, list)
keepdata = (len(coll) == ncollision)
if keepdata:
x, y = sinr, per
# log data point
if (x is not None) and (y is not None):
dp = {'x':x, 'y':y, 'ndata': 1}
data.append(dp)
# check for RMS delay
if (rcv or drp):
tau = float(e['dot11n-rmsdelay'])
rmsdelay.append(tau)
# check parameters
assert (rmsdelay)
assert (mcs is not None)
avgdelay = array(rmsdelay).mean()
pertype = "actual"
if usemodel: pertype = "model"
# return param and data
param['mcs'] = mcs
param['rmsdelay'] = avgdelay
param['format'] = fmt
label = "${\\rm PER}_{%s}$ ${\\rm (MCS = %d}$, "%(pertype,mcs)
if ncollision is not None: label +="$N_{coll} = %d$, "%(ncollision)
label += "$\\sigma_{rms} = %.3g ns)$"%(avgdelay*1e9)
param['label'] = label
return param, data
def parse_per():
usage = "%prog [OPTIONS] TRACEFILE1 [TRACEFILE2 ...]\n" + \
" Writes parsed data to standard output."
parser = OptionParser(usage=usage)
parser.add_option("-c", "--ncollision", dest="ncollision", type="int", \
default=None, help="Filter results using number of collisions. [default=%default]")
(options, args) = parser.parse_args()
if len(args)<1:
print "Insufficient number of arguments."
parser.print_help()
raise SystemExit
tracefile = args[0:]
numtraces = len(tracefile)
# set parameters
default_parameters = {'xlabel': "SINR (dB)", \
'ylabel': "PER", \
'title': "PER vs. SINR", \
'label': None, \
'source': None, \
'format': None}
lgd, formats = [], [('ro','r:'), ('bo', 'b:'), ('go', 'g:')]
for k in range(numtraces):
tfile = tracefile[k]
# treat as normal wins trace file
trace = read_trace(options, tfile)
fmt = formats[k%len(formats)]
if not trace: continue
sys.stderr.write("Parsing trace from %s ...\n"%(tfile))
# parse actual PER from trace
param, data = parse_per_info(options, trace)
if data:
parameters = copy(default_parameters)
parameters.update(param)
parameters['source'] = tfile
parameters['format'] = fmt[0]
assert (param['label'] is not None)
parsed_data = {'parameters': parameters, 'data': data}
sys.stdout.write("%s\n"%(parsed_data) )
# parse model PER from trace
param, data = parse_per_info(options, trace, usemodel=True)
if data:
parameters = copy(default_parameters)
parameters.update(param)
parameters['source'] = tfile
parameters['format'] = fmt[1]
assert (param['label'] is not None)
parsed_data = {'parameters': parameters, 'data': data}
sys.stdout.write("%s\n"%(parsed_data) )
if __name__ == '__main__':
parse_per()
|
apache-2.0
| 5,629,459,248,063,967,000
| 35.366667
| 95
| 0.543996
| false
| 3.801394
| false
| false
| false
|
douglassquirrel/alexandra
|
libraries/pubsub.py
|
1
|
7745
|
from docstore import connect as docstore_connect
from pika import BlockingConnection, ConnectionParameters
from re import match
from time import time as now
from urllib2 import build_opener, HTTPHandler, Request, urlopen
EXCHANGE = 'alexandra'
HTTP_PATIENCE_SEC = 1
class AMQPConnection:
def __init__(self, url, context, marshal, unmarshal):
self._context = context
self.marshal, self.unmarshal = marshal, unmarshal
host = match(r"amqp://([\w\d\.]+)", url).group(1)
connection = BlockingConnection(ConnectionParameters(host))
self._channel = connection.channel()
self._channel.exchange_declare(exchange=EXCHANGE, type='topic')
self._init_docstore()
def _init_docstore(self):
location_queue = self._subscribe_raw('docstore', 'location')
self._publish_raw('docstore', 'locate', 'locate')
docstore_url = self._get_message_block_raw(location_queue, timeout=1)
self._docstore = docstore_connect(docstore_url)
def publish(self, topic, message):
self._publish_raw(self._context, topic, self.marshal(message))
def _publish_raw(self, context, topic, message):
self._channel.basic_publish(exchange=EXCHANGE,
routing_key=context + '.' + topic,
body=message)
def subscribe(self, topic):
return self._subscribe_raw(self._context, topic)
def _subscribe_raw(self, context, topic):
result = self._channel.queue_declare()
queue = result.method.queue
self._channel.queue_bind(exchange=EXCHANGE,
queue=queue,
routing_key=context + '.' + topic)
return queue
def unsubscribe(self, queue):
self._channel.queue_delete(callback=None, queue=queue)
def consume_queue(self, queue, f):
def callback(ch, method, properties, body):
f(self.unmarshal(body))
self._channel.basic_consume(callback, queue=queue, no_ack=True)
self._channel.start_consuming()
def consume_topic(self, topic, f):
queue = self.subscribe(topic)
self.consume_queue(queue, f)
def consume_all(self, f):
queue = self.subscribe('#')
def callback(ch, method, properties, body):
amqp_topic = method.routing_key
context, topic = amqp_topic.split('.', 1)
f(context, topic, self.unmarshal(body))
self._channel.basic_consume(callback, queue=queue, no_ack=True)
self._channel.start_consuming()
def get_message(self, queue):
raw_message = self._get_message_raw(queue)
if raw_message is None:
return None
else:
return self.unmarshal(raw_message)
def _get_message_raw(self, queue):
return self._channel.basic_get(queue=queue, no_ack=True)[2]
def get_all_messages(self, queue):
messages = []
while True:
message = self.get_message(queue)
if message is None:
return messages
else:
messages.append(message)
def get_message_block(self, queue, timeout=None):
return self._get_message_block(queue, self.get_message, timeout)
def _get_message_block_raw(self, queue, timeout=None):
return self._get_message_block(queue, self._get_message_raw, timeout)
def _get_message_block(self, queue, fetcher, timeout):
alarm = Alarm(timeout)
while True:
message = fetcher(queue)
if message is not None:
return message
if alarm.is_ringing():
return None
def get_current_message(self, topic):
raw_message = self._docstore.get('/%s/%s' % (self._context, topic))
if raw_message is None:
return None
else:
return self.unmarshal(raw_message)
def make_topic_monitor(self, topic):
return TopicMonitor(self, topic)
class HTTPConnection:
def __init__(self, url, context, marshal, unmarshal):
self._root_url = '%s/contexts/%s' % (url, context)
self.marshal, self.unmarshal = marshal, unmarshal
def publish(self, topic, message):
url = '%s/%s' % (self._root_url, topic)
self._visit_url(url=url, data=self.marshal(message), method='POST')
def subscribe(self, topic):
return self._visit_url('%s/%s' % (self._root_url, topic))
def unsubscribe(self, queue):
url = '%s/queues/%s' % (self._root_url, queue)
self._visit_url(url=url, method='DELETE')
def consume_queue(self, queue, f):
url = '%s/queues/%s' % (self._root_url, queue)
headers = [('Patience', HTTP_PATIENCE_SEC)]
while True:
message = self._visit_url(url=url, headers=headers)
if len(message) > 0:
f(self.unmarshal(message))
def consume_topic(self, topic, f):
queue = self.subscribe(topic)
self.consume_queue(queue, f)
def consume_all(self, f):
pass #not implemented
def get_message(self, queue):
url = '%s/queues/%s' % (self._root_url, queue)
message = self._visit_url(url)
if len(message) == 0:
return None
else:
return self.unmarshal(message)
def get_all_messages(self, queue):
url = '%s/queues/%s' % (self._root_url, queue)
headers = [('Range', 'all')]
result = self._visit_url(url=url, headers=headers)
if len(result) == 0:
return []
else:
return map(self.unmarshal, result.split('\n'))
def get_message_block(self, queue, timeout=None):
url = '%s/queues/%s' % (self._root_url, queue)
headers = [('Patience', HTTP_PATIENCE_SEC)]
alarm = Alarm(timeout)
while True:
message = self._visit_url(url=url, headers=headers)
if len(message) > 0:
return self.unmarshal(message)
if alarm.is_ringing():
return None
def get_current_message(self, topic):
url = '%s/%s' % (self._root_url, topic)
headers = [('Range', 'current')]
message = self._visit_url(url=url, headers=headers)
if len(message) == 0:
return None
else:
return self.unmarshal(message)
def make_topic_monitor(self, topic):
return TopicMonitor(self, topic)
def _visit_url(self, url, data=None, method='GET', headers=[]):
opener = build_opener(HTTPHandler)
request = Request(url)
request.get_method = lambda: method
for header in headers:
request.add_header(*header)
return opener.open(request, data).read()
connection_classes = {'amqp': AMQPConnection, 'http': HTTPConnection}
def identity(x):
return x
def connect(url, context, marshal=identity, unmarshal=identity):
protocol = match(r"(\w+)://", url).group(1)
return connection_classes[protocol](url, context, marshal, unmarshal)
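# Illustrative usage (hypothetical broker URL, context and topic):
#   conn = connect('amqp://localhost', 'sensors')
#   queue = conn.subscribe('temperature')
#   conn.publish('temperature', '21.5')
#   print(conn.get_message_block(queue, timeout=5))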
def firehose(url):
return connect(url, '#')
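# Alarm is a small timeout helper: is_ringing() becomes True once 'duration'
# seconds have elapsed since construction, and never rings if duration is None.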
class Alarm:
def __init__(self, duration):
if duration is not None:
self.alarm_time = now() + duration
else:
self.alarm_time = None
def is_ringing(self):
return self.alarm_time is not None and now() > self.alarm_time
class TopicMonitor:
def __init__(self, connection, topic):
self._connection = connection
self._queue = connection.subscribe(topic)
self._latest = None
def latest(self):
messages = self._connection.get_all_messages(self._queue)
if len(messages) > 0:
self._latest = messages[-1]
return self._latest
|
mit
| -9,086,060,357,095,325,000
| 33.575893
| 77
| 0.59277
| false
| 3.837958
| false
| false
| false
|
zarr-developers/numcodecs
|
numcodecs/fixedscaleoffset.py
|
1
|
4198
|
import numpy as np
from .abc import Codec
from .compat import ensure_ndarray, ndarray_copy
class FixedScaleOffset(Codec):
"""Simplified version of the scale-offset filter available in HDF5.
Applies the transformation `(x - offset) * scale` to all chunks. Results
are rounded to the nearest integer but are not packed according to the
minimum number of bits.
Parameters
----------
offset : float
Value to subtract from data.
scale : int
Value by which to multiply the data.
dtype : dtype
Data type to use for decoded data.
astype : dtype, optional
Data type to use for encoded data.
Notes
-----
If `astype` is an integer data type, please ensure that it is
sufficiently large to store encoded values. No checks are made and data
may become corrupted due to integer overflow if `astype` is too small.
Examples
--------
>>> import numcodecs
>>> import numpy as np
>>> x = np.linspace(1000, 1001, 10, dtype='f8')
>>> x
array([1000. , 1000.11111111, 1000.22222222, 1000.33333333,
1000.44444444, 1000.55555556, 1000.66666667, 1000.77777778,
1000.88888889, 1001. ])
>>> codec = numcodecs.FixedScaleOffset(offset=1000, scale=10, dtype='f8', astype='u1')
>>> y1 = codec.encode(x)
>>> y1
array([ 0, 1, 2, 3, 4, 6, 7, 8, 9, 10], dtype=uint8)
>>> z1 = codec.decode(y1)
>>> z1
array([1000. , 1000.1, 1000.2, 1000.3, 1000.4, 1000.6, 1000.7,
1000.8, 1000.9, 1001. ])
>>> codec = numcodecs.FixedScaleOffset(offset=1000, scale=10**2, dtype='f8', astype='u1')
>>> y2 = codec.encode(x)
>>> y2
array([ 0, 11, 22, 33, 44, 56, 67, 78, 89, 100], dtype=uint8)
>>> z2 = codec.decode(y2)
>>> z2
array([1000. , 1000.11, 1000.22, 1000.33, 1000.44, 1000.56,
1000.67, 1000.78, 1000.89, 1001. ])
>>> codec = numcodecs.FixedScaleOffset(offset=1000, scale=10**3, dtype='f8', astype='u2')
>>> y3 = codec.encode(x)
>>> y3
array([ 0, 111, 222, 333, 444, 556, 667, 778, 889, 1000], dtype=uint16)
>>> z3 = codec.decode(y3)
>>> z3
array([1000. , 1000.111, 1000.222, 1000.333, 1000.444, 1000.556,
1000.667, 1000.778, 1000.889, 1001. ])
See Also
--------
numcodecs.quantize.Quantize
"""
codec_id = 'fixedscaleoffset'
def __init__(self, offset, scale, dtype, astype=None):
self.offset = offset
self.scale = scale
self.dtype = np.dtype(dtype)
if astype is None:
self.astype = self.dtype
else:
self.astype = np.dtype(astype)
if self.dtype == object or self.astype == object:
raise ValueError('object arrays are not supported')
def encode(self, buf):
# normalise input
arr = ensure_ndarray(buf).view(self.dtype)
# flatten to simplify implementation
arr = arr.reshape(-1, order='A')
# compute scale offset
enc = (arr - self.offset) * self.scale
# round to nearest integer
enc = np.around(enc)
# convert dtype
enc = enc.astype(self.astype, copy=False)
return enc
def decode(self, buf, out=None):
# interpret buffer as numpy array
enc = ensure_ndarray(buf).view(self.astype)
# flatten to simplify implementation
enc = enc.reshape(-1, order='A')
# decode scale offset
dec = (enc / self.scale) + self.offset
# convert dtype
dec = dec.astype(self.dtype, copy=False)
# handle output
return ndarray_copy(dec, out)
def get_config(self):
# override to handle encoding dtypes
return dict(
id=self.codec_id,
scale=self.scale,
offset=self.offset,
dtype=self.dtype.str,
astype=self.astype.str
)
def __repr__(self):
r = '%s(scale=%s, offset=%s, dtype=%r' % \
(type(self).__name__, self.scale, self.offset, self.dtype.str)
if self.astype != self.dtype:
r += ', astype=%r' % self.astype.str
r += ')'
return r
|
mit
| 3,727,921,587,109,626,400
| 29.867647
| 93
| 0.57051
| false
| 3.527731
| false
| false
| false
|
treverhines/PyGeoNS
|
pygeons/plot/quiver.py
|
1
|
5221
|
'''
This module provides a quiver function which allows for error ellipses.
'''
import numpy as np
from matplotlib.quiver import Quiver as _Quiver
from matplotlib.collections import EllipseCollection
from matplotlib.backends import pylab_setup
from matplotlib.pyplot import sci
from matplotlib.pyplot import gca
from scipy.spatial import cKDTree
import warnings
_backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
def _estimate_scale(x,y,u,v):
pos = np.array([x,y]).T
# return a scale of 1 if there are fewer than two data points
if pos.shape[0] <= 1:
return 1.0
T = cKDTree(pos)
average_dist = np.mean(T.query(pos,2)[0][:,1])
average_length = np.mean(np.sqrt(u**2 + v**2))
return average_length/average_dist
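# Convert per-point uncertainties (sigma_x, sigma_y) and correlations (rho) into
# error-ellipse parameters: semi-axis lengths a >= b and the orientation phi of
# the major axis in degrees, via eigendecomposition of each 2x2 covariance matrix.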
def compute_abphi(sigma_x,sigma_y,rho):
n = len(sigma_x)
a = []
b = []
phi = []
for i in range(n):
if ((not np.isfinite(sigma_x[i])) |
(not np.isfinite(sigma_y[i])) |
(not np.isfinite(rho[i]))):
# this block should run if the uncertainties or correlations are
# not finite or are masked
a += [0.0]
b += [0.0]
phi += [0.0]
continue
sigma_xy = rho[i]*sigma_x[i]*sigma_y[i]
cov_mat = np.array([[sigma_x[i]**2,sigma_xy],
[sigma_xy,sigma_y[i]**2]])
val,vec = np.linalg.eig(cov_mat)
maxidx = np.argmax(val)
minidx = np.argmin(val)
a += [np.sqrt(val[maxidx])]
b += [np.sqrt(val[minidx])]
phi += [np.arctan2(vec[:,maxidx][1],vec[:,maxidx][0])]
a = np.array(a)
b = np.array(b)
phi = np.array(phi)*180/np.pi
return a,b,phi
def quiver(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
if not ax._hold:
ax.cla()
q = Quiver(ax, *args, **kw)
ax.add_collection(q, autolim=True)
ax.autoscale_view()
draw_if_interactive()
finally:
ax.hold(washold)
sci(q)
return q
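# Illustrative usage (hypothetical data): draw vectors with one-sigma error ellipses
#   x, y, u, v = np.random.rand(4, 10)
#   su = sv = 0.1*np.ones(10); rho = np.zeros(10)
#   quiver(x, y, u, v, sigma=(su, sv, rho))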
class Quiver(_Quiver):
def __init__(self,ax,*args,**kwargs):
if 'sigma' in kwargs:
scale_units = kwargs.get('scale_units','xy')
kwargs['scale_units'] = scale_units
if kwargs['scale_units'] != 'xy':
raise ValueError('scale units must be "xy" when sigma is given')
angles = kwargs.get('angles','xy')
kwargs['angles'] = angles
if kwargs['angles'] != 'xy':
raise ValueError('angles must be "xy" when sigma is given')
sigma = kwargs.pop('sigma',None)
ellipse_kwargs = kwargs.pop('ellipse_kwargs',{})
if 'offsets' in ellipse_kwargs:
raise ValueError('cannot specify ellipse offsets')
if 'units' in ellipse_kwargs:
raise ValueError('cannot specify ellipse units')
self.ellipse_kwargs = {'edgecolors':'k',
'facecolors':'none',
'linewidths':1.0}
self.ellipse_kwargs.update(ellipse_kwargs)
self.ellipsoids = None
_Quiver.__init__(self,ax,*args,**kwargs)
if sigma is not None:
if self.scale is None:
self.scale = _estimate_scale(self.X,self.Y,self.U,self.V)
su,sv,rho = sigma[0],sigma[1],sigma[2]
self._update_ellipsoids(su,sv,rho)
def _update_ellipsoids(self,su,sv,rho):
self.scale_units = 'xy'
self.angles = 'xy'
tips_x = self.X + self.U/self.scale
tips_y = self.Y + self.V/self.scale
tips = np.array([tips_x,tips_y]).transpose()
a,b,angle = compute_abphi(su,sv,rho)
width = 2.0*a/self.scale
height = 2.0*b/self.scale
if self.ellipsoids is not None:
self.ellipsoids.remove()
# do not draw ellipses which are too small
too_small = 0.001
length = np.sqrt((self.U/self.scale)**2 + (self.V/self.scale)**2)
with warnings.catch_warnings():
# do not print out zero division warning
warnings.simplefilter("ignore")
is_not_too_small = ((np.nan_to_num(width/length) > too_small) |
(np.nan_to_num(height/length) > too_small))
width = width[is_not_too_small]
height = height[is_not_too_small]
angle = angle[is_not_too_small]
tips = tips[is_not_too_small]
# don't add ellipses if there are no ellipses to add
if any(is_not_too_small):
self.ellipsoids = EllipseCollection(width,height,angle,
units=self.scale_units,
offsets = tips,
transOffset=self.ax.transData,
**self.ellipse_kwargs)
self.ax.add_collection(self.ellipsoids)
else:
self.ellipsoids = None
def set_UVC(self,u,v,C=None,sigma=None):
if C is None:
_Quiver.set_UVC(self,u,v)
else:
_Quiver.set_UVC(self,u,v,C)
if sigma is not None:
su,sv,rho = sigma[0],sigma[1],sigma[2]
self._update_ellipsoids(su,sv,rho)
def remove(self):
# remove the quiver and ellipsoid collection
_Quiver.remove(self)
if self.ellipsoids is not None:
self.ellipsoids.remove()
|
mit
| 676,115,310,877,625,300
| 28.834286
| 88
| 0.583605
| false
| 3.230817
| false
| false
| false
|
yinzishao/programming
|
offer_11.py
|
1
|
1126
|
# -*- coding:utf-8 -*-
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
"""
Kth node from the end of a linked list
Problem description:
Given a linked list, return the kth node counting from the end.
Edge cases: k = 0, k greater than the list length, head is None.
Approach:
Keep two pointers while traversing. The first pointer starts from the head of the list; for the first k-1 steps the second pointer stays put.
From step k-1 onward, the second pointer also starts traversing from the head.
Because the two pointers stay k-1 nodes apart, when the first (leading) pointer reaches the tail of the list, the second (trailing) pointer is exactly at the kth node from the end.
"""
class Solution:
def FindKthToTail(self, head, k):
# write code here
pre,aft=head,head
if head ==None:
return head
if k ==0:
return None
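# advance the leading pointer k-1 nodes ahead; if the list has fewer than k
# nodes, there is no kth node from the end, so return None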
for i in range(k-1):
if aft.next == None:
return None
aft = aft.next
while aft.next != None:
aft = aft.next
pre = pre.next
return pre
|
gpl-3.0
| -1,569,520,840,452,344,800
| 20.914286
| 64
| 0.55483
| false
| 1.589212
| false
| false
| false
|
chhsiao90/cheat-ext
|
cheat_ext/utils.py
|
1
|
1503
|
import re
import os
_GITHUB_URL = "https://github.com"
STATE_UNLINK = "unlink"
STATE_CONFLICT = "conflict"
STATE_LINKED = "linked"
def get_github_url(repo):
return _GITHUB_URL + "/" + repo + ".git"
def get_cheat_path():
return os.path.join(
os.path.expanduser("~"),
".cheat")
def get_ext_path():
return os.path.join(
get_cheat_path(), ".ext")
def get_sheet_path(repo):
return os.path.join(
get_ext_path(),
repo.replace("/", "_"))
def get_available_sheets_at(sheet_dir):
def is_available_sheet(sheet):
return (
not os.path.isdir(os.path.join(sheet_dir, sheet)) and
re.match(r"^[a-zA-Z-_]+$", sheet))
sheets = list(filter(is_available_sheet, os.listdir(sheet_dir)))
sheets.sort()
return sheets
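# Pair each sheet name with its install state: STATE_UNLINK if nothing exists at
# the cheat path, STATE_LINKED if it is a symlink back into sheet_dir, and
# STATE_CONFLICT otherwise.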
def get_sheets_with_state(cheat_dir, sheet_dir, sheets):
def append_state(sheet):
cheat_path = os.path.join(cheat_dir, sheet)
if not os.path.exists(cheat_path):
return (sheet, STATE_UNLINK)
elif (os.path.islink(cheat_path) and
os.readlink(cheat_path) == os.path.join(sheet_dir, sheet)):
return (sheet, STATE_LINKED)
else:
return (sheet, STATE_CONFLICT)
return list(map(append_state, sheets))
def filter_by_state(match, state_sheets):
def filter_by_state_function(state_sheet):
_, state = state_sheet
return state == match
return filter(filter_by_state_function, state_sheets)
|
mit
| -2,809,685,477,133,944,300
| 24.05
| 73
| 0.60479
| false
| 3.211538
| false
| false
| false
|
RTHMaK/RPGOne
|
deep_qa-master/deep_qa/layers/attention/attention.py
|
1
|
3467
|
from copy import deepcopy
from typing import Any, Dict
from keras import backend as K
from overrides import overrides
from ..masked_layer import MaskedLayer
from ...common.params import get_choice_with_default
from ...tensors.masked_operations import masked_softmax
from ...tensors.similarity_functions import similarity_functions
class Attention(MaskedLayer):
"""
This Layer takes two inputs: a vector and a matrix. We compute the
similarity between the vector and each row in the matrix, and then perform
a softmax over rows using those computed similarities. We handle masking
properly for masked rows in the matrix, though we ignore any masking on
the vector.
By default similarity is computed with a dot product, but you can
alternatively use a parameterized similarity function if you wish.
Inputs:
- vector: shape ``(batch_size, embedding_dim)``, mask is ignored if provided
- matrix: shape ``(batch_size, num_rows, embedding_dim)``, with mask ``(batch_size, num_rows)``
Output:
- attention: shape ``(batch_size, num_rows)``, no mask (masked input rows have value 0 in the
output)
Parameters
----------
similarity_function_params: Dict[str, Any], optional (default={})
These parameters get passed to a similarity function (see
:mod:`deep_qa.tensors.similarity_functions` for more info on what's acceptable). The
default similarity function with no parameters is a simple dot product.
"""
def __init__(self, similarity_function: Dict[str, Any]=None, **kwargs):
super(Attention, self).__init__(**kwargs)
self.similarity_function_params = deepcopy(similarity_function)
if similarity_function is None:
similarity_function = {}
sim_function_choice = get_choice_with_default(similarity_function,
'type',
list(similarity_functions.keys()))
similarity_function['name'] = self.name + '_similarity_function'
self.similarity_function = similarity_functions[sim_function_choice](**similarity_function)
@overrides
def build(self, input_shape):
tensor_1_dim = input_shape[0][-1]
tensor_2_dim = input_shape[1][-1]
self.trainable_weights = self.similarity_function.initialize_weights(tensor_1_dim, tensor_2_dim)
super(Attention, self).build(input_shape)
@overrides
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
# We do not need a mask beyond this layer.
return None
@overrides
def compute_output_shape(self, input_shapes):
return (input_shapes[1][0], input_shapes[1][1])
@overrides
def call(self, inputs, mask=None):
vector, matrix = inputs
if mask is None:
matrix_mask = None
else:
matrix_mask = mask[1]
num_rows = K.int_shape(matrix)[1]
tiled_vector = K.repeat_elements(K.expand_dims(vector, axis=1), num_rows, axis=1)
similarities = self.similarity_function.compute_similarity(tiled_vector, matrix)
return masked_softmax(similarities, matrix_mask)
@overrides
def get_config(self):
base_config = super(Attention, self).get_config()
config = {'similarity_function': self.similarity_function_params}
config.update(base_config)
return config
|
apache-2.0
| 5,103,416,499,689,313,000
| 39.313953
| 104
| 0.657341
| false
| 4.259214
| true
| false
| false
|
canardleteer/pydisque
|
pydisque/client.py
|
1
|
16532
|
"""Pydisque makes Disque easy to access in python."""
import redis
from redis.exceptions import ConnectionError
from functools import wraps
try:
# Python 3
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import logging
logger = logging.getLogger(__name__)
class Job(object):
"""Represents a Disque Job."""
def __init__(self, id, queue_name, payload):
"""Initialize a job."""
self.id = id
self.queue_name = queue_name
self.payload = payload
def __repr__(self):
"""Make a Job easy to read."""
return '<Job id:%s queue_name:%s>' % (self.id, self.queue_name)
class Node(object):
"""Represents a Disque Node via host and port."""
def __init__(self, node_id, host, port, connection):
"""
Initialize the Disque Node.
:param node_id:
:param host:
:param port:
:param connection: redis.Redis connection
:returns:
"""
self.node_id = node_id
self.host = host
self.port = port
self.connection = connection
def __repr__(self):
"""Make Node easy to read."""
return '<Node %s:%s>' % (self.host, self.port)
class retry(object):
"""retry utility object."""
def __init__(self, retry_count=2):
"""Initialize retry utility object."""
self.retry_count = retry_count
def __call__(self, fn):
"""Function wrapper."""
@wraps(fn)
def wrapped_f(*args, **kwargs):
c = 0
while c <= self.retry_count:
try:
return fn(*args, **kwargs)
except:
logging.critical("retrying because of this exception - %s",
c)
logging.exception("exception to retry ")
if c == self.retry_count:
raise
c += 1
return wrapped_f
class Client(object):
"""
Client is the Disque Client.
You can pass in a list of nodes; the client will try to connect to the
first node, and if that fails it will try the second, and
so forth.
:Example:
>>> client = Client(['localhost:7711', 'localhost:7712'])
>>> client.connect()
"""
def __init__(self, nodes=None):
"""Initalize a client to the specified nodes."""
if nodes is None:
nodes = ['localhost:7711']
self.nodes = {}
for n in nodes:
self.nodes[n] = None
self.connected_node = None
def connect(self):
"""
Connect to one of the Disque nodes.
You can get current connection with connected_node property
:returns: nothing
"""
self.connected_node = None
for i, node in self.nodes.items():
host, port = i.split(':')
port = int(port)
redis_client = redis.Redis(host, port)
try:
ret = redis_client.execute_command('HELLO')
format_version, node_id = ret[0], ret[1]
others = ret[2:]
self.nodes[i] = Node(node_id, host, port, redis_client)
self.connected_node = self.nodes[i]
except redis.exceptions.ConnectionError:
pass
if not self.connected_node:
raise Exception("couldn't connect to any nodes")
logger.info("connected to node %s" % self.connected_node)
def get_connection(self):
"""
Return current connected_nodes connection.
:rtype: redis.Redis
"""
return self.connected_node.connection
@retry()
def execute_command(self, *args, **kwargs):
"""Execute a command on the connected server."""
try:
return self.get_connection().execute_command(*args, **kwargs)
except ConnectionError as e:
logger.warn('trying to reconnect')
self.connect()
logger.warn('connected')
raise
def _grouper(self, iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks."""
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def info(self):
"""
Return server information.
INFO
:returns: server info
"""
return self.execute_command("INFO")
def add_job(self, queue_name, job, timeout=200, replicate=None, delay=None,
retry=None, ttl=None, maxlen=None, async=None):
"""
Add a job to a queue.
ADDJOB queue_name job <ms-timeout> [REPLICATE <count>] [DELAY <sec>]
[RETRY <sec>] [TTL <sec>] [MAXLEN <count>] [ASYNC]
:param queue_name: is the name of the queue, any string, basically.
:param job: is a string representing the job.
:param timeout: is the command timeout in milliseconds.
:param replicate: count is the number of nodes the job should be
replicated to.
:param delay: sec is the number of seconds that should elapse
before the job is queued by any server.
:param retry: sec period after which, if no ACK is received, the
job is put again into the queue for delivery. If RETRY is 0,
the job has an at-least-once delivery semantics.
:param ttl: sec is the max job life in seconds. After this time,
the job is deleted even if it was not successfully delivered.
:param maxlen: count specifies that if there are already count
messages queued for the specified queue name, the message is
refused and an error reported to the client.
:param async: asks the server to let the command return ASAP and
replicate the job to other nodes in the background. The job
gets queued ASAP, while normally the job is put into the queue
only when the client gets a positive reply.
:returns: job_id
"""
command = ['ADDJOB', queue_name, job, timeout]
if replicate:
command += ['REPLICATE', replicate]
if delay:
command += ['DELAY', delay]
if retry:
command += ['RETRY', retry]
if ttl:
command += ['TTL', ttl]
if maxlen:
command += ['MAXLEN', maxlen]
if async:
command += ['ASYNC']
# TODO(canardleteer): we need to handle "-PAUSE" messages more
# appropriately, for now it's up to the person using the library
# to handle a generic ResponseError on their own.
logger.debug("sending job - %s", command)
job_id = self.execute_command(*command)
logger.debug("sent job - %s", command)
logger.debug("job_id: %s " % job_id)
return job_id
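    # Example (editor's sketch): enqueueing a JSON payload onto a hypothetical
    # "emails" queue with a 100 ms command timeout and a 60 s retry window,
    # assuming `client` is a connected Client instance:
    #   import json
    #   payload = json.dumps({'to': 'user@example.com', 'body': 'hi'})
    #   job_id = client.add_job('emails', payload, timeout=100, retry=60)
    # The returned job_id can later be passed to ack_job() or nack_job().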
def get_job(self, queues, timeout=None, count=None, nohang=False, withcounters=False):
"""
Return some number of jobs from specified queues.
GETJOB [NOHANG] [TIMEOUT <ms-timeout>] [COUNT <count>] [WITHCOUNTERS] FROM
queue1 queue2 ... queueN
:param queues: name of queues
:returns: list of tuple(job_id, queue_name, job), tuple(job_id, queue_name, job, nacks, additional_deliveries) or empty list
:rtype: list
"""
assert queues
command = ['GETJOB']
if nohang:
command += ['NOHANG']
if timeout:
command += ['TIMEOUT', timeout]
if count:
command += ['COUNT', count]
if withcounters:
command += ['WITHCOUNTERS']
command += ['FROM'] + queues
results = self.execute_command(*command)
if not results:
return []
if withcounters:
return [(job_id, queue_name, job, nacks, additional_deliveries) for
job_id, queue_name, job, _, nacks, _, additional_deliveries in results]
else:
return [(job_id, queue_name, job) for
job_id, queue_name, job in results]
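    # Example (editor's sketch): consuming from the hypothetical "emails"
    # queue; `handle` stands in for whatever work the caller does per job:
    #   for job_id, queue_name, job in client.get_job(['emails'], timeout=1000):
    #       handle(job)
    #       client.ack_job(job_id)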
def ack_job(self, *job_ids):
"""
Acknowledge the execution of one or more jobs via job IDs.
ACKJOB jobid1 jobid2 ... jobidN
:param job_ids: list of job_ids
"""
self.execute_command('ACKJOB', *job_ids)
def nack_job(self, *job_ids):
"""
Acknowledge the failure of one or more jobs via job IDs.
NACK jobid1 jobid2 ... jobidN
:param job_ids: list of job_ids
"""
self.execute_command('NACK', *job_ids)
def fast_ack(self, *job_ids):
"""
Perform a best effort cluster wide deletion of the specified job IDs.
FASTACK jobid1 jobid2 ... jobidN
        :param job_ids: list of job_ids
"""
self.execute_command('FASTACK', *job_ids)
def working(self, job_id):
"""
Signal Disque to postpone the next time it will deliver the job again.
WORKING <jobid>
:param job_id: name of the job still being worked on
        :returns: the number of seconds you (likely) postponed the
            message visibility for other workers
"""
return self.execute_command('WORKING', job_id)
def qlen(self, queue_name):
"""
Return the length of the named queue.
QLEN <qname>
:param queue_name: name of the queue
:returns: length of the queue
"""
return self.execute_command('QLEN', queue_name)
# TODO (canardleteer): a QueueStatus object may be the best way to do this
# TODO (canardleteer): return_dict should probably be True by default, but
# i don't want to break anyones code
def qstat(self, queue_name, return_dict=False):
"""
        Return the status of the queue.
        QSTAT support in Disque is still being finalised, so treat this
        command and its reply format as experimental.
QSTAT <qname>
Return produced ... consumed ... idle ... sources [...] ctime ...
"""
rtn = self.execute_command('QSTAT', queue_name)
if return_dict:
grouped = self._grouper(rtn, 2)
rtn = dict((a, b) for a, b in grouped)
return rtn
def qpeek(self, queue_name, count):
"""
Return, without consuming from queue, count jobs.
If count is positive the specified number of jobs are
returned from the oldest to the newest (in the same
        best-effort FIFO order as GETJOB). If count is negative
        the command changes behavior and shows the count newest jobs,
        from the newest to the oldest.
QPEEK <qname> <count>
:param queue_name: name of the queue
:param count:
"""
return self.execute_command("QPEEK", queue_name, count)
def enqueue(self, *job_ids):
"""
Queue jobs if not already queued.
        :param job_ids: list of job_ids to enqueue
"""
return self.execute_command("ENQUEUE", *job_ids)
def dequeue(self, *job_ids):
"""
Remove the job from the queue.
:param job_ids: list of job_ids
"""
return self.execute_command("DEQUEUE", *job_ids)
def del_job(self, *job_ids):
"""
Completely delete a job from a node.
Note that this is similar to FASTACK, but limited to a
single node since no DELJOB cluster bus message is sent
to other nodes.
        :param job_ids: list of job_ids to delete
"""
return self.execute_command("DELJOB", *job_ids)
# TODO (canardleteer): a JobStatus object may be the best for this,
# but I think SHOW is going to change to SHOWJOB
def show(self, job_id, return_dict=False):
"""
Describe the job.
        :param job_id: ID of the job to describe
"""
rtn = self.execute_command('SHOW', job_id)
if return_dict:
grouped = self._grouper(rtn, 2)
rtn = dict((a, b) for a, b in grouped)
return rtn
def pause(self, queue_name, kw_in=None, kw_out=None, kw_all=None,
kw_none=None, kw_state=None, kw_bcast=None):
"""
Pause a queue.
Unfortunately, the PAUSE keywords are mostly reserved words in Python,
so I've been a little creative in the function variable names. Open
to suggestions to change it (canardleteer)
:param queue_name: The job queue we are modifying.
:param kw_in: pause the queue in input.
:param kw_out: pause the queue in output.
:param kw_all: pause the queue in input and output (same as specifying
both the in and out options).
:param kw_none: clear the paused state in input and output.
:param kw_state: just report the current queue state.
:param kw_bcast: send a PAUSE command to all the reachable nodes of
the cluster to set the same queue in the other nodes
to the same state.
"""
command = ["PAUSE", queue_name]
if kw_in:
command += ["in"]
if kw_out:
command += ["out"]
if kw_all:
command += ["all"]
if kw_none:
command += ["none"]
if kw_state:
command += ["state"]
if kw_bcast:
command += ["bcast"]
return self.execute_command(*command)
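    # Example (editor's sketch): pausing the hypothetical "emails" queue for
    # producers on every reachable node, then clearing the pause again:
    #   client.pause('emails', kw_in=True, kw_bcast=True)
    #   client.pause('emails', kw_none=True, kw_bcast=True)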
def qscan(self, cursor=0, count=None, busyloop=None, minlen=None,
maxlen=None, importrate=None):
"""
Iterate all the existing queues in the local node.
        :param count: A hint about how much work to do per iteration.
:param busyloop: Block and return all the elements in a busy loop.
:param minlen: Don't return elements with less than count jobs queued.
:param maxlen: Don't return elements with more than count jobs queued.
:param importrate: Only return elements with an job import rate
(from other nodes) >= rate.
"""
command = ["QSCAN", cursor]
if count:
command += ["COUNT", count]
if busyloop:
command += ["BUSYLOOP"]
if minlen:
command += ["MINLEN", minlen]
if maxlen:
command += ["MAXLEN", maxlen]
if importrate:
command += ["IMPORTRATE", importrate]
return self.execute_command(*command)
def jscan(self, cursor=0, count=None, busyloop=None, queue=None,
state=None, reply=None):
"""Iterate all the existing jobs in the local node.
        :param count: A hint about how much work to do per iteration.
:param busyloop: Block and return all the elements in a busy loop.
:param queue: Return only jobs in the specified queue.
:param state: Must be a list - Return jobs in the specified state.
Can be used multiple times for a logic OR.
:param reply: None or string {"all", "id"} - Job reply type. Type can
be all or id. Default is to report just the job ID. If all is
specified the full job state is returned like for the SHOW command.
"""
command = ["JSCAN", cursor]
if count:
command += ["COUNT", count]
if busyloop:
command += ["BUSYLOOP"]
if queue:
command += ["QUEUE", queue]
if type(state) is list:
for s in state:
command += ["STATE", s]
if reply:
command += ["REPLY", reply]
return self.execute_command(*command)
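    # Example (editor's sketch): a single pass over local state with the scan
    # commands above; both take a cursor, so a full walk would loop until the
    # server returns a zero cursor again:
    #   queues_reply = client.qscan(cursor=0, minlen=1)
    #   jobs_reply = client.jscan(cursor=0, queue='emails', state=['queued'])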
def hello(self):
"""
        Return the hello format version, this node's ID and, for every known
        node, its ID, IP address, port and priority (lower is better, meaning
        the node is more available).
        Clients should use this as a handshake command when connecting to a Disque node.
HELLO
        :returns: [<hello format version>, <this node ID>, [<all the nodes IDs, IP addresses, ports, and priority>, ...]]
"""
return self.execute_command("HELLO")
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
c = Client(['localhost:7712', 'localhost:7711'])
c.connect()
import json
job = json.dumps(["hello", "1234"])
logger.info(c.add_job("test", job))
jobs = c.get_job(['test'], timeout=5)
    for job_id, queue_name, payload in jobs:
logger.info(job_id)
c.ack_job(job_id)
# while True:
# jobs = c.get_job(['test'], timeout=5)
|
mit
| -4,344,732,607,685,367,300
| 30.792308
| 150
| 0.567566
| false
| 4.19381
| false
| false
| false
|
Trust-Code/trust-addons
|
crm_multi_call/models/multi_call.py
|
1
|
2414
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import api, models, fields
class wizard(models.TransientModel):
_name = 'multi.call'
res_user_id = fields.Many2many('res.users', string="Atendentes")
@api.multi
def create_calls(self):
customers = self._context.get('active_ids')
customers_ids = self.env['res.partner'].browse(customers)
cpu = len(customers_ids) / len(self.res_user_id)
indice_usuario = 0
somador = 0
for c in customers_ids:
crm_phonecall = self.env['crm.phonecall']
crm_phonecall.create({
'name': c.category_id.name,
'partner_phone': '%s-%s-%s-%s' % (c.phone, c.mobile,
c.x_phone1, c.fax),
'partner_id': c.id,
'user_id': self.res_user_id[indice_usuario].id
})
somador += 1
if somador >= cpu and indice_usuario < len(self.res_user_id) - 1:
indice_usuario += 1
somador = 0
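        # Worked example (editor's note, not part of the original module):
        # with 10 selected partners and 3 attendants, cpu = 10 / 3 = 3 under
        # Python 2 integer division, so the first two attendants each get 3
        # phonecalls and the last one gets the remaining 4.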
|
agpl-3.0
| -1,911,900,117,082,598,700
| 48.265306
| 79
| 0.444905
| false
| 4.615679
| false
| false
| false
|
davy39/eric
|
Plugins/WizardPlugins/QRegExpWizard/Ui_QRegExpWizardRepeatDialog.py
|
1
|
7059
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Plugins/WizardPlugins/QRegExpWizard/QRegExpWizardRepeatDialog.ui'
#
# Created: Tue Nov 18 17:53:58 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_QRegExpWizardRepeatDialog(object):
def setupUi(self, QRegExpWizardRepeatDialog):
QRegExpWizardRepeatDialog.setObjectName("QRegExpWizardRepeatDialog")
QRegExpWizardRepeatDialog.resize(331, 197)
QRegExpWizardRepeatDialog.setSizeGripEnabled(True)
self.vboxlayout = QtWidgets.QVBoxLayout(QRegExpWizardRepeatDialog)
self.vboxlayout.setObjectName("vboxlayout")
self.groupBox = QtWidgets.QGroupBox(QRegExpWizardRepeatDialog)
self.groupBox.setTitle("")
self.groupBox.setFlat(True)
self.groupBox.setObjectName("groupBox")
self.gridlayout = QtWidgets.QGridLayout(self.groupBox)
self.gridlayout.setContentsMargins(0, 0, 0, 0)
self.gridlayout.setObjectName("gridlayout")
self.textLabel1_6 = QtWidgets.QLabel(self.groupBox)
self.textLabel1_6.setObjectName("textLabel1_6")
self.gridlayout.addWidget(self.textLabel1_6, 2, 2, 1, 1)
self.textLabel1_7 = QtWidgets.QLabel(self.groupBox)
self.textLabel1_7.setObjectName("textLabel1_7")
self.gridlayout.addWidget(self.textLabel1_7, 3, 2, 1, 1)
self.textLabel1_5 = QtWidgets.QLabel(self.groupBox)
self.textLabel1_5.setObjectName("textLabel1_5")
self.gridlayout.addWidget(self.textLabel1_5, 1, 2, 1, 1)
self.lowerSpin = QtWidgets.QSpinBox(self.groupBox)
self.lowerSpin.setEnabled(False)
self.lowerSpin.setAlignment(QtCore.Qt.AlignRight)
self.lowerSpin.setProperty("value", 1)
self.lowerSpin.setObjectName("lowerSpin")
self.gridlayout.addWidget(self.lowerSpin, 4, 1, 1, 1)
self.upperSpin = QtWidgets.QSpinBox(self.groupBox)
self.upperSpin.setEnabled(False)
self.upperSpin.setAlignment(QtCore.Qt.AlignRight)
self.upperSpin.setProperty("value", 1)
self.upperSpin.setObjectName("upperSpin")
self.gridlayout.addWidget(self.upperSpin, 4, 3, 1, 1)
self.textLabel6 = QtWidgets.QLabel(self.groupBox)
self.textLabel6.setObjectName("textLabel6")
self.gridlayout.addWidget(self.textLabel6, 4, 2, 1, 1)
self.betweenButton = QtWidgets.QRadioButton(self.groupBox)
self.betweenButton.setObjectName("betweenButton")
self.gridlayout.addWidget(self.betweenButton, 4, 0, 1, 1)
self.exactSpin = QtWidgets.QSpinBox(self.groupBox)
self.exactSpin.setEnabled(False)
self.exactSpin.setAlignment(QtCore.Qt.AlignRight)
self.exactSpin.setProperty("value", 1)
self.exactSpin.setObjectName("exactSpin")
self.gridlayout.addWidget(self.exactSpin, 3, 1, 1, 1)
self.exactButton = QtWidgets.QRadioButton(self.groupBox)
self.exactButton.setObjectName("exactButton")
self.gridlayout.addWidget(self.exactButton, 3, 0, 1, 1)
self.maxSpin = QtWidgets.QSpinBox(self.groupBox)
self.maxSpin.setEnabled(False)
self.maxSpin.setAlignment(QtCore.Qt.AlignRight)
self.maxSpin.setProperty("value", 1)
self.maxSpin.setObjectName("maxSpin")
self.gridlayout.addWidget(self.maxSpin, 2, 1, 1, 1)
self.maxButton = QtWidgets.QRadioButton(self.groupBox)
self.maxButton.setObjectName("maxButton")
self.gridlayout.addWidget(self.maxButton, 2, 0, 1, 1)
self.minButton = QtWidgets.QRadioButton(self.groupBox)
self.minButton.setObjectName("minButton")
self.gridlayout.addWidget(self.minButton, 1, 0, 1, 1)
self.minSpin = QtWidgets.QSpinBox(self.groupBox)
self.minSpin.setEnabled(False)
self.minSpin.setAlignment(QtCore.Qt.AlignRight)
self.minSpin.setProperty("value", 1)
self.minSpin.setObjectName("minSpin")
self.gridlayout.addWidget(self.minSpin, 1, 1, 1, 1)
self.unlimitedButton = QtWidgets.QRadioButton(self.groupBox)
self.unlimitedButton.setObjectName("unlimitedButton")
self.gridlayout.addWidget(self.unlimitedButton, 0, 0, 1, 4)
self.vboxlayout.addWidget(self.groupBox)
self.buttonBox = QtWidgets.QDialogButtonBox(QRegExpWizardRepeatDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.vboxlayout.addWidget(self.buttonBox)
self.retranslateUi(QRegExpWizardRepeatDialog)
self.minButton.toggled['bool'].connect(self.minSpin.setEnabled)
self.maxButton.toggled['bool'].connect(self.maxSpin.setEnabled)
self.exactButton.toggled['bool'].connect(self.exactSpin.setEnabled)
self.betweenButton.toggled['bool'].connect(self.lowerSpin.setEnabled)
self.betweenButton.toggled['bool'].connect(self.upperSpin.setEnabled)
self.buttonBox.accepted.connect(QRegExpWizardRepeatDialog.accept)
self.buttonBox.rejected.connect(QRegExpWizardRepeatDialog.reject)
QtCore.QMetaObject.connectSlotsByName(QRegExpWizardRepeatDialog)
QRegExpWizardRepeatDialog.setTabOrder(self.unlimitedButton, self.minButton)
QRegExpWizardRepeatDialog.setTabOrder(self.minButton, self.minSpin)
QRegExpWizardRepeatDialog.setTabOrder(self.minSpin, self.maxButton)
QRegExpWizardRepeatDialog.setTabOrder(self.maxButton, self.maxSpin)
QRegExpWizardRepeatDialog.setTabOrder(self.maxSpin, self.exactButton)
QRegExpWizardRepeatDialog.setTabOrder(self.exactButton, self.exactSpin)
QRegExpWizardRepeatDialog.setTabOrder(self.exactSpin, self.betweenButton)
QRegExpWizardRepeatDialog.setTabOrder(self.betweenButton, self.lowerSpin)
QRegExpWizardRepeatDialog.setTabOrder(self.lowerSpin, self.upperSpin)
def retranslateUi(self, QRegExpWizardRepeatDialog):
_translate = QtCore.QCoreApplication.translate
QRegExpWizardRepeatDialog.setWindowTitle(_translate("QRegExpWizardRepeatDialog", "Number of repetitions"))
self.textLabel1_6.setText(_translate("QRegExpWizardRepeatDialog", "times"))
self.textLabel1_7.setText(_translate("QRegExpWizardRepeatDialog", "times"))
self.textLabel1_5.setText(_translate("QRegExpWizardRepeatDialog", "times"))
self.textLabel6.setText(_translate("QRegExpWizardRepeatDialog", "and"))
self.betweenButton.setText(_translate("QRegExpWizardRepeatDialog", "Between"))
self.exactButton.setText(_translate("QRegExpWizardRepeatDialog", "Exactly"))
self.maxButton.setText(_translate("QRegExpWizardRepeatDialog", "Maximum"))
self.minButton.setText(_translate("QRegExpWizardRepeatDialog", "Minimum"))
self.unlimitedButton.setText(_translate("QRegExpWizardRepeatDialog", "Unlimited (incl. zero times)"))
|
gpl-3.0
| 4,168,190,662,937,062,400
| 57.338843
| 121
| 0.726732
| false
| 3.725066
| false
| false
| false
|
EluOne/Nett
|
nett.py
|
1
|
42271
|
#!/usr/bin/python
'Nova Echo Trade Tool'
# Copyright (C) 2014 Tim Cumming
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Tim Cumming aka Elusive One
# Created: 01/04/14
import os
import pickle
import time
import datetime
import wx
import sqlite3 as lite
import config
from common.api import onError, reprocess, fetchItems
from common.classes import Item, Material, MaterialRow
from ObjectListView import ObjectListView, ColumnDefn, GroupListView
# This will be the lists for the ui choices on the market.
quickbarList = []
materialsList = []
itemList = []
marketGroups = {}
marketRelations = {}
numIDs = 0
materialDict = {}
# Lets try to load up our previous quickbarList from the cache file.
if (os.path.isfile('nett.cache')):
cacheFile = open('nett.cache', 'r')
quickbarList = pickle.load(cacheFile)
cacheFile.close()
class MainWindow(wx.Frame):
def __init__(self, *args, **kwds):
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.numWidgets = 0
# List and Dictionary initialisation.
if itemList == []: # Build a list of all items from the static data dump.
try:
con = lite.connect('static.db') # A cut down version of the CCP dump converted to sqlite. (~8mb)
con.text_factory = str
with con:
cur = con.cursor()
# With this query we are looking to populate the itemID's with their respective names and parent market groups.
# Eve items currently go up to ID 33612, then Dust items start from 350916
statement = "SELECT typeID, typeName, marketGroupID FROM invtypes WHERE marketGroupID >= 0 ORDER BY typeName;"
cur.execute(statement)
rows = cur.fetchall()
for row in rows:
# The data above taken from the db then all zeros for the buy/sell values (x16), query time and widget key.
itemList.append(Item(int(row[0]), str(row[1]), int(row[2]), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# This query will hold all of the market group ID to name relations in a dictionary for ease.
groupStatement = "SELECT marketGroupID, marketGroupName FROM invMarketGroups WHERE marketGroupID >= 0 ORDER BY marketGroupID;"
cur.execute(groupStatement)
groupRows = cur.fetchall()
for row in groupRows:
marketGroups.update({int(row[0]): str(row[1])})
# This statement is for the branches of the market treeCtrl using all the market groups and their relationship to each other.
relationStatement = "SELECT marketGroupID, parentGroupID FROM invMarketGroups ORDER BY parentGroupID;"
cur.execute(relationStatement)
relationRows = cur.fetchall()
for row in relationRows:
if row[1]:
marketRelations.update({int(row[0]): int(row[1])})
else:
marketRelations.update({int(row[0]): 'Market'})
except lite.Error as err:
error = ('SQL Lite Error: ' + repr(err.args[0]) + repr(err.args[1:])) # Error String
onError(error)
finally:
if con:
con.close()
self.leftNotebook = wx.Notebook(self, wx.ID_ANY, style=0)
self.marketNotebookPane = wx.Panel(self.leftNotebook, wx.ID_ANY)
self.searchTextCtrl = wx.TextCtrl(self.marketNotebookPane, wx.ID_ANY, "")
self.searchButton = wx.Button(self.marketNotebookPane, wx.ID_FIND, (""))
self.marketTree = wx.TreeCtrl(self.marketNotebookPane, wx.ID_ANY, style=wx.TR_HAS_BUTTONS | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER)
self.addButton = wx.Button(self.marketNotebookPane, wx.ID_ANY, ("Add to Quickbar"))
self.fetchButton = wx.Button(self.marketNotebookPane, wx.ID_ANY, ("Fetch Data"))
self.quickbarNotebookPane = wx.Panel(self.leftNotebook, wx.ID_ANY)
self.quickbarListCtrl = ObjectListView(self.quickbarNotebookPane, wx.ID_ANY, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.removeButton = wx.Button(self.quickbarNotebookPane, wx.ID_ANY, ("Remove From Quickbar"))
self.fetchButtonTwo = wx.Button(self.quickbarNotebookPane, wx.ID_ANY, ("Fetch Data"))
self.materiallsNotebookPane = wx.Panel(self.leftNotebook, wx.ID_ANY)
self.materialsListCtrl = GroupListView(self.materiallsNotebookPane, wx.ID_ANY, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.rightPanel = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.statusbar = self.CreateStatusBar() # A Status bar in the bottom of the window
# Menu Bar
self.frame_menubar = wx.MenuBar()
self.fileMenu = wx.Menu()
self.menuAbout = wx.MenuItem(self.fileMenu, wx.ID_ABOUT, "&About", "", wx.ITEM_NORMAL)
self.fileMenu.AppendItem(self.menuAbout)
self.menuExport = wx.MenuItem(self.fileMenu, wx.ID_SAVE, "&Export", " Export Price Data", wx.ITEM_NORMAL)
self.fileMenu.AppendItem(self.menuExport)
self.menuExit = wx.MenuItem(self.fileMenu, wx.ID_EXIT, "E&xit", "", wx.ITEM_NORMAL)
self.fileMenu.AppendItem(self.menuExit)
self.frame_menubar.Append(self.fileMenu, "File")
self.SetMenuBar(self.frame_menubar)
# Menu Bar end
# Menu events.
self.Bind(wx.EVT_MENU, self.OnExport, self.menuExport)
self.Bind(wx.EVT_MENU, self.OnExit, self.menuExit)
self.Bind(wx.EVT_MENU, self.OnAbout, self.menuAbout)
# Button Events
self.Bind(wx.EVT_BUTTON, self.onProcess, self.fetchButton)
self.Bind(wx.EVT_BUTTON, self.onProcess, self.fetchButtonTwo)
self.Bind(wx.EVT_BUTTON, self.onAdd, self.addButton)
self.Bind(wx.EVT_BUTTON, self.onRemove, self.removeButton)
self.Bind(wx.EVT_BUTTON, self.searchTree, self.searchButton)
# register the self.onExpand function to be called
wx.EVT_TREE_ITEM_EXPANDING(self.marketTree, self.marketTree.GetId(), self.onExpand)
self.__set_properties()
self.__do_layout()
def __set_properties(self):
self.SetTitle(("Nett"))
self.SetSize((1024, 600))
self.rightPanel.SetScrollRate(10, 10)
self.SetBackgroundColour(wx.NullColour) # Use system default colour
self.statusbar.SetStatusText('Welcome to Nett')
self.quickbarListCtrl.SetEmptyListMsg('Add some items\nto start')
self.quickbarListCtrl.SetColumns([
ColumnDefn('Name', 'left', 320, 'itemName'),
])
self.materialsListCtrl.SetColumns([
ColumnDefn('Name', 'left', 100, 'materialName'),
ColumnDefn('Buy', 'right', 90, 'materialBuy'),
ColumnDefn('Sell', 'right', 90, 'materialSell'),
ColumnDefn('System', 'right', -1, 'systemName'),
])
self.materialsListCtrl.SetSortColumn(self.materialsListCtrl.columns[4])
def __do_layout(self):
mainSizer = wx.BoxSizer(wx.HORIZONTAL)
self.itemsSizer = wx.BoxSizer(wx.VERTICAL)
materialSizer = wx.BoxSizer(wx.VERTICAL)
quickbarSizer = wx.BoxSizer(wx.VERTICAL)
mainMarketSizer = wx.BoxSizer(wx.VERTICAL)
searchSizer = wx.BoxSizer(wx.HORIZONTAL)
searchSizer.Add(self.searchTextCtrl, 1, wx.EXPAND, 0)
searchSizer.Add(self.searchButton, 0, wx.ADJUST_MINSIZE, 0)
marketButtonSizer = wx.BoxSizer(wx.HORIZONTAL)
marketButtonSizer.Add(self.addButton, 1, wx.ADJUST_MINSIZE, 0)
marketButtonSizer.Add(self.fetchButton, 1, wx.ADJUST_MINSIZE, 0)
mainMarketSizer.Add(searchSizer, 0, wx.EXPAND, 0)
mainMarketSizer.Add(self.marketTree, 2, wx.EXPAND, 0)
mainMarketSizer.Add(marketButtonSizer, 0, wx.EXPAND, 0)
self.marketNotebookPane.SetSizer(mainMarketSizer)
quickbarButtonSizer = wx.BoxSizer(wx.HORIZONTAL)
quickbarButtonSizer.Add(self.removeButton, 1, wx.ADJUST_MINSIZE, 0)
quickbarButtonSizer.Add(self.fetchButtonTwo, 1, wx.ADJUST_MINSIZE, 0)
quickbarSizer.Add(self.quickbarListCtrl, 1, wx.EXPAND, 0)
quickbarSizer.Add(quickbarButtonSizer, 0, wx.EXPAND, 0)
self.quickbarNotebookPane.SetSizer(quickbarSizer)
materialSizer.Add(self.materialsListCtrl, 1, wx.EXPAND, 0)
self.materiallsNotebookPane.SetSizer(materialSizer)
self.leftNotebook.AddPage(self.marketNotebookPane, ("Market"))
self.leftNotebook.AddPage(self.quickbarNotebookPane, ("Quickbar"))
self.leftNotebook.AddPage(self.materiallsNotebookPane, ("Minerals"))
mainSizer.Add(self.leftNotebook, 1, wx.EXPAND, 0)
self.rightPanel.SetSizer(self.itemsSizer)
mainSizer.Add(self.rightPanel, 2, wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.Layout()
# initialize the marketTree
self.buildTree('Market')
# If we've loaded up a cache file send the data to the UI.
if quickbarList != []:
self.quickbarListCtrl.SetObjects(quickbarList)
def searchTree(self, event):
searchText = self.searchTextCtrl.GetValue()
# Reset the itemList and marketRelations
del itemList[:]
marketRelations.clear()
itemMarketGroups = []
# List and Dictionary initialisation.
if itemList == []: # Build a list of all items from the static data dump.
try:
con = lite.connect('static.db') # A cut down version of the CCP dump converted to sqlite. (~8mb)
con.text_factory = str
with con:
cur = con.cursor()
# With this query we are looking to populate the itemID's with their respective names and parent market groups.
# Eve items currently go up to ID 33612, then Dust items start from 350916
statement = "SELECT typeID, typeName, marketGroupID FROM invtypes WHERE marketGroupID >= 0 AND typeName LIKE '%" + searchText + "%' ORDER BY typeName;"
cur.execute(statement)
rows = cur.fetchall()
for row in rows:
# The data above taken from the db then all zeros for the buy/sell values (x16), query time and widget key.
itemList.append(Item(int(row[0]), str(row[1]), int(row[2]), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
itemMarketGroups.append(int(row[2]))
# Iterate over the relations to build all the relavent branches.
while itemMarketGroups != []:
# This statement is for the branches of the market treeCtrl using all the market groups and their relationship to each other.
itemMarketList = ("', '".join(map(str, itemMarketGroups[:])))
relationStatement = ("SELECT marketGroupID, parentGroupID FROM invMarketGroups WHERE marketGroupID IN ('%s') ORDER BY parentGroupID;" % itemMarketList)
cur.execute(relationStatement)
relationRows = cur.fetchall()
itemMarketGroups = []
for row in relationRows:
if row[1]:
marketRelations.update({int(row[0]): int(row[1])})
itemMarketGroups.append(int(row[1]))
else:
marketRelations.update({int(row[0]): 'Market'})
except lite.Error as err:
error = ('SQL Lite Error: ' + repr(err.args[0]) + repr(err.args[1:])) # Error String
onError(error)
finally:
if con:
con.close()
# Reinitialize the marketTree
self.marketTree.DeleteAllItems()
self.buildTree('Market')
def onExpand(self, event):
'''onExpand is called when the user expands a node on the tree
object. It checks whether the node has been previously expanded. If
not, the extendTree function is called to build out the node, which
is then marked as expanded.'''
# get the wxID of the entry to expand and check it's validity
itemID = event.GetItem()
if not itemID.IsOk():
itemID = self.marketTree.GetSelection()
# only build that marketTree if not previously expanded
old_pydata = self.marketTree.GetPyData(itemID)
if old_pydata[1] is False:
# clean the subtree and rebuild it
self.marketTree.DeleteChildren(itemID)
self.extendTree(itemID)
self.marketTree.SetPyData(itemID, (old_pydata[0], True, old_pydata[2]))
def buildTree(self, rootID):
'''Add a new root element and then its children'''
self.rootID = self.marketTree.AddRoot(rootID)
self.marketTree.SetPyData(self.rootID, (rootID, 1))
self.extendTree(self.rootID)
self.marketTree.Expand(self.rootID)
def extendTree(self, parentID):
'''extendTree is a semi-lazy Tree builder. It takes
the ID of a tree entry and fills in the tree with its child
sub market groups and their children - updating 2 layers of the
tree. This function is called by buildTree and onExpand methods'''
parentGroup = self.marketTree.GetPyData(parentID)[0]
subGroups = []
numIDs = list(range(len(itemList)))
for key in marketRelations:
if marketRelations[key] == parentGroup:
subGroups.append(int(key))
subGroups.sort()
if subGroups == []:
# We've reached the end of the branch and must add the leaves.
newsubGroups = []
for x in numIDs: # Iterate over all of the id lists generated above.
if itemList[x].marketGroupID == parentGroup:
newsubGroups.append(int(x))
newsubGroups.sort()
for child in newsubGroups:
childGroup = child
childID = self.marketTree.AppendItem(parentID, str(itemList[child].itemName))
self.marketTree.SetPyData(childID, (itemList[child].itemID, False, True))
else:
for child in subGroups:
childGroup = child
# add the child to the parent
childID = self.marketTree.AppendItem(parentID, str(marketGroups[child]))
# associate the child ID with its marketTree entry
self.marketTree.SetPyData(childID, (childGroup, False, False))
                # Now the child entry will show up, but it currently has no
# known children of its own and will not have a '+' showing
# that it can be expanded to step further down the marketTree.
# Solution is to go ahead and register the child's children,
# meaning the grandchildren of the original parent
newParentID = childID
newParentGroup = childGroup
newsubGroups = []
for key in marketRelations:
if marketRelations[key] == newParentGroup:
newsubGroups.append(int(key))
newsubGroups.sort()
if newsubGroups != []:
for grandchild in newsubGroups:
grandchildGroup = grandchild
if marketRelations[grandchildGroup]:
grandchildID = self.marketTree.AppendItem(newParentID, str(marketGroups[grandchild]))
self.marketTree.SetPyData(grandchildID, (grandchildGroup, False, False))
else:
for x in numIDs: # Iterate over all of the id lists generated above.
if itemList[x].marketGroupID == newParentGroup:
newsubGroups.append(int(x))
newsubGroups.sort()
for grandchild in newsubGroups:
grandchildGroup = grandchild
grandchildID = self.marketTree.AppendItem(newParentID, str(itemList[grandchild].itemName))
self.marketTree.SetPyData(grandchildID, (grandchildGroup, False, False))
def onAddWidget(self, moduleID, moduleName, widgetKey):
'''onAddWidget will add widgets into the right scrolling
panel as required to show the number of items prices'''
# Lets try add to the right panel.
self.moduleSizer_1_staticbox = wx.StaticBox(self.rightPanel, int('100%s' % widgetKey), (str(moduleName)), name="module_%s" % moduleID)
self.moduleSizer_1_staticbox.Lower()
moduleSizer_1 = wx.StaticBoxSizer(self.moduleSizer_1_staticbox, wx.VERTICAL)
reproGrid_1 = wx.GridSizer(3, 5, 0, 0)
itemGrid_1 = wx.GridSizer(3, 5, 0, 0)
itemLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Item Value"), name="itemValue_%s" % moduleID)
itemMarketLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Market"), name="itemMarket_%s" % moduleID)
itemAmarrLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Amarr"), name="itemAmarr_%s" % moduleID)
itemDodiLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Dodixie"), name="itemDodixie_%s" % moduleID)
itemHekLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Hek"), name="itemHek_%s" % moduleID)
itemJitaLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Jita"), name="itemJita_%s" % moduleID)
itemSellLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Sell"), name="itemSell_%s" % moduleID)
# itemAmarrSell_1 = wx.TextCtrl(self.rightPanel, wx.ID_ANY, "", size=(130, 21), style=wx.TE_RIGHT, name="amarrItemSell_%s" % moduleID)
itemAmarrSell_1 = wx.TextCtrl(self.rightPanel, int('101%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="amarrItemSell_%s" % moduleID)
itemDodiSell_1 = wx.TextCtrl(self.rightPanel, int('102%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="dodixieItemSell_%s" % moduleID)
itemHekSell_1 = wx.TextCtrl(self.rightPanel, int('103%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="hekItemSell_%s" % moduleID)
itemJitaSell_1 = wx.TextCtrl(self.rightPanel, int('104%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="jitaItemSell_%s" % moduleID)
itemBuyLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Buy"), name="itemBuy_%s" % moduleID)
itemAmarrBuy_1 = wx.TextCtrl(self.rightPanel, int('105%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="amarrItemBuy_%s" % moduleID)
itemDodiBuy_1 = wx.TextCtrl(self.rightPanel, int('106%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="dodixieItemBuy_%s" % moduleID)
itemHekBuy_1 = wx.TextCtrl(self.rightPanel, int('107%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="hekItemBuy_%s" % moduleID)
itemJitaBuy_1 = wx.TextCtrl(self.rightPanel, int('108%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="jitaItemBuy_%s" % moduleID)
static_line_1 = wx.StaticLine(self.rightPanel, wx.ID_ANY, name="line_%s" % moduleID)
reproLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Reprocessed Value"), name="reproValue_%s" % moduleID)
reproMarketLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Market"), name="reproMarket_%s" % moduleID)
reproAmarrLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Amarr"), name="reproAmarr_%s" % moduleID)
reproDodiLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Dodixie"), name="reproDodixie_%s" % moduleID)
reproHekLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Hek"), name="reproHek_%s" % moduleID)
reproJitaLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Jita"), name="reproJita_%s" % moduleID)
reproSellLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Sell"), name="reproSell_%s" % moduleID)
reproAmarrSell_1 = wx.TextCtrl(self.rightPanel, int('201%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="reproAmarrSell_%s" % moduleID)
reproDodiSell_1 = wx.TextCtrl(self.rightPanel, int('202%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="reproDodixieSell_%s" % moduleID)
reproHekSell_1 = wx.TextCtrl(self.rightPanel, int('203%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="reproHekSell_%s" % moduleID)
reproJitaSell_1 = wx.TextCtrl(self.rightPanel, int('204%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="reproJitaSell_%s" % moduleID)
reproBuyLabel_1 = wx.StaticText(self.rightPanel, wx.ID_ANY, ("Buy"), name="reproBuy_%s" % moduleID)
reproAmarrBuy_1 = wx.TextCtrl(self.rightPanel, int('205%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="reproAmarrBuy_%s" % moduleID)
reproDodiBuy_1 = wx.TextCtrl(self.rightPanel, int('206%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="reproDodixieBuy_%s" % moduleID)
reproHekBuy_1 = wx.TextCtrl(self.rightPanel, int('207%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="reproHekBuy_%s" % moduleID)
reproJitaBuy_1 = wx.TextCtrl(self.rightPanel, int('208%s' % widgetKey), "", size=(130, 21), style=wx.TE_RIGHT, name="reproJitaBuy_%s" % moduleID)
moduleSizer_1.Add(itemLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemMarketLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemAmarrLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemDodiLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemHekLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemJitaLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemSellLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemAmarrSell_1, 0, wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemDodiSell_1, 0, wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemHekSell_1, 0, wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemJitaSell_1, 0, wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemBuyLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemAmarrBuy_1, 0, wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemDodiBuy_1, 0, wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemHekBuy_1, 0, wx.ADJUST_MINSIZE, 0)
itemGrid_1.Add(itemJitaBuy_1, 0, wx.ADJUST_MINSIZE, 0)
moduleSizer_1.Add(itemGrid_1, 1, wx.EXPAND, 0)
moduleSizer_1.Add(static_line_1, 0, wx.EXPAND, 0)
moduleSizer_1.Add(reproLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproMarketLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproAmarrLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproDodiLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproHekLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproJitaLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproSellLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproAmarrSell_1, 0, wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproDodiSell_1, 0, wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproHekSell_1, 0, wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproJitaSell_1, 0, wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproBuyLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproAmarrBuy_1, 0, wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproDodiBuy_1, 0, wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproHekBuy_1, 0, wx.ADJUST_MINSIZE, 0)
reproGrid_1.Add(reproJitaBuy_1, 0, wx.ADJUST_MINSIZE, 0)
moduleSizer_1.Add(reproGrid_1, 1, wx.EXPAND, 0)
self.itemsSizer.Add(moduleSizer_1, 1, wx.EXPAND | wx.SHAPED, 0)
self.rightPanel.SetSizer(self.itemsSizer)
self.Layout()
def onRemoveWidget(self, widgetKey):
"""Remove all children components for a given module and destroy them"""
child = wx.FindWindowById(int('100%s' % widgetKey))
if child:
parent = child.GetContainingSizer()
widgetIds = ['101', '102', '103', '104', '105', '106', '107', '108',
'201', '202', '203', '204', '205', '206', '207', '208']
for wid in widgetIds:
widget = wx.FindWindowById(int('%s%s' % (wid, widgetKey)))
if widget:
widget.Destroy()
if parent:
self.itemsSizer.Hide(parent)
self.itemsSizer.Remove(parent)
self.Layout()
def updateCache(self):
# Update the quickbarList to the cache file.
if quickbarList != []:
cacheFile = open('nett.cache', 'w')
pickle.dump(quickbarList, cacheFile)
cacheFile.close()
else:
# Delete the cache file when the quickbarList is empty.
if (os.path.isfile('nett.cache')):
os.remove('nett.cache')
def onAdd(self, event):
# Get current selection data from tree ctrl
currentSelection = self.marketTree.GetSelection()
pydata = self.marketTree.GetPyData(currentSelection)
# Check its an item not a market group
if pydata[2] is True:
selectedID = pydata[0]
for item in itemList:
# Find the selected ID in the complete item list
if item.itemID == selectedID:
# Check for duplicates in the quickbar list
if item not in quickbarList:
quickbarList.append(item)
self.quickbarListCtrl.SetObjects(quickbarList)
self.updateCache()
def onRemove(self, event):
# Use the selection from the quickbarListCtrl to remove items.
numItemRows = list(range(len(quickbarList)))
# Get current selection from quickbarList ctrl
for x in self.quickbarListCtrl.GetSelectedObjects():
for y in numItemRows:
if (x.itemID == quickbarList[y].itemID):
quickbarList[y] = 'deleted'
self.onRemoveWidget(x.widgetKey)
for z in quickbarList[:]:
if z == 'deleted':
quickbarList.remove(z)
# Recreate the iteration list so the loop can continue if removing multiple items.
numItemRows = list(range(len(quickbarList)))
self.quickbarListCtrl.SetObjects(quickbarList)
self.updateCache()
def updateDisplay(self, idList):
"""Send Values to the GUI elements. as we have added to the wx widgets
on the fly the easiest way to identify the widgets is by their unique
names assigned on creation."""
for item in idList:
if wx.FindWindowByName("module_%s" % int(item.itemID)):
continue
else:
self.numWidgets += 1
item.widgetKey = self.numWidgets
self.onAddWidget(int(item.itemID), item.itemName, item.widgetKey)
# Iterate over all of the widgets and their respective variables to fill in values.
# '{:,.2f}'.format(value) Uses the Format Specification Mini-Language to produce more human friendly output.
# Item Values
widgetNames = ['amarrItemBuy', 'dodixieItemBuy', 'hekItemBuy', 'jitaItemBuy',
'amarrItemSell', 'dodixieItemSell', 'hekItemSell', 'jitaItemSell']
for name in widgetNames:
widget = wx.FindWindowByName("%s_%s" % (name, int(item.itemID)))
widget.SetValue('{:,.2f}'.format(vars(item)[name]))
# Reprocess Values
widgetNames = ['reproAmarrBuy', 'reproDodixieBuy', 'reproHekBuy', 'reproJitaBuy',
'reproAmarrSell', 'reproDodixieSell', 'reproHekSell', 'reproJitaSell']
for name in widgetNames:
widget = wx.FindWindowByName("%s_%s" % (name, int(item.itemID)))
widget.SetValue('{:,.2f}'.format(vars(item)[name]))
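    # Editor's note (sketch): the lookups above rely on the naming scheme used
    # in onAddWidget(), e.g. for a hypothetical item with itemID 34 the Jita
    # sell price lands in the TextCtrl named 'jitaItemSell_34':
    #   wx.FindWindowByName('jitaItemSell_34').SetValue('5.20')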
def onProcess(self, event):
"""Generate a list of item and material ids to send to the Eve-Central servers
then use the returned data to generate our prices"""
currentTime = datetime.datetime.utcnow().replace(microsecond=0)
if quickbarList != []:
timingMsg = 'Using Local Cache'
# Build a list of item ids to send to Eve-Central.
idList = []
for item in quickbarList:
if item.lastQuery == 0:
idList.append(item.itemID)
elif (currentTime - item.lastQuery).seconds > config.queryLimit:
idList.append(item.itemID)
# We'll tag on the mineral query with the item ids to save traffic.
if materialsList != []:
for mat in materialsList:
if mat.lastQuery == 0:
idList.append(mat.materialID)
elif (currentTime - mat.lastQuery).seconds > config.queryLimit:
idList.append(mat.materialID)
else:
for mineral in config.mineralIDs:
idList.append(mineral)
# print(idList)
# idList = [4473, 16437...]
# This is for time stamping our out bound queries so we don't request data we already have that is recent.
queryTime = datetime.datetime.utcnow().replace(microsecond=0)
# Start the clock for the fetch from Eve-Central.
t = time.clock()
self.statusbar.SetStatusText('Nett - Fetching Data from Eve-Central.com...')
dodixieBuy, dodixieSell, jitaBuy, jitaSell, hekBuy, hekSell, amarrBuy, amarrSell = fetchItems(idList)
fetchTime = ((time.clock() - t) * 1000) # Timing messages for info and debug.
# Check that our mineral prices are updated if returned for the query.
for mineral in config.mineralIDs:
# Check if it was in the idList for the Eve-Central query.
if mineral in idList:
# Check if we already have some data for this id
if mineral in materialDict:
# Buy values updates via materialDict to materialsList
materialsList[materialDict[mineral]].amarrBuy = amarrBuy[mineral]
materialsList[materialDict[mineral]].dodixieBuy = dodixieBuy[mineral]
materialsList[materialDict[mineral]].hekBuy = hekBuy[mineral]
materialsList[materialDict[mineral]].jitaBuy = jitaBuy[mineral]
# Sell values updates via materialDict to materialsList
materialsList[materialDict[mineral]].amarrSell = amarrSell[mineral]
materialsList[materialDict[mineral]].dodixieSell = dodixieSell[mineral]
materialsList[materialDict[mineral]].hekSell = hekSell[mineral]
materialsList[materialDict[mineral]].jitaSell = jitaSell[mineral]
else:
materialsList.append(Material(int(mineral), config.mineralIDs[mineral],
amarrBuy[mineral], dodixieBuy[mineral], hekBuy[mineral], jitaBuy[mineral],
amarrSell[mineral], dodixieSell[mineral], hekSell[mineral], jitaSell[mineral],
queryTime))
# Once we have fetched material data its now stored in objects in materialsList
# So we need to make a quick dictionary like a primary key to match list positions to mineral ids.
numMats = list(range(len(materialsList)))
if numMats != []:
for x in numMats:
# materialDict = {materialId: materialsList[index], 34: 0, 35: 1, ...}
materialDict[materialsList[x].materialID] = x
# print(materialDict)
# TODO: Move this loop somewhere more logical.
materialRows = []
for mineral in materialsList:
materialRows.append(MaterialRow(mineral.materialName, 'Amarr', mineral.amarrBuy, mineral.amarrSell))
materialRows.append(MaterialRow(mineral.materialName, 'Dodixie', mineral.dodixieBuy, mineral.dodixieSell))
materialRows.append(MaterialRow(mineral.materialName, 'Hek', mineral.hekBuy, mineral.hekSell))
materialRows.append(MaterialRow(mineral.materialName, 'Jita', mineral.jitaBuy, mineral.jitaSell))
self.materialsListCtrl.SetObjects(materialRows)
self.statusbar.SetStatusText('Nett - Calculating Reprocessed Values...')
# Restart the clock for processing data.
t = time.clock()
for item in quickbarList:
if item.itemID in idList:
output = reprocess(item.itemID)
# print(output)
                    reproAmarrBuy = 0  # Fulfilling Buy orders
                    reproAmarrSell = 0  # Placing Sell orders
                    reproDodixieBuy = 0  # Fulfilling Buy orders
                    reproDodixieSell = 0  # Placing Sell orders
                    reproHekBuy = 0  # Fulfilling Buy orders
                    reproHekSell = 0  # Placing Sell orders
                    reproJitaBuy = 0  # Fulfilling Buy orders
                    reproJitaSell = 0  # Placing Sell orders
# Generate reprocessed values from raw material prices. (Currently not stored)
for key in output:
if key in config.mineralIDs:
# We are now using the materialDict so we can use previously fetched data in the materialsList.
reproAmarrBuy = reproAmarrBuy + (int(output[key]) * materialsList[materialDict[key]].amarrBuy)
reproAmarrSell = reproAmarrSell + (int(output[key]) * materialsList[materialDict[key]].amarrSell)
reproDodixieBuy = reproDodixieBuy + (int(output[key]) * materialsList[materialDict[key]].dodixieBuy)
reproDodixieSell = reproDodixieSell + (int(output[key]) * materialsList[materialDict[key]].dodixieSell)
reproHekBuy = reproHekBuy + (int(output[key]) * materialsList[materialDict[key]].hekBuy)
reproHekSell = reproHekSell + (int(output[key]) * materialsList[materialDict[key]].hekSell)
reproJitaBuy = reproJitaBuy + (int(output[key]) * materialsList[materialDict[key]].jitaBuy)
reproJitaSell = reproJitaSell + (int(output[key]) * materialsList[materialDict[key]].jitaSell)
# Send Values to the quickbarList objects.
item.amarrItemBuy = amarrBuy[item.itemID]
item.dodixieItemBuy = dodixieBuy[item.itemID]
item.hekItemBuy = hekBuy[item.itemID]
item.jitaItemBuy = jitaBuy[item.itemID]
item.amarrItemSell = amarrSell[item.itemID]
item.dodixieItemSell = dodixieSell[item.itemID]
item.hekItemSell = hekSell[item.itemID]
item.jitaItemSell = jitaSell[item.itemID]
item.reproAmarrBuy = reproAmarrBuy
item.reproDodixieBuy = reproDodixieBuy
item.reproHekBuy = reproHekBuy
item.reproJitaBuy = reproJitaBuy
item.reproAmarrSell = reproAmarrSell
item.reproDodixieSell = reproDodixieSell
item.reproHekSell = reproHekSell
item.reproJitaSell = reproJitaSell
item.lastQuery = queryTime
processTime = ((time.clock() - t) * 1000)
timingMsg = 'Fetch: %0.2f ms / Process: %0.2f ms' % (fetchTime, processTime)
self.updateDisplay(quickbarList)
self.statusbar.SetStatusText('Nett - Idle - %s' % timingMsg)
# Save the updated quickbarList to the cache file.
self.updateCache()
def OnExport(self, event):
# Export the contents of the Quickbar as csv.
if quickbarList != []:
self.dirname = ''
wildcard = "Comma Separated (*.csv)|*.csv|All files (*.*)|*.*"
dlg = wx.FileDialog(self, 'Export Price Data to File', self.dirname, 'export.csv', wildcard, wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
f = file(path, 'w')
""" Item(itemID, itemName, marketGroupID,
amarrItemBuy, dodixieItemBuy, hekItemBuy, jitaItemBuy,
amarrItemSell, dodixieItemSell, hekItemSell, jitaItemSell,
reproAmarrBuy, reproDodixieBuy, reproHekBuy, reproJitaBuy,
reproAmarrSell, reproDodixieSell, reproHekSell, reproJitaSell)"""
columns = ('Item Name', 'Amarr Market Buy Orders', 'Amarr Market Sell Orders', 'Amarr Material Buy Orders', 'Amarr Material Sell Orders',
'Dodixie Market Buy Orders', 'Dodixie Market Sell Orders', 'Dodixie Material Buy Orders', 'Dodixie Material Sell Orders',
'Hek Market Buy Orders', 'Hek Market Sell Orders', 'Hek Material Buy Orders', 'Hek Material Sell Orders',
'Jita Market Buy Orders', 'Jita Market Sell Orders', 'Jita Material Buy Orders', 'Jita Material Sell Orders')
dataExport = ('%s%s' % (','.join(columns), '\n'))
for row in quickbarList:
dataExport = ('%s%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % (dataExport, row.itemName,
row.amarrItemBuy, row.amarrItemSell, row.reproAmarrBuy, row.reproAmarrSell,
row.dodixieItemBuy, row.dodixieItemSell, row.reproDodixieBuy, row.reproDodixieSell,
row.hekItemBuy, row.hekItemSell, row.reproHekBuy, row.reproHekSell,
row.jitaItemBuy, row.jitaItemSell, row.reproJitaBuy, row.reproJitaSell))
f.write(dataExport)
f.close()
dlg.Destroy()
else:
onError('The Quickbar list is empty. There is no data to export yet.')
def OnAbout(self, e):
description = """A tool designed for our corporate industrialists to
compare items at the main market hubs.
If you like my work please consider an ISK donation to Elusive One.
This application uses data provided by Eve-Central.com
All EVE-Online related materials are property of CCP hf."""
licence = """NETT is released under GNU GPLv3:
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
info = wx.AboutDialogInfo()
# info.SetIcon(wx.Icon('', wx.BITMAP_TYPE_PNG))
info.SetName('Nova Echo Trade Tool')
info.SetVersion(config.version)
info.SetDescription(description)
# info.SetCopyright('(C) 2013 Tim Cumming')
info.SetWebSite('https://github.com/EluOne/Nett')
info.SetLicence(licence)
info.AddDeveloper('Tim Cumming aka Elusive One')
# info.AddDocWriter('')
# info.AddArtist('')
# info.AddTranslator('')
wx.AboutBox(info)
def OnExit(self, e):
        dlg = wx.MessageDialog(self, 'Are you sure you want to quit Nett?', 'Please Confirm', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if dlg.ShowModal() == wx.ID_YES:
self.Close(True)
# end of class MainWindow
class MyApp(wx.App):
def OnInit(self):
frame = MainWindow(None, -1, '')
self.SetTopWindow(frame)
frame.Center()
frame.Show()
return 1
# end of class MyApp
if __name__ == '__main__':
app = MyApp(0)
app.MainLoop()
|
gpl-3.0
| 5,782,214,964,515,885,000
| 51.315594
| 177
| 0.606752
| false
| 3.596614
| false
| false
| false
|
mleger45/turnex
|
msn/tests/test_consumer.py
|
1
|
1441
|
# -*- coding: utf-8 -*-
from channels import Group
from channels.test import ChannelTestCase, WSClient, apply_routes
#TODO: use apply_routes here, these tests are wrong.
from msn import consumer
class MSNConsumerTest(ChannelTestCase):
def test_ws_connect(self):
client = WSClient()
default = 'turnex'
# Inject a message onto the channel to use in a consumer
#Channel("input").send({"value": 33})
# Run the consumer with the new Message object
#message = self.get_next_message("input", require=True)
#consumer.ws_connect(message)
# Verify there's a reply and that it's accurate
#result = self.get_next_message(message.reply_channel.name,
# require=True)
#self.assertIsNotNone(result)
client.send_and_consume('websocket.connect', path='/')
self.assertIsNone(client.receive())
Group(default).send({'text': 'ok'}, immediately=True)
self.assertEqual(client.receive(json=False), 'ok')
client.send_and_consume('websocket.receive',
text={'message': 'hey'},
path='/')
self.assertEqual(client.receive(), {'event': 'error', 'body': 'Stop Hacking.'})
client.send_and_consume('websocket.disconnect',
text={'message': 'hey'},
path='/')
|
mit
| 8,777,828,271,155,742,000
| 36.921053
| 87
| 0.575295
| false
| 4.340361
| true
| false
| false
|
nens/raster-tools
|
raster_tools/txt2tif.py
|
1
|
2465
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Create tif rasters from xyz text files by gridding the points onto a regular
grid and writing one tif with the per-cell minimum and one with the per-cell
maximum value.
"""
import argparse
import math
import os
from osgeo import gdal
from osgeo import osr
import numpy as np
from raster_tools import datasets
WIDTH = 0.5
HEIGHT = 0.5
NO_DATA_VALUE = np.finfo('f4').min.item()
DRIVER = gdal.GetDriverByName('gtiff')
OPTIONS = ['compress=deflate', 'tiled=yes']
PROJECTION = osr.GetUserInputAsWKT('epsg:28992')
def rasterize(points):
""" Create array. """
xmin, ymin = points[:, :2].min(0)
xmax, ymax = points[:, :2].max(0)
p = math.floor(xmin / WIDTH) * WIDTH
q = math.floor(ymax / HEIGHT) * HEIGHT
geo_transform = p, WIDTH, 0, q, 0, -HEIGHT
indices = np.empty((len(points), 3), 'u4')
indices[:, 2] = (points[:, 0] - p) / WIDTH
indices[:, 1] = (q - points[:, 1]) / HEIGHT
order = indices.view('u4,u4,u4').argsort(order=['f1', 'f2'], axis=0)[:, 0]
indices = indices[order]
indices[0, 0] = 0
py, px = indices[0, 1:]
for i in range(1, len(indices)):
same1 = indices[i, 1] == indices[i - 1, 1]
same2 = indices[i, 2] == indices[i - 1, 2]
if same1 and same2:
indices[i, 0] = indices[i - 1, 0] + 1
else:
indices[i, 0] = 0
array = np.full(indices.max(0) + 1, NO_DATA_VALUE)
array[tuple(indices.transpose())] = points[:, 2][order]
array = np.ma.masked_values(array, NO_DATA_VALUE)
return {'array': array,
'projection': PROJECTION,
'no_data_value': NO_DATA_VALUE,
'geo_transform': geo_transform}
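# Worked example for rasterize() (editor's note): with WIDTH = HEIGHT = 0.5 and
# points whose x-minimum is 10.3 and y-maximum is 20.9, the origin snaps to
# p = floor(10.3 / 0.5) * 0.5 = 10.0 and q = floor(20.9 / 0.5) * 0.5 = 20.5,
# giving geo_transform = (10.0, 0.5, 0, 20.5, 0, -0.5): a north-up grid with
# its top-left corner at (10.0, 20.5) and 0.5 m cells (EPSG:28992 is metric).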
def txt2tif(source_path):
root, ext = os.path.splitext(source_path)
points = np.loadtxt(source_path)
kwargs = rasterize(points)
array = kwargs.pop('array')
for statistic in 'min', 'max':
func = getattr(np.ma, statistic)
kwargs['array'] = func(array, 0).filled(NO_DATA_VALUE)[np.newaxis]
target_path = root + '_' + statistic + '.tif'
with datasets.Dataset(**kwargs) as dataset:
DRIVER.CreateCopy(target_path, dataset, options=OPTIONS)
def get_parser():
""" Return argument parser. """
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('source_path', metavar='FILE')
return parser
def main():
""" Call txt2tif with args from parser. """
return txt2tif(**vars(get_parser().parse_args()))
if __name__ == '__main__':
exit(main())
|
gpl-3.0
| -6,831,315,423,260,359,000
| 27.011364
| 78
| 0.599189
| false
| 3.176546
| false
| false
| false
|
BorgERP/borg-erp-6of3
|
verticals/garage61/acy_work_order/workorder.py
|
1
|
5022
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010 Acysos S.L. (http://acysos.com) All Rights Reserved.
# Ignacio Ibeas <ignacio@acysos.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import tools
import os
import time
from datetime import datetime, date
# Word order
class workorder(osv.osv):
_description = 'Work Order'
_name = 'workorder'
_columns = {
'name': fields.char('Work Order Reference', size=64, readonly=False, required=True, select=True),
'partner_id': fields.many2one('res.partner', 'Customer', readonly=False, states={'draft': [('readonly', False)]}, required=True, change_default=True, select=True),
'partner_workorder_id': fields.many2one('res.partner.address', 'Address', readonly=False, required=True, states={'draft': [('readonly', False)]}, help="The name and address of the contact that requested the workorder."),
'sale_order_ids': fields.one2many('sale.order', 'workorder_id', 'Sale orders'),
'project_ids': fields.one2many('project.project', 'workorder_id', 'Projects'),
'date_created': fields.date('Created Date'),
'date_appointment': fields.date('Appointment Date'),
'date_work': fields.date('Work Date'),
'date_delivery': fields.date('Delivery Date'),
'number_sale_orders': fields.integer('Number Sale Orders'),
'user_id': fields.many2one('res.users', 'Salesman', readonly=False, select=True),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'workorder'),
'number_sale_orders': lambda *a: 0,
'date_created': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'user_id': lambda obj, cr, uid, context: uid,
}
_sql_constraints = [
('name_uniq', 'unique (name)', 'The Code of the Workorder must be unique !')
]
def onchange_partner_id(self, cr, uid, ids, part):
if not part:
return {'value': {'partner_workorder_id': False}}
addr = self.pool.get('res.partner').address_get(cr, uid, [part], ['delivery', 'invoice', 'contact'])
val = {
'partner_workorder_id': addr['contact'],
}
return {'value': val}
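    # Duplicating a work order must pull a fresh reference from the 'workorder' sequence,
    # otherwise the unique-name constraint below would reject the copy.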
def copy(self, cr, uid, id, default=None, context={}):
if not default:
default = {}
default.update({
'name': self.pool.get('ir.sequence').get(cr, uid, 'workorder'),
})
return super(workorder, self).copy(cr, uid, id, default, context)
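    # Called from the client: creates the work order only when no record with this reference
    # exists yet, then writes the submitted values back onto the current record(s).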
def save_workorder(self, cr, uid, ids,name,partner_id,partner_workorder_id,date_appointment,date_work,date_delivery,sale_order_ids,project_ids, context={}):
wo_exist_id = self.pool.get('workorder').search(cr, uid, [('name','=',name)], context=context)
if not wo_exist_id:
wo_id = self.pool.get('workorder').create(cr, uid, {'name':name,'partner_id':partner_id,'partner_workorder_id':partner_workorder_id,'date_appointment':date_appointment,'date_work':date_work,'date_delivery':date_delivery,'sale_order_ids':sale_order_ids,'project_ids':project_ids},{'workorder':True})
self.write(cr, uid, ids, {'name':name,'partner_id':partner_id,'partner_workorder_id':partner_workorder_id,'date_appointment':date_appointment,'date_work':date_work,'date_delivery':date_delivery,'sale_order_ids':sale_order_ids,'project_ids':project_ids})
return {'value': {'id': wo_id}}
    def create(self, cr, uid, vals, context=None):
        context = context or {}
        if context.get('workorder', False):
            return super(workorder, self).create(cr, uid, vals, context)
        else:
            # A work order with this reference already exists: update it instead of
            # creating a duplicate (the reference carries a unique constraint).
            cr.execute("SELECT workorder.id FROM workorder WHERE name = %s", (vals.get('name'),))
            ids = cr.fetchone()[0]
            super(workorder, self).write(cr, uid, ids, vals, context={})
            return ids
workorder()
# Project
class project_project(osv.osv):
_inherit = 'project.project'
_columns = {
'workorder_id': fields.many2one('workorder', 'Work Order', readonly=True, required=False, select=True),
}
project_project()
|
agpl-3.0
| 4,570,658,077,266,191,400
| 45.943925
| 310
| 0.612505
| false
| 3.671053
| false
| false
| false
|
galad-loth/DescHash
|
DeepHash/TestDH.py
|
1
|
1206
|
import numpy as npy
import mxnet as mx
import logging
from symbols.symbol_dh import DHMidLayer,DHLossLayer
from common.data import SiftSmallIter
batchsize=50
opProj1=DHMidLayer(96,0.0001,0.0001)
opProj2=DHMidLayer(64,0.0001,0.0001)
opOut=DHLossLayer(0.001)
data = mx.symbol.Variable('data')
lm1=opProj1(data=data, name='lm1')
lm2=opProj2(data=lm1, name="lm2")
netDH=opOut(data=lm2)
ex = netDH.simple_bind(ctx=mx.cpu(), data=(batchsize, 128))
listArgs = dict(zip(netDH.list_arguments(), ex.arg_arrays))
for arg in listArgs:
data = listArgs[arg]
if 'weight' in arg:
data[:] = mx.random.uniform(-0.1, 0.1, data.shape)
if 'bias' in arg:
data[:] = 0
dataPath="E:\\DevProj\\Datasets\\SIFT1M\\siftsmall"
trainIter, valIter=SiftSmallIter(dataPath,21000,4000,batchsize)
learning_rate=0.01
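# Manual SGD loop: for every mini-batch run forward/backward on the bound executor
# and apply the gradients scaled by learning_rate / batchsize.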
for ii in range(200):
print "Deep Hash Training at iteration "+str(ii)
trainbatch=trainIter.next()
listArgs['data'][:] = trainbatch.data[0]
ex.forward(is_train=True)
ex.backward()
for arg, grad in zip(ex.arg_arrays, ex.grad_arrays):
arg[:] -= learning_rate * (grad / batchsize)
xx=ex.outputs[0].asnumpy()
|
apache-2.0
| 6,880,003,447,890,738,000
| 26.046512
| 63
| 0.667496
| false
| 2.778802
| false
| false
| false
|
c86j224s/snippet
|
Python_asyncio_binary_echo/pyclient2/echoclient/cli.py
|
1
|
3199
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Dummy echo client based on binary protocol with asyncio
'''
import asyncio
import struct
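# Wire format (little-endian): a 4-byte length prefix (length of command + tid + body),
# then a 4-byte command, a 4-byte transaction id and the raw body. Responses are matched
# to pending requests through the (command, tid) key in conn_mgr.transactions.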
class conn_mgr:
def __init__(self, addr, port, asyncio_loop):
''' initialize object member variables '''
# network connection information
self.addr = addr
self.port = port
# asyncio streams, tasks
self.loop = asyncio_loop
self.reader = None
self.writer = None
self.read_task = None
# transaction map
self.tid = 1
self.transactions = {}
def transactionid(self):
''' issue new transaction id '''
tid = self.tid
self.tid += 1
return tid
async def open_connection(self):
''' open connection and start packet read loop '''
self.reader, self.writer, = await asyncio.open_connection(self.addr, self.port, loop=self.loop)
self.read_task = self.loop.create_task(self._read_loop())
async def _read_loop(self):
''' packet read loop handling response and notification messages '''
while True:
command, tid, message, = await self._read_message()
if (command, tid) in self.transactions:
self.transactions[(command, tid)].set_result(message)
print('handled response. {}, {}, {}'.format(command, tid, message))
else:
print('unhandled response. {}, {}, {}'.format(command, tid, message))
async def request(self, command, body):
''' request and wait response message '''
tid = self.transactionid()
self.transactions[(command, tid)] = self.loop.create_future()
await self._write_message(command, tid, body)
return await self.transactions[(command, tid)]
def close_connection(self):
''' close streams and stop the packet read loop '''
self.writer.close()
self.reader = None
self.writer = None
self.read_task.cancel()
async def _write_message(self, command, tid, body):
''' write a message to stream '''
        # 'ii' already accounts for command and tid, so the string field only needs
        # len(body); adding 4+4 here padded the body with trailing NUL bytes.
        payload = struct.pack('<ii{}s'.format(len(body)), command, tid, body)
        self.writer.write(struct.pack('<i{}s'.format(len(payload)), len(payload), payload))
await self.writer.drain()
async def _read_message(self):
''' read a message from stream '''
        # readexactly() guarantees the full field is received; read(n) may return fewer bytes.
        length, = struct.unpack('<i', await self.reader.readexactly(4))
        command, = struct.unpack('<i', await self.reader.readexactly(4))
        payload = await self.reader.readexactly(length - 4)
tid, body, = struct.unpack('<i{}s'.format(len(payload)-4), payload)
return command, tid, body
async def tcp_echo_client(loop):
conn = conn_mgr('127.0.0.1', 9999, loop)
await conn.open_connection()
body = await conn.request(1, b'this is first data')
print('Received body = {}'.format(body.decode()))
body = await conn.request(2, b'this is second data')
print('Received body = {}'.format(body.decode()))
conn.close_connection()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(tcp_echo_client(loop))
loop.stop()
loop.close()
|
apache-2.0
| 257,500,368,639,662,980
| 31.642857
| 103
| 0.599562
| false
| 3.99875
| false
| false
| false
|
DemocracyClub/yournextrepresentative
|
ynr/apps/people/data_removal_helpers.py
|
1
|
4426
|
"""
A set of helpers that automate personal data removal. Used in the admin
interface, typically after a GDPR request for removal.
"""
import abc
from collections import defaultdict
DELETED_STR = "<DELETED>"
class BaseCheck(metaclass=abc.ABCMeta):
def __init__(self, person):
self.person = person
def collect(self):
return {self.__class__.__name__: self.run_collect()}
@abc.abstractmethod
def run_collect(self):
pass
@abc.abstractmethod
def run_remove(self):
pass
@abc.abstractmethod
def get_item_display_info(self, item):
pass
class PhotoCheck(BaseCheck):
def get_item_display_info(self, item):
return {
"title": "image",
"description": """Source: {source}
User: {user}
""".format(
source=item.source or None, user=item.uploading_user
),
"image": item.image.url,
}
def run_collect(self):
photos_to_remove = []
for photo in self.person.images.all():
photos_to_remove.append(self.get_item_display_info(photo))
return photos_to_remove
def run_remove(self):
self.person.images.all().delete()
class VersionHistoryCheck(BaseCheck):
def get_item_display_info(self, item):
return {
"title": item[0],
"description": "\n\t".join(sorted([x for x in item[1] if x])),
}
def run_collect(self, do_remove=False):
version_data_to_remove = []
never_remove = [
"death_date",
"honorific_prefix",
"id",
"wikipedia_url",
"candidacies",
"name",
"honorific_suffix",
"wikidata_id",
"other_names",
"slug",
]
never_remove_identifiers = ["uk.org.publicwhip"]
to_remove = defaultdict(set)
versions = self.person.versions
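        # Walk every stored version and collect (or, when do_remove is True, blank out with
        # DELETED_STR) any personal value that is not in the keep-lists above; the
        # 'identifiers' entry is handled per scheme.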
for version in versions:
for key, value in version.get("data").items():
if key not in never_remove:
if value or value == DELETED_STR:
if key == "identifiers":
for v in value:
if (
not v.get("scheme")
in never_remove_identifiers
):
if v["identifier"] == DELETED_STR:
continue
to_remove[
"Identifier: " + v.get("scheme")
].add(v["identifier"])
if do_remove:
v["identifier"] = DELETED_STR
else:
if str(value) == DELETED_STR:
continue
to_remove[key].add(str(value))
if do_remove:
version["data"][key] = DELETED_STR
for remove in to_remove.items():
if not remove[1]:
continue
version_data_to_remove.append(self.get_item_display_info(remove))
if do_remove:
self.person.versions = versions
self.person.save()
return sorted(version_data_to_remove, key=lambda item: item["title"])
def run_remove(self):
self.run_collect(do_remove=True)
class DataRemover:
def __init__(self, person):
self.person = person
self.to_remove = {}
self._collected = False
self.checks = [PhotoCheck, VersionHistoryCheck]
def collect(self):
"""
Runs all checks and collects the data that will be removed without
performing any actions.
:return:
"""
for check in self.checks:
self.to_remove.update(check(self.person).collect())
self._collected = True
return self.to_remove
def remove(self):
"""
Removes all data found in the checks.
:return:
"""
if not self._collected:
raise ValueError("Can't remove data without calling collect first")
for check in self.checks:
check(self.person).run_remove()
return self.to_remove
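# Minimal usage sketch (person is a hypothetical Person instance):
#   remover = DataRemover(person)
#   remover.collect()   # preview what would be removed
#   remover.remove()    # blank the collected data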
|
agpl-3.0
| -711,959,650,726,068,900
| 29.736111
| 79
| 0.491414
| false
| 4.558187
| false
| false
| false
|
jstacoder/flask-basehead
|
flask_basehead/core.py
|
1
|
2152
|
'''
new_bc.core.py
core api calls for new_bc api library
'''
import os
import requests
API_URL = 'https://basecamp.com/{}/api/v1/'
MY_BC_NUMBER = '2361076'
def make_api_url(account_num=None,call=None,*args):
if account_num is None:
account_num = MY_BC_NUMBER
if call is None:
call = ''
u = API_URL.format(account_num) + call
u = u + '.json' if not args else u + '/' + '/'.join(map(str,args)) + '.json'
return u
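# For example, make_api_url('2361076', 'projects', 123) builds
# 'https://basecamp.com/2361076/api/v1/projects/123.json'.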
def get_auth(username=None,passwd=None):
if username and passwd:
return (username,passwd)
elif os.environ.get('BC_AUTH',False):
return os.environ['BC_AUTH'].split(' ')
else:
if os.path.exists('auth.txt'):
return tuple([str(x[:-1]) for x in tuple(open('auth.txt').readlines())])
def create_session(auth=None,oauth2=False):
if not oauth2:
req = requests.session()
else:
import os
url = os.environ.get('INIT_REQUEST_URL',None)
import requests_oauthlib
req = requests_oauthlib.OAuth2Session(url)
if auth is None:
req.auth = get_auth()
else:
if len(auth) == 2:
            req.auth = get_auth(*auth)  # unpack (username, password); passing the tuple as one argument loses the password
else:
raise IOError('unsupported authentication')
return req
def send_request(url,json=True,post=False,session=None,**kwargs):
if session is None:
req = create_session()
else:
req = session
if url is None:
if kwargs == {}:
raise IOError('need a url to send request to')
else:
account_num = kwargs.pop('account_num',None)
call = kwargs.pop('call',None)
args = kwargs.values()
if args:
url = make_api_url(account_num=account_num,call=call,*args)
else:
url = make_api_url(account_num=account_num,call=call)
if not post:
if json:
return req.get(url).json()
else:
return req.get(url)
else:
data = kwargs.get('post_data',None)
if json:
return req.post(url,data=data).json()
else:
return req.post(url,data=data)
|
bsd-3-clause
| -6,049,897,316,050,391,000
| 27.693333
| 84
| 0.560409
| false
| 3.557025
| false
| false
| false
|
kaushik94/sympy
|
sympy/assumptions/satask.py
|
2
|
5183
|
from __future__ import print_function, division
from sympy import Symbol, S
from sympy.assumptions.ask_generated import get_all_known_facts
from sympy.assumptions.assume import global_assumptions, AppliedPredicate
from sympy.assumptions.sathandlers import fact_registry
from sympy.core import oo
from sympy.logic.inference import satisfiable
from sympy.assumptions.cnf import CNF, EncodedCNF
def satask(proposition, assumptions=True, context=global_assumptions,
use_known_facts=True, iterations=oo):
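    # Convert the proposition and its negation to CNF, gather the facts relevant to the
    # expressions involved, and decide the query by checking satisfiability of both signs.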
props = CNF.from_prop(proposition)
_props = CNF.from_prop(~proposition)
if context:
tmp = CNF()
context = tmp.extend(context)
assumptions = CNF.from_prop(assumptions)
sat = get_all_relevant_facts(props, assumptions, context,
use_known_facts=use_known_facts, iterations=iterations)
if context:
sat.add_from_cnf(context)
sat.add_from_cnf(assumptions)
return check_satisfiability(props, _props, sat)
def check_satisfiability(prop, _prop, factbase):
sat_true = factbase.copy()
sat_false = factbase.copy()
sat_true.add_from_cnf(prop)
sat_false.add_from_cnf(_prop)
can_be_true = satisfiable(sat_true)
can_be_false = satisfiable(sat_false)
if can_be_true and can_be_false:
return None
if can_be_true and not can_be_false:
return True
if not can_be_true and can_be_false:
return False
if not can_be_true and not can_be_false:
# TODO: Run additional checks to see which combination of the
# assumptions, global_assumptions, and relevant_facts are
# inconsistent.
raise ValueError("Inconsistent assumptions")
def get_relevant_facts(proposition, assumptions=None,
context=None, exprs=None,
relevant_facts=None):
newexprs = set()
if not assumptions:
assumptions = CNF({S.true})
if not relevant_facts:
relevant_facts = set()
def find_symbols(pred):
if isinstance(pred, CNF):
symbols = set()
for a in pred.all_predicates():
symbols |= find_symbols(a)
return symbols
if isinstance(pred.args, AppliedPredicate):
return {pred.args[0]}
return pred.atoms(Symbol)
if not exprs:
req_keys = find_symbols(proposition)
keys = proposition.all_predicates()
# XXX: We need this since True/False are not Basic
lkeys = set()
lkeys |= assumptions.all_predicates()
if context:
lkeys |= context.all_predicates()
lkeys = lkeys - {S.true, S.false}
tmp_keys = None
while tmp_keys != set():
tmp = set()
for l in lkeys:
syms = find_symbols(l)
if (syms & req_keys) != set():
tmp |= syms
tmp_keys = tmp - req_keys
req_keys |= tmp_keys
keys |= {l for l in lkeys if find_symbols(l) & req_keys != set()}
exprs = {key.args[0] if isinstance(key, AppliedPredicate) else key for key in keys}
return exprs, relevant_facts
for expr in exprs:
for fact in fact_registry[expr.func]:
cnf_fact = CNF.to_CNF(fact)
newfact = cnf_fact.rcall(expr)
relevant_facts = relevant_facts._and(newfact)
newexprs |= set([key.args[0] for key in newfact.all_predicates()
if isinstance(key, AppliedPredicate)])
return newexprs - exprs, relevant_facts
def get_all_relevant_facts(proposition, assumptions=True,
context=global_assumptions, use_known_facts=True, iterations=oo):
# The relevant facts might introduce new keys, e.g., Q.zero(x*y) will
# introduce the keys Q.zero(x) and Q.zero(y), so we need to run it until
# we stop getting new things. Hopefully this strategy won't lead to an
# infinite loop in the future.
i = 0
relevant_facts = CNF()
exprs = None
all_exprs = set()
while exprs != set():
exprs, relevant_facts = get_relevant_facts(proposition,
assumptions, context, exprs=exprs,
relevant_facts=relevant_facts)
all_exprs |= exprs
i += 1
if i >= iterations:
break
if use_known_facts:
known_facts_CNF = CNF()
known_facts_CNF.add_clauses(get_all_known_facts())
kf_encoded = EncodedCNF()
kf_encoded.from_cnf(known_facts_CNF)
def translate_literal(lit, delta):
if lit > 0:
return lit + delta
else:
return lit - delta
def translate_data(data, delta):
return [{translate_literal(i, delta) for i in clause} for clause in data]
data = []
symbols = []
n_lit = len(kf_encoded.symbols)
for i, expr in enumerate(all_exprs):
symbols += [pred(expr) for pred in kf_encoded.symbols]
data += translate_data(kf_encoded.data, i * n_lit)
encoding = dict(list(zip(symbols, range(1, len(symbols)+1))))
ctx = EncodedCNF(data, encoding)
else:
ctx = EncodedCNF()
ctx.add_from_cnf(relevant_facts)
return ctx
|
bsd-3-clause
| -4,291,670,985,093,664,300
| 31.803797
| 91
| 0.612001
| false
| 3.753077
| false
| false
| false
|
umaptechnologies/must
|
details/factories.py
|
1
|
4044
|
import inspect
from class_pattern import ClassPattern
from primitive_musts import SafeObject
class Factory(object):
''' WRITEME '''
def __init__(self, obj_constructor, constructor_args, product_pattern, universe, known_parameters):
self._obj_constructor = obj_constructor
self._constructor_args = constructor_args
self._factory_header = constructor_args
self._product_pattern = product_pattern
self._universe = universe
self._known_parameters = known_parameters
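    # make() supplies any constructor argument that is not part of the factory header by
    # asking the universe to create an object matching that parameter's mold.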
def make(self, *args):
arg_index = 0
dependencies = []
for i in range(len(self._constructor_args)):
a = self._constructor_args[i]
if a in self._factory_header:
dependencies.append(args[arg_index])
arg_index += 1
else:
namehint = str(self._obj_constructor)+' needs '+('an' if a[0] in 'aeiou' else 'a')+' "'+a+'" that'
dependencies.append(self._universe.create_with_namehint(namehint, self._product_pattern._constructor.param_signatures[i].get_param_mold()))
# TODO: Incorporate self._known_parameters
result = self._obj_constructor(*dependencies)
result.must_return = lambda x: SafeObject()
return result
def must_make(self, obj_type, parameters):
new_factory_header = parameters.split(', ')
assert self._factory_header == self._constructor_args or new_factory_header == self._factory_header, "Factory parameters cannot be %s; already specified as %s." % (new_factory_header, self._factory_header)
self._factory_header = new_factory_header
return self
def that_must_make(self, obj_type, parameters):
return self.must_make(obj_type, parameters)
def and_must_make(self, obj_type, parameters):
return self.must_make(obj_type, parameters)
def must(self, action, taking='', returning=''):
return self
def must_have(self, *attributes):
return self
def must_use(self, **known_parameters):
return self
def that_must(self, action, taking='', returning=''):
return self.must(action, taking, returning)
def that_must_have(self, *attributes):
return self.must_have(*attributes)
def that_must_use(self, **known_parameters):
return self.must_use(**known_parameters)
def and_must(self, action, taking='', returning=''):
return self.must(action, taking, returning)
def and_must_have(self, *attributes):
return self.must_have(*attributes)
def and_must_use(self, **known_parameters):
return self.must_use(**known_parameters)
def __str__(self):
result = str(self._obj_constructor)+" factory("
result += ', '.join(self._constructor_args)
result += ")"
return result
class FactoryPattern(object):
    ''' Describes how to build a Factory for a constructor and whether such a factory satisfies a given set of requirements. '''
def __init__(self, constructor, ignore_warnings=False):
self._constructor = constructor
self._constructor_args = inspect.getargspec(constructor.__init__).args[1:] # Ignore 'self'
self._product = ClassPattern(constructor)
def reflects_class(self, possible_class):
return False
def create(self, universe, aliases, known_parameters):
return Factory(self._constructor, self._constructor_args, self._product, universe, known_parameters)
def matches(self, requirements, aliases):
is_factory = requirements.type == 'factory'
has_parameters = self.has_parameters(requirements.parameters)
product_matches = (requirements.product is None) or \
(self._product.matches(requirements.product, aliases))
return is_factory and has_parameters and product_matches
def has_parameters(self, parameters):
return all([x in self._constructor_args for x in parameters])
def __str__(self):
result = str(self._constructor)+" factory("
result += ', '.join(self._constructor_args)
result += ")"
return result
|
apache-2.0
| 4,343,050,456,781,097,000
| 37.514286
| 213
| 0.638229
| false
| 4.190674
| false
| false
| false
|
mjirik/io3d
|
io3d/fsbrowser.py
|
1
|
17535
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from loguru import logger
import glob
import numpy as np
import os
# TODO remove cv2 - done
import matplotlib.pyplot as plt
from fnmatch import fnmatch
try:
import pydicom as pdicom
except ImportError:
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import dicom as pdicom
logger.debug("dicom imported - it would be better use pydicom")
from os import listdir
from os.path import isfile, join
from . import datareader
from skimage import io
# TODO - PyQt5 - done
from PyQt5.QtWidgets import QFileDialog, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
# FileSystemBrowser("c:/jkdfaldkfj/asdfasjfh")
class FileSystemBrowser:
def __init__(self, path=None):
self.path = path
self.preview_size = [100, 100]
self.nova_promenna = 5
pass
# metoda pouze na zobrazeni obrazku - volala by se v pripade ze tam nejaky bude
def get_path_info_preview(self, path):
path_lower = path.lower()
# name
name = os.path.basename(os.path.normpath(path))
name_final = "name: " + name
path_sl = path + "/"
if ".jpg" in path_lower:
preview = "Used path leads to current image."
img = io.imread(path)
io.imshow(img)
io.show()
elif ".png" in path_lower:
preview = "Used path leads to current image."
img = io.imread(path)
io.imshow(img)
io.show()
elif ".dcm" in path_lower:
preview = "Used path leads to current image."
ds = pdicom.dcmread(path)
plt.imshow(ds.pixel_array, cmap=plt.cm.bone)
else:
preview = "Preview of files in dir: " + name
only_files = [f for f in listdir(path) if isfile(join(path, f))]
for x in only_files:
if (".dcm" or ".Dcm" or ".DCM") in x:
ending = os.path.basename(os.path.normpath(path_sl + x))
preview_path = path_sl + ending
ds = pdicom.dcmread(preview_path)
plt.imshow(ds.pixel_array, cmap=plt.cm.bone)
break
elif (".jpg" or ".Jpg" or ".JPG") in x:
ending = os.path.basename(os.path.normpath(path_sl + x))
preview_path = path_sl + ending
img = io.imread(preview_path)
io.imshow(img)
io.show()
break
elif (".png" or ".Png" or ".PNG") in x:
ending = os.path.basename(os.path.normpath(path_sl + x))
preview_path = path_sl + ending
img = io.imread(preview_path)
io.imshow(img)
io.show()
break
else:
None
break
    # There really has to be (self, path) here: self refers to the parent object, followed by the other parameters.
# def get_path_info(path): #(self, path)?
def get_path_info(self, path):
try:
path_sl = path + "/"
res_last = path[-1]
if res_last == "/":
path_sl = path
else:
path_sl = path + "/"
# name
name = os.path.basename(os.path.normpath(path))
name_final = "name: " + name
# type
type_ = os.path.isdir(path)
if type_ == 1:
type_res = "type: .dir"
if type_ == 0:
type_res = "type: " + name
# text - files, series, files
serie_counter = 0
study_counter = 0
all_names = []
for root, dirs, files in os.walk(path):
for d in dirs:
all_names.append(d.lower())
for f in files:
all_names.append(f.lower())
# lowercase - should be able to count all series,studies..
for i in all_names:
if "serie" in i:
serie_counter += 1
if "study" in i:
study_counter += 1
filescounter = sum([len(files) for r, d, files in os.walk(path)])
text = (
"Study: "
+ str(study_counter)
+ " Series: "
+ str(serie_counter)
+ " Files: "
+ str(filescounter)
)
path_lower = path.lower()
# preview - forced path,some pic. from serie?
if ".jpg" in path_lower:
preview = "Used path leads to current image."
elif ".png" in path_lower:
preview = "Used path leads to current image."
elif ".dcm" in path_lower:
preview = "Used path leads to current image."
else:
preview = "Preview of files in dir: " + name
only_files = [f for f in listdir(path) if isfile(join(path, f))]
for x in only_files:
if (".dcm" or ".Dcm" or ".DCM") in x:
print("dcm files")
break
elif (".jpg" or ".Jpg" or ".JPG") in x:
print("jpf files")
break
elif (".png" or ".Png" or ".PNG") in x:
print("png files")
break
else:
None
break
# add required endings..
# import io3d.datareader
# io3d.datareader.read(file_path)
# add required endings..
# path
text_path = "path: " + path
# Fallowing function can be used for directory analysis
# import io3d.dcmreaddata
# dd = io3d.dcmreaddata.DicomDirectory(dirpath=path)
# dd.get_stats_of_series_in_dir()
# dd = dcmreaddata.DicomDirectory(self.path)
# stats = dd.get_stats_of_series_in_dir()
# studies_and_series = dd.get_stats_of_studies_and_series_in_dir()
# import pydicom
# pydicom.read_file(stats[7].dcmfilelist[0])
# np.ndarray.resize()
# JPG
# import SimpleITK as Sitk
# image = Sitk.ReadImage(datapath)
# data3d = dcmtools.get_pixel_array_from_sitk(image)
# TODO
acquid = 0
modality = 0
path = text_path
name = name_final
retval = [name, type_res, preview, text, acquid, modality, path]
# "acquisition_date": ["2015-02-16", "2015-02-16"],
# "modality": "MRI",
# print(retval)
# print(retval[0])
# print(retval[1])
# print(retval[2])
# print(retval[3])
# print(retval[4])
# print(retval[5])
# print(retval[6])
except:
print("$Error$")
return None
return retval
def get_dir_list(self):
from . import dcmreaddata
# datareader.read()
# TODO check the design of output structure
retval = [
{
"name": "Study0545",
"type": "dir",
"preview": np.zeros(self.preview_size),
"text": "1 study, 3 series, 18321 files, acquisition_date=2017-02-16 to 2017-02-19",
"acquisition_date": ["2015-02-16", "2015-02-16"],
"modality": "MRI",
"path": "C:/data/Study0545",
},
{
"name": "Serie54864",
"type": "serie",
"preview": np.zeros(self.preview_size),
"text": "3 series, 18321 files, acquisition_date=2017-02-16 to 2017-02-19",
"acquisition_date": ["2015-02-16", "2015-02-16"],
"modality": "MRI",
"path": "c:/data/",
},
{ # maybe signle file make no sense
"name": "first.mhd",
"type": "file",
"preview": np.zeros(self.preview_size),
"text": "[1x512x512], voxelsize_mm=[5.0, 0.5, 0.5], acquisition_date=2015-08-16",
"voxelsize_mm": [5.0, 0.5, 0.5],
"acquisition_date": "2015-08-16",
"modality": "CT",
},
]
return retval
# def file_anonymization(self, filename, output_filename=None):
# pass
def recursive_anonymization(self, path, output_path=None):
dirlist = glob.glob(path)
pass
def getOpenFileName(path, *other_params):
        # TODO: implement based on fsbrowser_test.py:test_devel_qt_dialog_fsbrowser()
filename = ""
return filename
# Widget - dcm browser
# dcm preview widget + dir/img info widget
# getOpenFileName - fcn. to get path of chosen file
class DCMage(QFileDialog):
def __init__(self, *args, **kwargs):
QFileDialog.__init__(self, *args, **kwargs)
self.setOption(QFileDialog.DontUseNativeDialog, True)
box = QVBoxLayout()
self.setFixedSize(self.width() + 450, self.height() + 500)
self.mpPreview = QLabel("Preview", self)
self.mpPreview.setFixedSize(500, 500)
self.mpPreview.setAlignment(Qt.AlignCenter)
self.mpPreview.setObjectName("DCMage")
box.addWidget(self.mpPreview)
box.addStretch()
self.layout().addLayout(box, 1, 3, 1, 1)
self.mpPreview_1 = QLabel("Preview", self)
self.mpPreview_1.setFixedSize(500, 500)
self.mpPreview_1.setAlignment(Qt.AlignCenter)
self.mpPreview_1.setObjectName("DCMage")
box.addWidget(self.mpPreview_1)
box.addStretch()
self.layout().addLayout(box, 3, 3, 1, 1)
self.currentChanged.connect(self.onChange)
self.fileSelected.connect(self.getOpenFileName)
self._fileSelected = None
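    # onChange() renders the currently highlighted DICOM file into a temporary PNG and shows
    # it in the preview label, while getOpenFileName() re-runs the dialog and returns the
    # last selected path.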
def dcm2png(self, path):
ds1 = pdicom.read_file(path, force=True)
x = plt.imsave("tempfile.png", ds1.pixel_array, cmap=plt.cm.gray)
img = io.imread("tempfile.png")
def onChange_text(self, path):
path_l = path.lower()
if ".dcm" in path_l:
temp_text = self.get_path_info(path_l)
self.mpPreview_1.setText(temp_text)
elif "study" in path_l:
temp_text = self.get_path_info(path_l)
self.mpPreview_1.setText(temp_text)
elif "serie" in path_l:
temp_text = self.get_path_info(path_l)
self.mpPreview_1.setText(temp_text)
elif "case" in path_l:
temp_text = self.get_path_info(path_l)
self.mpPreview_1.setText(temp_text)
elif "series" in path_l:
temp_text = self.get_path_info(path_l)
self.mpPreview_1.setText(temp_text)
else:
temp_text = "go to dir with dcm files"
def onChange(self, path):
self._fileSelected = path
path_l = path.lower()
self.onChange_text(path_l)
if ".dcm" in path_l:
try:
self.dcm2png(path)
except:
print("no dcm to display")
self.get_path_info(path_l)
elif "image_" in path_l:
try:
self.dcm2png(path)
except:
print("no dcm to display")
self.get_path_info(path_l)
elif "study" in path_l:
try:
self.dcm2png(path)
except:
print("no dcm to display")
self.get_path_info(path_l)
elif "serie" in path_l:
try:
self.dcm2png(path)
except:
print("no dcm to display")
elif "case" in path_l:
try:
self.dcm2png(path)
except:
print("no dcm to display")
elif "series" in path_l:
try:
self.dcm2png(path)
except:
print("no dcm to display")
self.get_path_info(path_l)
else:
self.mpPreview.setText("Preview")
pixmap = QPixmap("tempfile.png")
if pixmap.isNull():
self.mpPreview.setText("Preview")
else:
self.mpPreview.setPixmap(
pixmap.scaled(
self.mpPreview.width(),
self.mpPreview.height(),
Qt.KeepAspectRatio,
Qt.SmoothTransformation,
)
)
# self.get_path_info("tempfile.png")
try:
os.remove("tempfile.png")
except:
print("")
def getOpenFileName(self, file):
self.show()
self.exec_()
temp = self._fileSelected
# print(temp)
return temp
def get_path_info(self, path):
# problem with text len for qlabel - recomended for noneditable text //*textlen set to 00 needs to be edited
        if len(path) >= 50 and len(path) < 100:  # 'and', not bitwise '&', which binds tighter than the comparisons
path1 = path[:50]
path2 = path[50:100]
path_formated = path1 + "\n" + path2
# prepared cases for longer paths...
        elif len(path) >= 100 and len(path) < 150:
path1 = path[:50]
path2 = path[50:100]
path3 = path[100:150]
path_formated = path1 + "\n" + path2 + "\n" + path3
        elif len(path) >= 150 and len(path) < 200:
path1 = path[:50]
path2 = path[50:100]
path3 = path[100:150]
path4 = path[150:200]
path_formated = path1 + "\n" + path2 + "\n" + path3 + "\n" + path4
        elif len(path) >= 240 and len(path) < 300:
path1 = path[:60]
path2 = path[60:120]
path3 = path[120:180]
path4 = path[180:240]
path5 = path[240:300]
path_formated = (
path1 + "\n" + path2 + "\n" + path3 + "\n" + path4 + "\n" + path5
)
else:
print("too long path")
path_formated = path
try:
path_sl = path + "/"
res_last = path[-1]
if res_last == "/":
path_sl = path
else:
path_sl = path + "/"
# name
name = os.path.basename(os.path.normpath(path))
name_final = "name: " + name + "\n"
# type
type_ = os.path.isdir(path)
if type_ == 1:
type_res = "type: .dir" + "\n"
if type_ == 0:
type_res = "type: " + name + "\n"
# text - files, series, files
serie_counter = 0
study_counter = 0
all_names = []
counter_fail = 0
for root, dirs, files in os.walk(path):
for d in dirs:
all_names.append(d.lower())
# TODO fix limit
for f in files:
all_names.append(f.lower())
# lowercase - should be able to count all series,studies..
for i in all_names:
if "serie" in i:
serie_counter += 1
if "study" in i:
study_counter += 1
filescounter = sum([len(files) for r, d, files in os.walk(path)])
text = (
"Study: "
+ str(study_counter)
+ "\n"
+ " Series: "
+ str(serie_counter)
+ " Files: "
+ str(filescounter)
+ "\n"
)
path_lower = path.lower()
# preview - forced path,some pic. from serie?
if ".jpg" in path_lower:
preview = "image."
elif ".png" in path_lower:
preview = "image."
elif ".dcm" in path_lower:
preview = "image."
else:
preview = "Type: " + name
only_files = [f for f in listdir(path) if isfile(join(path, f))]
for x in only_files:
if (".dcm" or ".Dcm" or ".DCM") in x:
print("dcm files")
break
elif (".jpg" or ".Jpg" or ".JPG") in x:
print("jpf files")
break
elif (".png" or ".Png" or ".PNG") in x:
print("png files")
break
else:
None
break
text_path = "path: " + path
acquid = 0
modality = 0
path = text_path
name = name_final
retval = [name, type_res, preview, text, acquid, modality, path_formated]
# retval = [path_formated, path_formated1]
retval_str = "".join(map(str, retval))
# "acquisition_date": ["2015-02-16", "2015-02-16"],
# "modality": "MRI",
return retval_str
except:
print("$$$")
return None
return None
|
mit
| -8,339,100,194,311,851,000
| 31.814607
| 116
| 0.47315
| false
| 3.863947
| false
| false
| false
|
sfu-discourse-lab/SFU_Comment_Extractor
|
Source_Code/CSV_creation/duplicate_threads.py
|
1
|
2389
|
import pandas as pd
import re
import ast
import multiprocessing as mp
from multiprocessing import cpu_count
import sys
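# Removes source2 comment threads that exactly duplicate a source1 thread (as recorded in
# each comment's duplicate_flag), processing articles in parallel across CPU cores.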
def check_match(thread_df):
pat = "source2_\d+_\d+"
for i, row in thread_df.iterrows():
duplicate = ast.literal_eval(row.duplicate_flag)
if not duplicate['exact_match']:
return False
return re.findall(pat, " ".join(duplicate['exact_match']))
def thread_length(orig_length, comment_id, threads_df, orig_comment_id):
orig_df = threads_df[threads_df.comment_counter.str.contains(orig_comment_id + "$|" + orig_comment_id + "_")]
for id in comment_id:
counter = 0
temp_df = threads_df[threads_df.comment_counter.str.contains(id + "$|" + id + "_")]
if len(temp_df) == orig_length:
for i, row in orig_df.iterrows():
match_list = ast.literal_eval(row.duplicate_flag)
if re.findall(id + "$|" + id + "_", " ".join(match_list['exact_match'])):
counter += 1
if counter == orig_length:
return id
return False
def parallelize(data, func):
cores = cpu_count()
df_list = []
for i, df_article_id in data.groupby('article_id'):
df_list.append(df_article_id)
print("Dataframes list prepared.")
pool = mp.Pool(cores)
data = pd.concat(pool.map(func, df_list))
pool.close()
pool.join()
return data
def remove_duplicate_threads(threads_df):
pattern = "source1_\d+_\d+$"
source1_df = threads_df[threads_df['comment_counter'].str.contains(pattern)]
root_comments = list(source1_df.comment_counter)
for comment in root_comments:
thread = threads_df[threads_df.comment_counter.str.contains(comment + "$|" + comment + "_")]
if thread.empty:
continue
match = check_match(thread)
if match:
match_id = thread_length(len(thread), match, threads_df, comment)
if match_id:
threads_df = threads_df[~threads_df['comment_counter'].str.contains(match_id + "$|" + match_id + "_")]
return threads_df
def main():
    # DataFrame.from_csv was removed in pandas 1.0; read_csv is the equivalent call here.
    articles_df = pd.read_csv(sys.argv[1], encoding="ISO-8859-1", index_col=None)
df_processed = parallelize(articles_df, remove_duplicate_threads)
df_processed.to_csv("duplicates_removed.csv", index=False)
if __name__ == "__main__":
main()
|
mit
| -2,227,418,396,662,079,500
| 30.434211
| 118
| 0.609041
| false
| 3.523599
| false
| false
| false
|
leiferikb/bitpop
|
src/v8/tools/js2c.py
|
1
|
15907
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a utility for converting JavaScript source code into C-style
# char arrays. It is used for embedded JavaScript code in the V8
# library.
import os, re, sys, string
import optparse
import jsmin
import bz2
import textwrap
class Error(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def ToCArray(byte_sequence):
result = []
for chr in byte_sequence:
result.append(str(ord(chr)))
joined = ", ".join(result)
return textwrap.fill(joined, 80)
def RemoveCommentsAndTrailingWhitespace(lines):
lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
return lines
def ReadFile(filename):
file = open(filename, "rt")
try:
lines = file.read()
finally:
file.close()
return lines
EVAL_PATTERN = re.compile(r'\beval\s*\(')
WITH_PATTERN = re.compile(r'\bwith\s*\(')
def Validate(lines):
# Because of simplified context setup, eval and with is not
# allowed in the natives files.
if EVAL_PATTERN.search(lines):
raise Error("Eval disallowed in natives.")
if WITH_PATTERN.search(lines):
raise Error("With statements disallowed in natives.")
# Pass lines through unchanged.
return lines
def ExpandConstants(lines, constants):
for key, value in constants:
lines = key.sub(str(value), lines)
return lines
def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
pattern_match = name_pattern.search(lines, pos)
while pattern_match is not None:
# Scan over the arguments
height = 1
start = pattern_match.start()
end = pattern_match.end()
assert lines[end - 1] == '('
last_match = end
arg_index = [0] # Wrap state into array, to work around Python "scoping"
mapping = { }
def add_arg(str):
# Remember to expand recursively in the arguments
replacement = expander(str.strip())
mapping[macro.args[arg_index[0]]] = replacement
arg_index[0] += 1
while end < len(lines) and height > 0:
# We don't count commas at higher nesting levels.
if lines[end] == ',' and height == 1:
add_arg(lines[last_match:end])
last_match = end + 1
elif lines[end] in ['(', '{', '[']:
height = height + 1
elif lines[end] in [')', '}', ']']:
height = height - 1
end = end + 1
# Remember to add the last match.
add_arg(lines[last_match:end-1])
result = macro.expand(mapping)
# Replace the occurrence of the macro with the expansion
lines = lines[:start] + result + lines[end:]
pattern_match = name_pattern.search(lines, start + len(result))
return lines
def ExpandMacros(lines, macros):
# We allow macros to depend on the previously declared macros, but
# we don't allow self-dependecies or recursion.
for name_pattern, macro in reversed(macros):
def expander(s):
return ExpandMacros(s, macros)
lines = ExpandMacroDefinition(lines, 0, name_pattern, macro, expander)
return lines
class TextMacro:
def __init__(self, args, body):
self.args = args
self.body = body
def expand(self, mapping):
result = self.body
for key, value in mapping.items():
result = result.replace(key, value)
return result
class PythonMacro:
def __init__(self, args, fun):
self.args = args
self.fun = fun
def expand(self, mapping):
args = []
for arg in self.args:
args.append(mapping[arg])
return str(self.fun(*args))
CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
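# Hypothetical macros.py lines matched by the three patterns above:
#   const kMaxLength = 1024;
#   macro IS_NUMBER(arg) = (typeof(arg) === 'number');
#   python macro CHAR_CODE(str) = ord(str[1]);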
def ReadMacros(lines):
constants = []
macros = []
for line in lines.split('\n'):
hash = line.find('#')
if hash != -1: line = line[:hash]
line = line.strip()
    if len(line) == 0: continue
const_match = CONST_PATTERN.match(line)
if const_match:
name = const_match.group(1)
value = const_match.group(2).strip()
constants.append((re.compile("\\b%s\\b" % name), value))
else:
macro_match = MACRO_PATTERN.match(line)
if macro_match:
name = macro_match.group(1)
args = [match.strip() for match in macro_match.group(2).split(',')]
body = macro_match.group(3).strip()
macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
else:
python_match = PYTHON_MACRO_PATTERN.match(line)
if python_match:
name = python_match.group(1)
args = [match.strip() for match in python_match.group(2).split(',')]
body = python_match.group(3).strip()
fun = eval("lambda " + ",".join(args) + ': ' + body)
macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun)))
else:
raise Error("Illegal line: " + line)
return (constants, macros)
INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')
def ExpandInlineMacros(lines):
pos = 0
while True:
macro_match = INLINE_MACRO_PATTERN.search(lines, pos)
if macro_match is None:
# no more macros
return lines
name = macro_match.group(1)
args = [match.strip() for match in macro_match.group(2).split(',')]
end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end());
if end_macro_match is None:
raise Error("Macro %s unclosed" % name)
body = lines[macro_match.end():end_macro_match.start()]
# remove macro definition
lines = lines[:macro_match.start()] + lines[end_macro_match.end():]
name_pattern = re.compile("\\b%s\\(" % name)
macro = TextMacro(args, body)
# advance position to where the macro defintion was
pos = macro_match.start()
def non_expander(s):
return s
lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
HEADER_TEMPLATE = """\
// Copyright 2011 Google Inc. All Rights Reserved.
// This file was generated from .js source files by GYP. If you
// want to make changes to this file you should either change the
// javascript source files or the GYP script.
#include "v8.h"
#include "natives.h"
#include "utils.h"
namespace v8 {
namespace internal {
%(sources_declaration)s\
%(raw_sources_declaration)s\
template <>
int NativesCollection<%(type)s>::GetBuiltinsCount() {
return %(builtin_count)i;
}
template <>
int NativesCollection<%(type)s>::GetDebuggerCount() {
return %(debugger_count)i;
}
template <>
int NativesCollection<%(type)s>::GetIndex(const char* name) {
%(get_index_cases)s\
return -1;
}
template <>
int NativesCollection<%(type)s>::GetRawScriptsSize() {
return %(raw_total_length)i;
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetRawScriptSource(int index) {
%(get_raw_script_source_cases)s\
return Vector<const char>("", 0);
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetScriptName(int index) {
%(get_script_name_cases)s\
return Vector<const char>("", 0);
}
template <>
Vector<const byte> NativesCollection<%(type)s>::GetScriptsSource() {
return Vector<const byte>(sources, %(total_length)i);
}
template <>
void NativesCollection<%(type)s>::SetRawScriptsSource(Vector<const char> raw_source) {
ASSERT(%(raw_total_length)i == raw_source.length());
raw_sources = raw_source.start();
}
} // internal
} // v8
"""
SOURCES_DECLARATION = """\
static const byte sources[] = { %s };
"""
RAW_SOURCES_COMPRESSION_DECLARATION = """\
static const char* raw_sources = NULL;
"""
RAW_SOURCES_DECLARATION = """\
static const char* raw_sources = reinterpret_cast<const char*>(sources);
"""
GET_INDEX_CASE = """\
if (strcmp(name, "%(id)s") == 0) return %(i)i;
"""
GET_RAW_SCRIPT_SOURCE_CASE = """\
if (index == %(i)i) return Vector<const char>(raw_sources + %(offset)i, %(raw_length)i);
"""
GET_SCRIPT_NAME_CASE = """\
if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
"""
def BuildFilterChain(macro_filename):
"""Build the chain of filter functions to be applied to the sources.
Args:
macro_filename: Name of the macro file, if any.
Returns:
A function (string -> string) that reads a source file and processes it.
"""
filter_chain = [ReadFile]
if macro_filename:
(consts, macros) = ReadMacros(ReadFile(macro_filename))
filter_chain.append(lambda l: ExpandConstants(l, consts))
filter_chain.append(lambda l: ExpandMacros(l, macros))
filter_chain.extend([
RemoveCommentsAndTrailingWhitespace,
ExpandInlineMacros,
Validate,
jsmin.JavaScriptMinifier().JSMinify
])
def chain(f1, f2):
return lambda x: f2(f1(x))
return reduce(chain, filter_chain)
class Sources:
def __init__(self):
self.names = []
self.modules = []
self.is_debugger_id = []
def IsDebuggerFile(filename):
return filename.endswith("-debugger.js")
def IsMacroFile(filename):
return filename.endswith("macros.py")
def PrepareSources(source_files):
"""Read, prepare and assemble the list of source files.
Args:
sources: List of Javascript-ish source files. A file named macros.py
will be treated as a list of macros.
Returns:
An instance of Sources.
"""
macro_file = None
macro_files = filter(IsMacroFile, source_files)
assert len(macro_files) in [0, 1]
if macro_files:
source_files.remove(macro_files[0])
macro_file = macro_files[0]
filters = BuildFilterChain(macro_file)
# Sort 'debugger' sources first.
source_files = sorted(source_files,
lambda l,r: IsDebuggerFile(r) - IsDebuggerFile(l))
result = Sources()
for source in source_files:
try:
lines = filters(source)
except Error as e:
raise Error("In file %s:\n%s" % (source, str(e)))
result.modules.append(lines);
is_debugger = IsDebuggerFile(source)
result.is_debugger_id.append(is_debugger);
name = os.path.basename(source)[:-3]
result.names.append(name if not is_debugger else name[:-9]);
return result
def BuildMetadata(sources, source_bytes, native_type, omit):
"""Build the meta data required to generate a libaries file.
Args:
sources: A Sources instance with the prepared sources.
source_bytes: A list of source bytes.
(The concatenation of all sources; might be compressed.)
native_type: The parameter for the NativesCollection template.
omit: bool, whether we should omit the sources in the output.
Returns:
A dictionary for use with HEADER_TEMPLATE.
"""
total_length = len(source_bytes)
raw_sources = "".join(sources.modules)
# The sources are expected to be ASCII-only.
assert not filter(lambda value: ord(value) >= 128, raw_sources)
# Loop over modules and build up indices into the source blob:
get_index_cases = []
get_script_name_cases = []
get_raw_script_source_cases = []
offset = 0
for i in xrange(len(sources.modules)):
native_name = "native %s.js" % sources.names[i]
d = {
"i": i,
"id": sources.names[i],
"name": native_name,
"length": len(native_name),
"offset": offset,
"raw_length": len(sources.modules[i]),
}
get_index_cases.append(GET_INDEX_CASE % d)
get_script_name_cases.append(GET_SCRIPT_NAME_CASE % d)
get_raw_script_source_cases.append(GET_RAW_SCRIPT_SOURCE_CASE % d)
offset += len(sources.modules[i])
assert offset == len(raw_sources)
# If we have the raw sources we can declare them accordingly.
have_raw_sources = source_bytes == raw_sources and not omit
raw_sources_declaration = (RAW_SOURCES_DECLARATION
if have_raw_sources else RAW_SOURCES_COMPRESSION_DECLARATION)
metadata = {
"builtin_count": len(sources.modules),
"debugger_count": sum(sources.is_debugger_id),
"sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
"sources_data": ToCArray(source_bytes) if not omit else "",
"raw_sources_declaration": raw_sources_declaration,
"raw_total_length": sum(map(len, sources.modules)),
"total_length": total_length,
"get_index_cases": "".join(get_index_cases),
"get_raw_script_source_cases": "".join(get_raw_script_source_cases),
"get_script_name_cases": "".join(get_script_name_cases),
"type": native_type,
}
return metadata
def CompressMaybe(sources, compression_type):
"""Take the prepared sources and generate a sequence of bytes.
Args:
sources: A Sources instance with the prepared sourced.
compression_type: string, describing the desired compression.
Returns:
A sequence of bytes.
"""
sources_bytes = "".join(sources.modules)
if compression_type == "off":
return sources_bytes
elif compression_type == "bz2":
return bz2.compress(sources_bytes)
else:
raise Error("Unknown compression type %s." % compression_type)
def JS2C(source, target, native_type, compression_type, raw_file, omit):
sources = PrepareSources(source)
sources_bytes = CompressMaybe(sources, compression_type)
metadata = BuildMetadata(sources, sources_bytes, native_type, omit)
# Optionally emit raw file.
if raw_file:
output = open(raw_file, "w")
output.write(sources_bytes)
output.close()
# Emit resulting source file.
output = open(target, "w")
output.write(HEADER_TEMPLATE % metadata)
output.close()
def main():
parser = optparse.OptionParser()
parser.add_option("--raw", action="store",
help="file to write the processed sources array to.")
parser.add_option("--omit", dest="omit", action="store_true",
help="Omit the raw sources from the generated code.")
parser.set_usage("""js2c out.cc type compression sources.js ...
out.cc: C code to be generated.
type: type parameter for NativesCollection template.
compression: type of compression used. [off|bz2]
sources.js: JS internal sources or macros.py.""")
(options, args) = parser.parse_args()
JS2C(args[3:], args[0], args[1], args[2], options.raw, options.omit)
if __name__ == "__main__":
main()
|
gpl-3.0
| -8,353,512,724,255,907,000
| 29.947471
| 101
| 0.663293
| false
| 3.540396
| false
| false
| false
|
fastflo/emma
|
emmalib/providers/sqlite/test.py
|
1
|
1782
|
# -*- coding: utf-8 -*-
# emma
#
# Copyright (C) 2006 Florian Schmidt (flo@fastflo.de)
# 2014 Nickolay Karnaukhov (mr.electronick@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sqlite3
conn = sqlite3.connect(database='/home/nick/test_database.sqlite')
print conn
cur = conn.cursor()
print cur.execute("SELECT * FROM sqlite_master ORDER BY name")
print cur.description
res = cur.fetchall()
for row in res:
print row
# from SQLiteHost import SQLiteHost
#
# host = SQLiteHost(None, None, '/home/nick/test.sqlite')
# host.connect()
#
# host.databases['dummydb'].refresh()
# print host.databases['dummydb'].tables
#
# table = host.databases['dummydb'].tables['aaa']
# table.refresh()
#
# print "---------------------------"
# print "Table:"
# print table.__dict__
#
# print "---------------------------"
# print "Table fields:"
# for f in table.fields:
# print f.__dict__
#
# print "---------------------------"
# print "Table indexes:"
# for i in table.indexes:
# print i.__dict__
|
gpl-2.0
| 3,862,581,033,829,721,000
| 31.4
| 75
| 0.640853
| false
| 3.807692
| false
| false
| false
|
jbeyerstedt/RIOT-OTA-update
|
examples/ota_update/test2.py
|
1
|
6712
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Jannik Beyerstedt <jannik.beyerstedt@haw-hamburg.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
# module for integration in tests.py. No standalone use intended
import subprocess
import time
import sys
import os
import signal
from nbstreamreader import NonBlockingStreamReader as NBSR
from test_ethos_helpers import ethos_command
nbsr = None
ethos = None
def kill_ethos(ethos):
# kill the ethos process properly
os.killpg(os.getpgid(ethos.pid), signal.SIGTERM)
### call first to prepare and setup things
def prepare(tty_out):
global nbsr
global ethos
print("(Step 0) flashing with test firmware")
subprocess.call("FW_VERS=0x2 FW_VERS_2=0x3 make merge-test-hex >" + tty_out, shell=True)
## flash the devive with factory-hex
if subprocess.call("FW_VERS=0x1 make flash-test >" + tty_out, shell=True):
return -1
time.sleep(1)
## start ethos console
ethos = subprocess.Popen("make ethos 2>/dev/null", stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
time.sleep(1)
nbsr = NBSR(ethos.stdout)
# get first diagnostic lines from ethos console
ret_val = ethos_command(nbsr, ethos, "/dist/tools/ethos")
if ret_val < 0:
print(" [ERROR] no answer from ethos")
kill_ethos(ethos)
return -1
elif ret_val == 0:
print(" [ERROR] ethos not properly started")
kill_ethos(ethos)
return -1
ret_val, answer = ethos_command(nbsr, ethos, "command not found", command="h")
if ret_val < 0:
print(" [ERROR] no answer from ethos")
kill_ethos(ethos)
return -1
elif ret_val == 0:
print(" [ERROR] ethos shell does not answer correctly")
print(answer)
kill_ethos(ethos)
return -1
print(" [OK] both slots populated, ethos console started\n")
return 0
### Test 2a (update file with invalid file signature)
def do_part_a(tty_out):
global nbsr
global ethos
subprocess.call("cp -p fw_update-0xabc0123456789def-0x4-s1.bin fw_update-orig-0x4-s1", shell=True)
print("(Part A) testing FW update file signature validation")
# manipulate some bits of the vers 4, slot 1 file
subprocess.call("cat fw_update-orig-0x4-s1 | head -c -16 >fw_update-0xabc0123456789def-0x4-s1.bin", shell=True)
if subprocess.call("FW_VERS=0x4 make flash-updatefile-slot1 >" + tty_out, shell=True):
kill_ethos(ethos)
return -1
time.sleep(1)
## check running FW version
ret_val, answer = ethos_command(nbsr, ethos, "FW version 3, slot 2", command="fw_info")
if ret_val < 0:
print(" [ERROR] no answer from ethos")
kill_ethos(ethos)
return -1
elif ret_val == 0:
print(" [ERROR] wrong firmware version or slot started")
print("dumping fetched answer from device:\n" + answer)
kill_ethos(ethos)
return -1
print(" [OK] correct inital FW running")
## start update
ret_val, answer = ethos_command(nbsr, ethos, "[ota_file] INFO incorrect decrypted hash", command="ota_install", timeout=5)
if ret_val < 0:
print(" [ERROR] no answer from ethos")
kill_ethos(ethos)
return -1
elif ret_val == 0:
print(" [ERROR] detection of invalid signature not successful")
print("dumping fetched answer from device:\n\n" + answer)
kill_ethos(ethos)
return -1
print(" ==>[OK] broken file signature successfully detected\n")
# tidy up
subprocess.call("rm fw_update-0xabc0123456789def-0x4-s1.bin", shell=True)
subprocess.call("mv fw_update-orig-0x4-s1 fw_update-0xabc0123456789def-0x4-s1.bin", shell=True)
return 0
### Test 2b (update file with invalid hw_id)
def do_part_b(tty_out):
global nbsr
global ethos
print("(Part B) testing hardware ID validation")
if subprocess.call("HW_ID=0xbaadf00dbaadf00d FW_VERS=0x4 make flash-updatefile-slot1 >" + tty_out, shell=True):
kill_ethos(ethos)
return -1
time.sleep(1)
## check running FW version
ret_val, answer = ethos_command(nbsr, ethos, "FW version 3, slot 2", command="fw_info")
if ret_val < 0:
print(" [ERROR] no answer from ethos")
kill_ethos(ethos)
return -1
elif ret_val == 0:
print(" [ERROR] TODO")
print("dumping fetched answer from device:\n" + answer)
kill_ethos(ethos)
return -1
print(" [OK] correct inital FW running")
## start update
ret_val, answer = ethos_command(nbsr, ethos, "[ota_updater] ERROR update file is invalid", command="ota_install", timeout=5)
if ret_val < 0:
print(" [ERROR] no answer from ethos")
kill_ethos(ethos)
return -1
elif ret_val == 0:
print(" [ERROR] detection of invalid HW_ID not successful")
print("dumping fetched answer from device:\n\n" + answer)
kill_ethos(ethos)
return -1
print(" ==>[OK] file with wrong hardware id successfully detected\n")
return 0
### Test 2c (update file with lower fw_vers)
def do_part_c(tty_out):
global nbsr
global ethos
print("(Part C) testing FW update file signature validation")
if subprocess.call("FW_VERS=0x1 make flash-updatefile-slot1 >" + tty_out, shell=True):
kill_ethos(ethos)
return -1
time.sleep(1)
## check running FW version
ret_val, answer = ethos_command(nbsr, ethos, "FW version 3, slot 2", command="fw_info")
if ret_val < 0:
print(" [ERROR] no answer from ethos")
kill_ethos(ethos)
return -1
elif ret_val == 0:
print(" [ERROR] TODO")
print("dumping fetched answer from device:\n" + answer)
kill_ethos(ethos)
return -1
print(" [OK] correct inital FW running")
## start update
ret_val, answer = ethos_command(nbsr, ethos, "[ota_updater] ERROR update file is invalid", command="ota_install", timeout=5)
if ret_val < 0:
print(" [ERROR] no answer from ethos")
kill_ethos(ethos)
return -1
elif ret_val == 0:
print(" [ERROR] detection of downgrade attempt not successful")
print("dumping fetched answer from device:\n\n" + answer)
kill_ethos(ethos)
return -1
print(" ==>[OK] file with lower FW version successfully detected\n")
return 0
### call last to tidy up afterwards
def finish(tty_out):
global nbsr
global ethos
kill_ethos(ethos)
print("(Finish) tidying up done")
return 0
|
lgpl-2.1
| 2,028,957,324,872,644,600
| 30.218605
| 135
| 0.639005
| false
| 3.337643
| true
| false
| false
|
juancarlospaco/unicodemoticon
|
unicodemoticon/__main__.py
|
1
|
1742
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from datetime import datetime
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QStyle
from anglerfish import (make_logger, check_encoding,
make_post_exec_msg, set_process_name,
set_single_instance, set_process_priority)
try:
import qdarkstyle # https://github.com/ColinDuquesnoy/QDarkStyleSheet
except ImportError: # sudo pip3 install qdarkstyle
qdarkstyle = None # 100% optional
# if this script is executed directly: make relative imports work
if not __package__:
from pathlib import Path
parent_dir = Path(__file__).absolute().parent
sys.path.insert(0, str(parent_dir))
import unicodemoticon # noqa
__package__ = str("unicodemoticon")
from . import MainWidget # lint:ok noqa pragma:nocover
start_time = datetime.now()
def main(args=sys.argv):
make_logger("unicodemoticon", emoji=True)
lock = set_single_instance("unicodemoticon")
check_encoding()
set_process_name("unicodemoticon")
set_process_priority()
app = QApplication(args)
app.setApplicationName("unicodemoticon")
app.setOrganizationName("unicodemoticon")
app.setOrganizationDomain("unicodemoticon")
app.instance().setQuitOnLastWindowClosed(False) # no quit on dialog quit
if qdarkstyle:
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
icon = QIcon(app.style().standardPixmap(QStyle.SP_FileIcon))
app.setWindowIcon(icon)
mainwindow = MainWidget()
mainwindow.show()
mainwindow.hide()
make_post_exec_msg(start_time)
sys.exit(app.exec())
# may be unicodemoticon.__main__
if __name__.endswith("__main__"):
main()
|
gpl-3.0
| 616,482,973,865,316,100
| 28.525424
| 77
| 0.691734
| false
| 3.54065
| false
| false
| false
|
cerisola/fiscomp
|
percolation/analysis/common.py
|
1
|
1592
|
import numpy as np
import scipy.stats as stats
import scipy.integrate as integrate
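# Two-sided standard-normal quantiles for the listed confidence levels
# (None maps to 1, i.e. plain one-standard-error bounds).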
Z_normal = { None: 1, '90': 1.644854, '95': 1.959964, '99': 2.575829, '99.9': 3.290527, '99.99': 3.890592 }
# % Generic % #
def mean(v):
return np.mean(v)
def var(v):
return np.var(v, ddof=1)
def std(v):
return np.std(v, ddof=1)
def sem(v, ci=None):
Z = Z_normal[ci]
return Z*stats.sem(v)
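# Moments of a (non-negative) random variable recovered from its CDF F sampled
# on the grid x: E[X] = integral of (1 - F) dx and E[X^2] = 2 * integral of
# x*(1 - F) dx. Note that cdf_mean's '1 - simps(F)' form assumes x spans an
# interval of length 1, e.g. an occupation probability in [0, 1].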
def cdf_mean(F, x):
return (1 - integrate.simps(y=F, x=x))
def cdf_var(F, x):
return (2*integrate.simps(y=x*(1-F), x=x) - cdf_mean(F, x)**2)
def cdf_std(F, x):
return np.sqrt(cdf_var(F, x))
# % Binomial Distribution Aux % #
def binomial_var(p, n):
return n*p*(1-p)
def binomial_std(p, n):
return np.sqrt(n*p*(1 - p))
def binomial_sem(p, n, ci=None):
Z = Z_normal[ci]
return Z*np.sqrt(p*(1 - p)/n)
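# Wald (normal-approximation) confidence interval for a binomial proportion.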
def binomial_ci_wald(p, n, ci=None):
Z = Z_normal[ci]
normal_stderr = Z*np.sqrt(p*(1 - p)/n)
p_min = p - normal_stderr
p_max = p + normal_stderr
return p_min, p_max
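# Wilson score interval for a binomial proportion; the +/-1 and (4p - 2) terms
# match the continuity-corrected form.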
def binomial_ci_wilson(p, n, ci=None):
Z = Z_normal[ci]
p_min = (2*n*p + Z**2 - (Z*np.sqrt(Z**2 - 1/n + 4*n*p*(1-p) + (4*p - 2)) + 1))/(2*(n + Z**2))
p_max = (2*n*p + Z**2 + (Z*np.sqrt(Z**2 - 1/n + 4*n*p*(1-p) - (4*p - 2)) + 1))/(2*(n + Z**2))
p_min = np.maximum(0, p_min)
p_max = np.minimum(1, p_max)
return p_min, p_max
# % Utility function to apply the above functions to lists of arrays of different sizes % #
def listmap(func, v, args=None):
return np.array([func(v[idx], **args) if args else func(v[idx]) for idx in range(len(v))])
|
mit
| 1,011,829,247,394,902,300
| 20.808219
| 107
| 0.56407
| false
| 2.287356
| false
| false
| false
|
cligs/tmw
|
tmw_config.py
|
1
|
8798
|
#!/usr/bin/env python3
# Filename: my_tmw.py
# Author: #cf
# Version 0.2.0 (2015-08-27)
##################################################################
### CONFIG FILE for: Topic Modeling Workflow (tmw) ###
##################################################################
# Used in the following paper:
# Christof Schoech, "Topic Modeling French Crime Fiction",
# presented at the Digital Humanities Conference, Sydney, 2015.
# For information on requirements and usage, see the README file.
# This config file is structured as follows:
# 1. Preprocessing Texts
# 2. Topic Modeling
# 3. Posprocessing Data
# 4. Visualization
# 5. Other / Obsolete
import tmw
#print(help(topmod))
### Set the general working directory.
wdir = "/home/.../" # end with slash.
################################
### PREPROCESSING TEXTS ###
################################
### tei5reader_fulldocs (standard option)
### Extract selected plain text from XML/TEI files.
inpath = wdir + "master/*.xml"
outfolder = wdir + "1_txt/"
#tmw.tei5reader_fulldocs(inpath,outfolder)
### segmenter
### Split entire texts into smaller segments.
inpath = wdir + "1_txt/*.txt"
outfolder = wdir + "2_segs/"
target = 600
sizetolerancefactor = 1.1 # 1 = exact target; >1 = with some tolerance (1.1 = +/- 10%).
preserveparagraphs = True # True|False
#tmw.segmenter(inpath, outfolder, target, sizetolerancefactor, preserveparagraphs)
### segments_to_bins: inpath, outfile
### Currently not implemented any more / yet.
### pretokenize
### Perform some preliminary tokenization.
inpath = wdir + "2_segs/*.txt"
outfolder = wdir + "3_tokens/"
substitutionsFile = "./extras/fr_pretokenize_subs.csv"
#tmw.pretokenize(inpath, substitutionsFile, outfolder)
### call_treetagger
### Perform lemmatization and POS tagging.
infolder = wdir + "3_tokens/"
outfolder = wdir + "4_tagged/"
tagger = "/home/christof/Programs/TreeTagger/cmd/tree-tagger-french"
#tmw.call_treetagger(infolder, outfolder, tagger)
### make_lemmatext
### Extract selected lemmata from tagged text.
inpath = wdir + "4_tagged/*.trt"
outfolder = wdir + "5_lemmata/"
mode = "frN" # frN=nouns, esN=nouns, frNV=nouns+verbs, frNVAA=nouns+verbs+adj+adverbs
stoplist_errors = "./extras/fr_stopwords_errors.txt" # in tmw folder
#tmw.make_lemmatext(inpath, outfolder, mode, stoplist_errors)
################################
### TOPIC MODELING ###
################################
### call_mallet_import
### Imports text data into the Mallet corpus format.
mallet_path = "/home/christof/Programs/Mallet/bin/mallet"
infolder = wdir + "5_lemmata/"
outfolder = wdir + "6_mallet/"
outfile = outfolder + "corpus.mallet"
stoplist_project = "./extras/fr_stopwords_project.txt" # in tmw folder
#tmw.call_mallet_import(mallet_path, infolder, outfolder, outfile, stoplist_project)
### call_mallet_model
### Performs the actual topic modeling.
mallet_path = "/home/christof/Programs/Mallet/bin/mallet"
inputfile = wdir + "6_mallet/corpus.mallet"
outfolder = wdir + "6_mallet/"
num_topics = "250"
optimize_interval = "100"
num_iterations = "5000"
num_top_words = "200"
doc_topics_max = num_topics
num_threads = "4"
#tmw.call_mallet_modeling(mallet_path, inputfile, outfolder, num_topics, optimize_interval, num_iterations, num_top_words, doc_topics_max)
################################
### POSTPROCESSING DATA ###
################################
### create_mastermatrix
### Creates the mastermatrix with all information in one place.
corpuspath = wdir+"/2_segs/*.txt"
outfolder = wdir+"7_aggregates/"
mastermatrixfile = "mastermatrix.csv"
metadatafile = wdir+"/metadata.csv"
topics_in_texts = wdir+"/6_mallet/topics-in-texts.csv"
number_of_topics = 250
#tmw.create_mastermatrix(corpuspath, outfolder, mastermatrixfile, metadatafile, topics_in_texts, number_of_topics)
### calculate_averageTopicScores
### Based on the mastermatrix, calculates various average topic score datasets.
mastermatrixfile = wdir+"/7_aggregates/mastermatrix.csv"
outfolder = wdir+"7_aggregates/"
# targets: one or several:author|decade|subgenre|author-gender|idno|segmentID|narration
targets = ["author-name", "author-gender", "title", "decade", "subgenre",
"idno", "segmentID", "narration", "protagonist-policier"]
#tmw.calculate_averageTopicScores(mastermatrixfile, targets, outfolder)
### save_firstWords
### Saves the first words of each topic to a separate file.
topicWordFile = wdir+"6_mallet/topics-with-words.csv"
outfolder = wdir+"7_aggregates/"
filename = "firstWords.csv"
#tmw.save_firstWords(topicWordFile, outfolder, filename)
################################
### VISUALIZATION ###
################################
### make_wordle_from_mallet
### Creates a wordle for each topic.
word_weights_file = wdir + "6_mallet/" + "word-weights.txt"
topics = 250
words = 40
outfolder = wdir + "8_visuals/wordles/"
font_path = "/home/christof/.fonts/AlegreyaSans-Regular.otf"
dpi = 300
#tmw.make_wordle_from_mallet(word_weights_file,topics,words,outfolder,font_path,dpi)
### crop_images
### Crops the wordle image files, use if needed.
inpath = wdir + "8_visuals/wordles/*.png"
outfolder = wdir + "8_visuals/wordles/"
left = 225 # image start at the left
upper = 210 # image start at the top
right = 2225 # image end on the right
lower = 1310 # image end at the bottom
#tmw.crop_images(inpath, outfolder, left, upper, right, lower)
### plot_topTopics
### For each item from a category, creates a barchart of the top topics.
averageDatasets = wdir+"/7_aggregates/avg*.csv"
firstWordsFile = wdir+"/7_aggregates/firstWords.csv"
numberOfTopics = 250 # must be actual number of topics modeled.
targetCategories = ["author-name", "author-gender", "decade", "subgenre", "title"]
# one or several: "author-name", "author-gender", "decade", "subgenre", "title"
topTopicsShown = 30
fontscale = 1.0
height = 0 # 0=automatic and variable
dpi = 300
outfolder = wdir+"/8_visuals/topTopics/"
#tmw.plot_topTopics(averageDatasets, firstWordsFile, numberOfTopics, targetCategories, topTopicsShown, fontscale, height, dpi, outfolder)
### plot_topItems
### For each topic, creates a barchart with top items from a category.
averageDatasets = wdir+"/7_aggregates/avg*.csv"
outfolder = wdir+"/8_visuals/topItems/"
firstWordsFile = wdir+"/7_aggregates/firstWords.csv"
numberOfTopics = 250 # must be actual number of topics modeled.
targetCategories = ["author-name", "subgenre", "title", "decade", "author-gender"]
# choose one or several from: author-name, decade, subgenre, gender, idno, title, segmentID
topItemsShown = 30
fontscale = 0.8
height = 0 # 0=automatic and flexible
dpi = 300
#tmw.plot_topItems(averageDatasets, outfolder, firstWordsFile, numberOfTopics, targetCategories, topItemsShown, fontscale, height, dpi)
### plot_distinctiveness_heatmap
### For each category, make a heatmap of most distinctive topics.
averageDatasets = wdir+"/7_aggregates/avg*.csv"
firstWordsFile = wdir+"/7_aggregates/firstWords.csv"
outfolder = wdir+"/8_visuals/distinctiveness/"
targetCategories = ["author-name", "decade", "subgenre", "gender"]
# one or several: "author-name", "decade", "subgenre", "gender", "idno", "title"
numberOfTopics = 250 # must be actual number of topics modeled.
topTopicsShown = 20
fontscale = 1.0
dpi = 300
#tmw.plot_distinctiveness_heatmap(averageDatasets, firstWordsFile, outfolder, targetCategories, numberOfTopics, topTopicsShown, fontscale, dpi)
### plot_topicsOverTime
### Creates lineplots or areaplots for topic development over time.
averageDatasets = wdir+"/7_aggregates/avgtopicscores_by-decade.csv"
firstWordsFile = wdir+"/7_aggregates/firstWords.csv"
outfolder = wdir+"/8_visuals/overTime/"
numberOfTopics = 250 # must be actual number of topics modeled.
fontscale = 1.0
dpi = 300
height = 0 # for lineplot; 0=automatic
mode = "line" # area|line for areaplot or lineplot
topics = ["48","67","199"] # list of one or several topics
#tmw.plot_topicsOverTime(averageDatasets, firstWordsFile, outfolder, numberOfTopics, fontscale, dpi, height, mode, topics)
################################
### OTHER/OBSOLETE ###
################################
### 5c show segment
## To read a specific segment, better than looking in the folder.
segmentID = "rf0546§000083"
outfolder = wdir+"/9_sel-segs/"
#tmw.show_segment(wdir,segmentID, outfolder)
### 6b - create_topicscores_lineplot
inpath = wdir + "7_aggregates/*-lp.csv" # narrow down as needed
outfolder = wdir + "8_visuals/lineplots/"
topicwordfile = wdir + "6_mallet/topics-with-words.csv"
dpi = 300
height = 0.050
genres = ["detection","noir"] # User: set depending on metadata. Available: noir, detection, criminel, experim., archq., blanche, neopl., susp.
#tmw.create_topicscores_lineplot(inpath,outfolder,topicwordfile,dpi,height,genres)
|
mit
| 6,944,172,194,449,163,000
| 36.918103
| 143
| 0.69285
| false
| 3.145156
| false
| false
| false
|
schristakidis/p2ner
|
p2ner/components/plugin/holepuncher/holepuncher/holepuncher.py
|
1
|
8114
|
from p2ner.core.namespace import Namespace, initNS
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import p2ner.util.utilities as util
from messages.messages import PunchMessage,PunchReplyMessage,KeepAliveMessage,AskServerPunchMessage,StartPunchingMessage
from twisted.internet import reactor,task,defer
from p2ner.core.pipeline import Pipeline
from p2ner.core.components import loadComponent
from time import time
from p2ner.base.ControlMessage import MessageSent, MessageError
from p2ner.base.Consts import MessageCodes as MSG
class HolePuncher(Namespace):
@initNS
def __init__(self):
self.peers=[]
self.registerMessages()
self.constructPipe()
self.loopingCall = task.LoopingCall(self.sendKeepAlive)
self.loopingCall.start(30)
self.checkPeers={}
self.mcount=0
self.requestingPeers=[]
def registerMessages(self):
self.messages = []
self.messages.append(PunchMessage())
self.messages.append(PunchReplyMessage())
self.messages.append(KeepAliveMessage())
self.messages.append(AskServerPunchMessage())
self.messages.append(StartPunchingMessage())
def constructPipe(self):
self.holePipe=self.trafficPipe
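    # Gatekeeper for outgoing messages: peers with an established (or direct)
    # connection get the message immediately; for the others the message is
    # queued in checkPeers and hole punching is started first.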
def check(self,msg,content,peer,d,pipe):
if not peer:
return
#print 'checkinggggggg ',peer
toCheck=[]
send=True
if not isinstance(peer, (list, tuple)):
peer=[peer]
for p in peer:
if not self.netChecker.hpunching and not p.hpunch:
p.conOk=True
elif p.conOk:
if msg.code!=MSG.KEEP_ALIVE:
p.lastSend=time()
elif p.conProb:
print "can't connect to peer ",p," as determined from previous try"
elif p.hpunch or self.netChecker.hpunching and p.dataPort:
send=False
pr=[i for i in peer if i!=p]
if self.checkPeers.has_key(p):
self.checkPeers[p].append({'msg':(msg,content,peer,d,pipe),'peers':pr,'id':self.mcount})
else:
self.checkPeers[p]=[{'msg':(msg,content,peer,d,pipe),'peers':pr,'id':self.mcount}]
if not p in self.requestingPeers:
toCheck.append(p)
self.mcount+=1
#print 'to check ',toCheck
if send:
if len(peer)==1:
peer=peer[0]
pipe._send(msg,content,peer,d)
else:
for p in toCheck:
reactor.callLater(0.1,self.startPunching,p)
def sendKeepAlive(self):
for p in self.peers:
print p, p.lastSend,time()-p.lastSend
oldPeers=[p for p in self.peers if p.lastSend and time()-p.lastSend>=60]
for p in oldPeers:
p.conOk=False
self.peers=[p for p in self.peers if not p.lastSend or time()-p.lastSend<60]
for p in self.peers:
print 'sending keep allive to ',p
KeepAliveMessage.send(p, self.controlPipe,self.keepAliveFailed)
KeepAliveMessage.send(p, self.holePipe,self.keepAliveFailed)
servers=[s.server for s in self.root.getAllStreams()]
try:
nat=self.root.netChecker.nat
except:
nat=False
if nat:
for p in servers:
KeepAliveMessage.send(p, self.controlPipe,self.keepAliveFailed)
def startPunching(self,peer):
if True:# peer.hpunch:
print 'sending ask server punch message to ',peer.learnedFrom,' for ',peer
AskServerPunchMessage.send(peer,peer.learnedFrom,self.controlPipe,self._startPunching,self.failedInterPunch,peer)
else:
self._startPunching(None,peer)
def failedInterPunch(self,server,peer):
print 'failed to start punching with ',peer,' through ',server
self.punchingFailed(peer)
def _startPunching(self,server,peer,init=True):
print 'punchingggggggggggggggggggggggg',peer
if not init:
self.requestingPeers.append(peer)
PunchMessage.send(peer,'port', self.controlPipe,self.punchingFailed)
PunchMessage.send(peer, 'dataPort', self.holePipe,self.punchingFailed)
def receivedReply(self,peer,port):
if port=='port':
peer.portOk=True
else:
peer.dataPortOk=True
if peer.portOk and peer.dataPortOk:
peer.lastSend=0
self.peers.append(peer)
peer.conOk=True
print 'okkkkkkkkkkkk ',peer
try:
self.requestingPeers.remove(peer)
except:
pass
self.sendMessage(peer)
def sendMessage(self,peer):
clean=[]
#print 'should send message'
if not peer in self.checkPeers.keys():
#print 'returning'
return
for m in self.checkPeers[peer]:
send=True
for p in m['peers']:
if not p.conOk:
send=False
break
msg=m['msg']
#print msg
if send:
print 'sending'
peer=msg[2]
if len(peer)==1:
peer=peer[0]
print peer
msg[-1]._send(msg[0],msg[1],peer,msg[3])
clean.append(m)
if clean:
self.cleanCheckPeers(peer,clean)
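    # Drop the just-sent messages from this peer's pending queue and remove the
    # entries carrying the same message id from the other peers' queues.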
def cleanCheckPeers(self,peer,clean):
self.checkPeers[peer]=[m for m in self.checkPeers[peer] if m not in clean]
if not self.checkPeers[peer]:
self.checkPeers.pop(peer)
for m in clean:
id=m['id']
for p in m['peers']:
self.checkPeers[p]=[i for i in self.checkPeers[p] if i['id']!=id]
if not self.checkPeers[p]:
self.checkPeers.pop(p)
def punchingFailed(self,peer):
print "hole punching failed for ",peer
self.log.error("hole punching failed for %s",peer)
peer.conProb=True
try:
actions=self.checkPeers.pop(peer)
except:
return
for m in actions:
m['msg'][3].errback(defer.failure.Failure(MessageError(peer)))
id=m['id']
peers=[p for p in m['peers'] if p!=peer]
for p in peers:
for m1 in self.checkPeers[p]:
                    m1['peers'].remove(peer)
send=False
if peers:
send=True
for p in peers:
if not p.sendOk:
send=False
break
if send:
self.sendIdMessage(m)
def sendIdMessage(self,m):
id=m['id']
msg=m['msg']
peer=msg[2]
if len(peer)==1:
peer=peer[0]
msg[-1]._send(msg[0],msg[1],peer,msg[3])
clean=[]
for p in self.checkPeers.keys():
self.checkPeers[p]=[m1 for m1 in self.checkPeers[p] if m1['id']==id]
if not self.checkPeers[p]:
clean.append(p)
for p in clean:
self.checkPeers.pop(p)
def punchingRecipientFailed(self,peer):
peer.conProb=True
print "hole punching in recipient failed for ",peer
self.log.error("hole punching in recipient failed for %s",peer)
def keepAliveFailed(self,peer):
print "keep alive failed for ",peer
self.log.error("keep alive failed for %s",peer)
|
apache-2.0
| 1,949,153,275,522,752,800
| 32.390947
| 125
| 0.576165
| false
| 3.802249
| false
| false
| false
|
wowkin2/react-redux-api
|
apps/courses.py
|
1
|
3362
|
from flask_restful import reqparse, abort, Resource
from common import api, db
from constants import HttpStatus, EMPTY_JSON
from helpers import handle_bson
COLL_COURSES = 'courses'
course_parser = reqparse.RequestParser()
course_parser.add_argument('id', required=True)
course_parser.add_argument('title')
course_parser.add_argument('watchHref')
course_parser.add_argument('authorId')
course_parser.add_argument('category')
course_parser.add_argument('length')
class Course(Resource):
@staticmethod
def get(course_id):
course = db[COLL_COURSES].find_one({'id': course_id})
if course:
return handle_bson(course), HttpStatus.OK
else:
abort(HttpStatus.NOT_FOUND, message='Course "{}" not found'.format(course_id))
@staticmethod
def delete(course_id):
db[COLL_COURSES].remove({'id': course_id}, multi=False)
return EMPTY_JSON, HttpStatus.NO_CONTENT
@staticmethod
def post():
args = course_parser.parse_args()
course = {
'id': args.get('id'),
'authorId': args.get('authorId'),
'category': args.get('category'),
'watchHref': args.get('watchHref'),
'title': args.get('title'),
'length': args.get('length'),
}
if db[COLL_COURSES].find_one({'id': args.get('id')}) is None:
db[COLL_COURSES].insert_one(course)
return handle_bson(course), HttpStatus.CREATED
else:
return handle_bson(course), HttpStatus.CONFLICT
@staticmethod
def put(course_id):
args = course_parser.parse_args()
course = {
'id': args.get('id'),
'authorId': args.get('authorId'),
'category': args.get('category'),
'watchHref': args.get('watchHref'),
'title': args.get('title'),
'length': args.get('length'),
}
db[COLL_COURSES].update_one({'id': course_id}, {'$set': course}, upsert=True)
return handle_bson(course), HttpStatus.OK
class Courses(Resource):
@staticmethod
def get():
courses = list(db[COLL_COURSES].find({}))
return {'courses': handle_bson(courses)}, HttpStatus.OK
api.add_resource(Course, '/api/course', '/api/course/<course_id>')
api.add_resource(Courses, '/api/courses', '/api/courses/')
# @app.route('/api/courses', methods=['GET', 'POST'])
# def courses_handler():
# with open('courses.json', 'r') as f:
# courses = json.loads(f.read())
#
# if request.method == 'POST':
# new_course = request.json
# if new_course.get('id'):
# if new_course.get('id') in [x['id'] for x in courses]:
# # Update existing
# for course in courses:
# if course['id'] == new_course['id']:
# course.update(new_course)
# break
# else:
# # Add new
# courses.append(new_course)
#
# with open('courses.json', 'w') as f:
# f.write(json.dumps(courses, indent=4, separators=(',', ': ')))
#
# return Response(
# json.dumps(courses),
# mimetype='application/json',
# headers={
# 'Cache-Control': 'no-cache',
# 'Access-Control-Allow-Origin': '*'
# }
# )
|
mit
| 4,885,421,960,994,691,000
| 31.019048
| 90
| 0.560976
| false
| 3.565217
| false
| false
| false
|
Clinical-Developers/Clinical_Developer_Challenges
|
HCAs-Nurses/Solution.py
|
1
|
1260
|
# This is a maths problem, not primarily a programming one.
# As such, the solution might look like this; it is probably the most compact/clever way to express it in Python.
def staffing(staff_units, patients):
HCA, nurse = 2*staff_units-patients/2, patients/2-staff_units
if HCA < 0 or nurse < 0 or not HCA == int(HCA) or not nurse == int(nurse):
return "No solutions"
return HCA, nurse
'''
So the equation is balanced as follows
with HCAs/nurses expressed as x and y respectively and staff_units and patients expressed as s and p respectively:
x = s*2 - p/2
y = p/2-s
But there is no need to work them both out. Once you have calculated HCAs, for instance, you can just do:
y = s - x
since x and y must add up to s. If the result is negative or not a whole number then you have a problem and the equation can't be solved!
'''
# Programmatically this can be expressed more clearly as: (This is my preferred option as it is much easier to read)
def staffing(staff_units, patients):
HCA = 2*staff_units-patients/2
    nurse = staff_units - HCA
if HCA < 0 or nurse < 0 or HCA != int(HCA):
return "No solutions"
return HCA, nurse
# if you still don't believe me check out this repl: https://repl.it/Kewn/3
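# Quick sanity check (hypothetical numbers, not part of the challenge statement):
# 3 staff units with 8 patients solve to 2 HCAs and 1 nurse, while 1 staff unit
# with 8 patients has no whole-number solution.
if __name__ == '__main__':
    print(staffing(3, 8))  # 2 HCAs, 1 nurse
    print(staffing(1, 8))  # "No solutions"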
|
gpl-3.0
| -7,423,222,444,443,985,000
| 36.058824
| 128
| 0.706349
| false
| 3.255814
| false
| false
| false
|
bewareoftheapp/fluxapp
|
user/migrations/0001_initial.py
|
1
|
1219
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-09-18 23:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import user.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('members', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='RegistrationToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.CharField(default=user.models._generate_token, max_length=40)),
('email', models.EmailField(max_length=254)),
],
),
]
|
mit
| -2,177,746,098,601,376,000
| 32.861111
| 121
| 0.607055
| false
| 4.232639
| false
| false
| false
|
tkchafin/mrbait
|
tests/benchmark_MIS.py
|
1
|
5580
|
#!/usr/bin/python
import networkx as nx
import time
import networkx.algorithms.approximation as nxaa
import matplotlib.pyplot as plt
import numpy as np
from networkx.utils import powerlaw_sequence
"""Code for ATTEMPTING to approximate the maximal independent set in a graph
of conflicting sequences (e.g. aligned > threshold in pairwise alignment).
Unfortunately, this problem is NP-hard and can't be done efficiently... """
"""Conclusions: My naive version seems to be faster somehow."""
#TODO: Test that nodes retained in NAIVE definitely don't have any edges!!!
"""RESULTS: Mine is slightly faster and finds more nodes at smaller network sizes.
But the approximate algorithm scales better.
Approximate, Nodes=10, 100 reps
Average number of nodes: 5.07
24 ms
Approximate, Nodes=100, 100 reps
Average number of nodes: 56.09
228 ms
Approximate, Nodes=1000, 100 reps
Average number of nodes: 632.4
3529 ms
Approximate, Nodes=10000, 100 reps
Average number of nodes: 6828.18
95647 ms
------
Naive, Nodes=10, 100 reps
Average number of nodes: 5.62
Average number of edges: 0.0
40 ms
Naive, Nodes=100, 100 reps
Average number of nodes: 62.5
Average number of edges: 0.0
344 ms
Naive, Nodes=1000, 100 reps
Average number of nodes: 676.74
Average number of edges: 0.0
4313 ms
Approximate, Nodes=10000, 100 reps
Average number of nodes: 6796.16
93200 ms
"""
def time_me(method):
def wrapper(*args, **kw):
startTime = int(round(time.time() * 1000))
result = method(*args, **kw)
endTime = int(round(time.time() * 1000))
print(endTime - startTime,'ms')
return result
return wrapper
def multiGraphFromList(data):
G = nx.MultiGraph()
G.add_edges_from(data)
return(G)
@time_me
#Function to use the built-in independent set function in networkx
def approximateIndependentSet(nodes, num):
array = np.empty(num)
for i in range(num):
z = nx.utils.create_degree_sequence(nodes,powerlaw_sequence)
G = nx.configuration_model(z)
graph=nx.Graph(G)
graph.remove_edges_from(graph.selfloop_edges())
new = nx.maximal_independent_set(graph, nodes=None)
array[i] = len(new)
avg = np.average(array)
print("Average number of nodes: ",avg)
@time_me
#Function to use VERY SLOW version I made
def naiveIndependentSet(nodes, num):
array = np.empty(num)
edges = np.empty(num)
for i in range(num):
z = nx.utils.create_degree_sequence(nodes,powerlaw_sequence)
G = nx.configuration_model(z)
G=nx.Graph(G)
G.remove_edges_from(G.selfloop_edges())
#Make a copy of graph
C = G.copy()
#Loop through ALL edges
for n in G.edges_iter():
#If either node has been trimmed from Copy, skip.
if C.has_edge(n[0],n[1]):
right = n[1]
left = n[0]
right_n = len(C.neighbors(right))
left_n = len(C.neighbors(left))
#print("Right neighbor <",right,"> has ", right_n, " connections.")
#print("Left neighbor <",left,"> has ", left_n, " connections.")
#Remove right if it has more neighbors, otherwise remove left
if (right_n > left_n):
C.remove_node(right)
else:
C.remove_node(left)
array[i] = C.number_of_nodes()
edges[i] = C.number_of_edges()
avg = np.average(array)
eavg = np.average(edges)
print("Average number of nodes: ",avg)
print("Average number of edges: ",eavg)
#Function to plot a complete graph, coloring a list of 'chosen' or 'excluded' (subset) nodes
def plotColorNodes(G, listnodes):
color_map = []
for node in G:
if node in listnodes:
color_map.append("red")
else:
color_map.append("black")
nx.draw(G, node_color = color_map, with_labels=True)
plt.show()
#Tests of functions
example_10 = [(1,2),(2,4),(1,3),(1,7),(3,2),(1,4),(5,6),(6,8),(3,7),(4,8),(9,10)]
example_100 = [(19,29),(28,48),(17,36),(16,72),(33,2),(1,47),(55,66),(62,87),(53,57),(64,68),(9,100),
(11,22),(24,46),(11,32),(89,78),(31,24),(19,45),(54,6),(16,88),(3,7),(4,88),(95,43),
(11,28),(27,4),(1,38),(13,7),(3,2),(1,48),(49,57),(61,8),(98,79),(81,80),(97,100),
(12,29),(26,4),(1,37),(1,71),(39,2),(1,47),(50,58),(36,8),(63,78),(24,82),(96,100),
(13,30),(25,4),(78,36),(12,7),(40,2),(1,46),(56,59),(61,99),(3,77),(4,83),(95,11),
(14,12),(24,4),(1,35),(14,15),(3,2),(1,42),(55,60),(6,100),(3,76),(4,84),(92,94),
(15,2),(23,4),(2,31),(1,71),(3,2),(1,43),(51,6),(63,64),(70,7),(4,85),(90,93),
(16,23),(21,34),(14,32),(12,7),(12,13),(1,41),(52,61),(62,8),(71,72),(4,86),(91,10),
(17,21),(22,64),(27,33),(14,7),(83,72),(1,45),(53,69),(65,8),(74,73),(4,87),(89,10),
(18,22),(20,4),(59,34),(1,45),(91,75),(19,44),(54,67),(66,68),(31,75),(45,18),(90,10)
]
G10 = multiGraphFromList(example_10)
G100 = multiGraphFromList(example_100)
# print("Approximate, Nodes=10, 100 reps")
# approximateIndependentSet(10,100)
# print("Approximate, Nodes=100, 100 reps")
# approximateIndependentSet(100,100)
# print("Approximate, Nodes=1000, 100 reps")
# approximateIndependentSet(1000,100)
# print("Approximate, Nodes=10000, 100 reps")
# approximateIndependentSet(10000,100)
# print("\n------\n")
#
# print("Naive, Nodes=10, 100 reps")
# naiveIndependentSet(10,100)
# print("Naive, Nodes=100, 100 reps")
# naiveIndependentSet(100,100)
# print("Naive, Nodes=1000, 100 reps")
# naiveIndependentSet(1000,100)
# print("Approximate, Nodes=10000, 100 reps")
# approximateIndependentSet(10000,100)
|
gpl-3.0
| 7,596,444,725,467,537,000
| 31.823529
| 101
| 0.62957
| false
| 2.907764
| false
| false
| false
|
skapfer/rubber
|
src/depend.py
|
1
|
11127
|
"""
This module contains code for handling dependency graphs.
"""
# vim: noet:ts=4
import logging
msg = logging.getLogger (__name__)
import os.path
import subprocess
import rubber.contents
from rubber.util import _
class MakeError (Exception):
def __init__ (self, msg, errors):
super (MakeError, self).__init__ (msg)
self.msg = msg
self.errors = errors
# Dictionnary allowing to find a Node by one of its products.
# It should not be used outside this module.
_producer = {}
def clean_all_products ():
"""Clean all products of all recipes."""
for path in _producer:
if os.path.exists (path):
msg.info (_("removing %s"), path)
os.remove (path)
def save_cache (cache_path, final):
msg.debug (_('Creating or overwriting cache file %s') % cache_path)
with open (cache_path, 'tw') as f:
for node in final.all_producers ():
if node.snapshots is not None:
f.write (node.primary_product ())
f.write ('\n')
for i in range (len (node.sources)):
f.write (' ')
f.write (rubber.contents.cs2str (node.snapshots [i]))
f.write (' ')
f.write (node.sources [i])
f.write ('\n')
def load_cache (cache_path):
msg.debug (_('Reading external cache file %s') % cache_path)
with open (cache_path) as f:
line = f.readline ()
while line:
product = line [:-1]
sources = []
snapshots = []
while True:
line = f.readline ()
if not line.startswith (' '): # Including end of file.
break
limit = 2 + rubber.contents.cs_str_len
snapshots.append (rubber.contents.str2cs (line [2:limit]))
sources.append (line [limit + 1:-1])
try:
node = _producer [product]
except KeyError:
msg.debug (_('%s: no such recipe anymore') % product)
else:
if node.sources != sources:
msg.debug (_('%s: depends on %s not anymore on %s'), product,
" ".join (node.sources), " ".join (sources))
elif node.snapshots is not None:
# FIXME: this should not happen. See cweb-latex test.
msg.debug (_('%s: rebuilt before cache read'), product)
else:
msg.debug (_('%s: using cached checksums'), product)
node.snapshots = snapshots
class Node (object):
"""
This is the base class to represent dependency nodes. It provides the base
functionality of date checking and recursive making, supposing the
existence of a method `run()' in the object.
"""
def __init__ (self):
"""
The node registers itself in the dependency set,
        and if a given dependency is not known in the set, a leaf node is made
for it.
"""
self.product = None
# All prerequisites for this recipe.
self.sources = []
# A snapshot of each source as they were used during last
# successful build, or None if no build has been attempted
# yet. The order in the list is the one in self.sources,
# which does not change during build.
self.snapshots = None
# making is the lock guarding against making a node while making it
self.making = False
def all_producers (self):
def rec (node):
if not node.making:
node.making = True
try:
yield node
for source in node.sources:
try:
child = _producer [source]
except KeyError:
pass
else:
yield from rec (child)
finally:
self.making = False
yield from rec (self)
def all_leaves (self):
"""Show sources that are not produced."""
# We need to build a set in order to remove duplicates.
result = set ()
def rec (node):
if not node.making:
node.making = True
try:
for source in node.sources:
if source in _producer:
rec (_producer [source])
else:
result.add (source)
finally:
self.making = False
rec (self)
return result
def add_source (self, name):
"""
Register a new source for this node. If the source is unknown, a leaf
node is made for it.
"""
# Do nothing when the name is already listed.
# The same source may be inserted many times in the same
# document (an image containing a logo for example).
if name not in self.sources:
self.sources.append (name)
def remove_source (self, name):
"""
Remove a source for this node.
"""
# Fail if the name is not listed.
self.sources.remove (name)
def products (self):
"""An iterable with all all products for this recipe.
This function is not efficient, but called only once by
cmdline.py with a specific command-line option."""
return (key for key, value in _producer.items () if value is self)
def add_product (self, name):
"""
Register a new product for this node.
"""
# TODO: why does this break? assert name not in _producer, name
_producer [name] = self
if self.product is None:
self.product = name
def primary_product (self):
return self.product
def replace_product (self, name):
"""Trick for latex.py"""
# TODO: why does this break? assert name not in _producer, name
del _producer [self.product]
self.product = name
_producer [name] = self
def make (self):
"""
Make the destination file. This recursively makes all dependencies,
then compiles the target if dependencies were modified. The return
value is
- False when nothing had to be done
- True when something was recompiled (among all dependencies)
MakeError is raised in case of error.
"""
# The recurrence is similar to all_producers, except that we
# try each compilations a few times.
pp = self.primary_product ()
if self.making:
msg.debug (_("%s: cyclic dependency, pruning"), pp)
return False
rv = False
self.making = True
try:
for patience in range (5):
msg.debug (_('%s: made from %s attempt %i'),
self.product, ','.join (self.sources),
patience)
# make our sources
for source in self.sources:
try:
dep = _producer [source]
except KeyError:
msg.debug (_("%s: needs %s, leaf"), pp, source)
else:
msg.debug (_("%s: needs %s, making %s"), pp, source,
dep.primary_product ())
rv = dep.make () or rv
# Once all dependent recipes have been run, check the
# state of the sources on disk.
snapshots = tuple (map (rubber.contents.snapshot, self.sources))
missing = ','.join (
self.sources [i] for i in range (len (snapshots))
if snapshots [i] == rubber.contents.NO_SUCH_FILE)
if missing:
if isinstance (self, rubber.converters.latex.LaTeXDep) \
and self.snapshots is None \
and patience == 0:
msg.debug (_("%s: missing %s, but first LaTeX run"), pp, missing)
else:
msg.debug (_("%s: missing %s, pruning"), pp, missing)
return rv
if self.snapshots is None:
msg.debug (_("%s: first attempt or --force, building"), pp)
else:
# There has already been a successful build.
changed = ','.join (
self.sources [i] for i in range (len (snapshots))
if self.snapshots [i] != snapshots [i])
if not changed:
msg.debug (_("%s: sources unchanged since last build"), pp)
return rv
msg.debug (_("%s: some sources changed: %s"), pp, changed)
if not self.run ():
raise MakeError (_("Recipe for {} failed").format (pp),
self.get_errors ())
# Build was successful.
self.snapshots = snapshots
rv = True
# Patience exhausted.
raise MakeError (_("Contents of {} do not settle").format (pp),
self.get_errors ())
finally:
self.making = False
def run (self):
"""
This method is called when a node has to be (re)built. It is supposed
to rebuild the files of this node, returning true on success and false
on failure. It must be redefined by derived classes.
"""
return False
def get_errors (self):
"""
Report the errors that caused the failure of the last call to run, as
an iterable object.
"""
return []
def clean (self):
"""
Remove additional files for this recipe.
Nothing recursive happens here.
Files registered as products are removed by rubber.clean ().
"""
class Shell (Node):
"""
This class specializes Node for generating files using shell commands.
"""
def __init__ (self, command):
super ().__init__ ()
self.command = command
self.stdout = None
def run (self):
msg.info(_("running: %s") % ' '.join(self.command))
process = subprocess.Popen (self.command,
stdin=subprocess.DEVNULL,
stdout=self.stdout)
if process.wait() != 0:
msg.error(_("execution of %s failed") % self.command[0])
return False
return True
class Pipe (Shell):
"""
This class specializes Node for generating files using the stdout of shell commands.
The 'product' will receive the stdout of 'command'.
"""
def __init__ (self, command, product):
super ().__init__ (command)
self.add_product (product)
def run (self):
with open (self.primary_product (), 'bw') as self.stdout:
ret = super (Pipe, self).run ()
return ret
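# Minimal usage sketch (hypothetical command and file names, not taken from
# rubber itself): produce "listing.txt" from the stdout of "ls -l", remaking it
# only when the registered source changes between runs.
#
#     node = Pipe(["ls", "-l"], "listing.txt")
#     node.add_source("Makefile")
#     node.make()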
|
gpl-2.0
| 1,698,966,926,278,784,000
| 35.126623
| 89
| 0.512717
| false
| 4.665409
| false
| false
| false
|
bavardage/qtile
|
libqtile/widget/base.py
|
1
|
5948
|
from .. import command, utils, bar
LEFT = object()
CENTER = object()
class _Drawer:
"""
A helper class for drawing and text layout.
"""
_fallbackFont = "-*-fixed-bold-r-normal-*-15-*-*-*-c-*-*-*"
def __init__(self, qtile, window):
self.qtile, self.window = qtile, window
self.win = window.window
self.gc = self.win.create_gc()
self.colormap = qtile.display.screen().default_colormap
self.background, self.foreground = None, None
@utils.LRUCache(100)
def color(self, color):
return self.colormap.alloc_named_color(color).pixel
def setFont(self, font):
f = self.qtile.display.open_font(font)
if not f:
self.qtile.log.add("Could not open font %s, falling back."%font)
f = self.qtile.display.open_font(self._fallbackFont)
self.font = f
self.gc.change(font=f)
@utils.LRUCache(100)
def text_extents(self, font, i):
return font.query_text_extents(i)
def textsize(self, font, *text):
"""
Return a textheight, textwidth tuple, for a box large enough to
enclose any of the passed strings.
"""
textheight, textwidth = 0, 0
for i in text:
data = self.text_extents(font, i)
if data.font_ascent > textheight:
textheight = data.font_ascent
if data.overall_width > textwidth:
textwidth = data.overall_width
return textheight, textwidth
def change(self, **kwargs):
newargs = kwargs.copy()
newargs.pop("background", None)
newargs.pop("foreground", None)
if kwargs.has_key("background") and self.background != kwargs["background"]:
self.background = kwargs["background"]
newargs["background"] = self.color(kwargs["background"])
if kwargs.has_key("foreground") and self.background != kwargs["foreground"]:
self.background = kwargs["foreground"]
newargs["foreground"] = self.color(kwargs["foreground"])
if newargs:
self.gc.change(**newargs)
def textbox(self, text, x, y, width, height, padding = 0,
alignment=LEFT, background=None, **attrs):
"""
Draw text in the specified box using the current font. Text is
centered vertically, and left-aligned.
:background Fill box with the specified color first.
:padding Padding to the left of the text.
"""
text = text or " "
if background:
self.rectangle(x, y, width, height, background)
attrs["background"] = background
if attrs:
self.change(**attrs)
textheight, textwidth = self.textsize(self.font, text)
y = y + textheight + (height - textheight)/2
if alignment == LEFT:
x = x + padding
else:
x = x + (width - textwidth)/2
self.win.draw_text(self.gc, x, y, text)
def rectangle(self, x, y, width, height, fillColor=None, borderColor=None, borderWidth=1):
if fillColor:
self.change(foreground=fillColor)
self.win.fill_rectangle(self.gc, x, 0, width, height)
if borderColor:
self.change(
foreground=borderColor,
line_width=borderWidth
)
self.win.rectangle(self.gc, x, 0, width, height)
class _Widget(command.CommandObject):
"""
Each widget must set its own width attribute when the _configure method
is called. If this is set to the special value bar.STRETCH, the bar itself
will set the width to the maximum remaining space, after all other
widgets have been configured. Only ONE widget per bar can have the
bar.STRETCH width set.
The offset attribute is set by the Bar after all widgets have been
configured.
"""
font = "-*-luxi mono-*-r-*-*-12-*-*-*-*-*-*-*"
width = None
offset = None
name = None
@property
def win(self):
return self.bar.window.window
@property
def colormap(self):
return self.qtile.display.screen().default_colormap
def _configure(self, qtile, bar, event, theme):
self.qtile, self.bar, self.event, self.theme = qtile, bar, event, theme
self._drawer = _Drawer(qtile, self.bar.window)
self._drawer.setFont(self.font)
def clear(self):
self._drawer.rectangle(
self.offset, 0, self.width, self.bar.size,
self.bar.background
)
def info(self):
return dict(
name = self.__class__.__name__,
offset = self.offset,
width = self.width,
)
def click(self, x, y):
pass
def get(self, q, name):
"""
Utility function for quick retrieval of a widget by name.
"""
w = q.widgetMap.get(name)
if not w:
raise command.CommandError("No such widget: %s"%name)
return w
def _items(self, name):
if name == "bar":
return True, None
def _select(self, name, sel):
if name == "bar":
return self.bar
def cmd_info(self):
"""
Info for this object.
"""
return dict(name=self.name)
class _TextBox(_Widget):
PADDING = 5
def __init__(self, text=" ", width=bar.STRETCH):
self.width = width
self.text = text
def _configure(self, qtile, bar, event, theme):
_Widget._configure(self, qtile, bar, event, theme)
if theme.font:
self.font = theme.font
def draw(self):
self._drawer.textbox(
self.text,
self.offset, 0, self.width, self.bar.size,
padding = self.PADDING,
foreground=self.theme.fg_normal,
background=self.theme.bg_normal,
)
|
mit
| 11,768,831,008,628,596
| 31.502732
| 94
| 0.566745
| false
| 3.908016
| true
| false
| false
|
elopio/snapcraft
|
snapcraft/internal/lxd/_cleanbuilder.py
|
1
|
2770
|
#!/usr/bin/python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import petname
import subprocess
from ._containerbuild import Containerbuild
from snapcraft.internal.errors import ContainerConnectionError
logger = logging.getLogger(__name__)
class Cleanbuilder(Containerbuild):
def __init__(self, *, output=None, source, project_options,
metadata=None, remote=None):
container_name = petname.Generate(3, '-')
super().__init__(output=output, source=source,
project_options=project_options, metadata=metadata,
container_name=container_name, remote=remote)
def _ensure_container(self):
try:
subprocess.check_call([
'lxc', 'launch', '-e', self._image, self._container_name])
except subprocess.CalledProcessError as e:
raise ContainerConnectionError('Failed to setup container')
self._configure_container()
self._wait_for_network()
self._container_run(['apt-get', 'update'])
# Because of https://bugs.launchpad.net/snappy/+bug/1628289
# Needed to run snapcraft as a snap and build-snaps
self._container_run(['apt-get', 'install', 'squashfuse', '-y'])
self._inject_snapcraft(new_container=True)
def _setup_project(self):
logger.info('Setting up container with project assets')
tar_filename = self._source
# os.sep needs to be `/` and on Windows it will be set to `\`
dst = '{}/{}'.format(self._project_folder,
os.path.basename(tar_filename))
self._container_run(['mkdir', self._project_folder])
self._push_file(tar_filename, dst)
self._container_run(['tar', 'xvf', os.path.basename(tar_filename)],
cwd=self._project_folder)
def _finish(self):
# os.sep needs to be `/` and on Windows it will be set to `\`
src = '{}/{}'.format(self._project_folder, self._snap_output)
self._pull_file(src, self._snap_output)
logger.info('Retrieved {}'.format(self._snap_output))
|
gpl-3.0
| 4,744,793,001,829,084,000
| 40.343284
| 76
| 0.646209
| false
| 4.049708
| false
| false
| false
|
i-wind/pyqt_todo
|
db/model.py
|
1
|
2729
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
@script : model.py
@created : 2012-11-04 01:48:15.090
@changed : 2012-11-08 10:26:47.237
@creator : mkpy.py --version 0.0.27
@author : Igor A.Vetrov <qprostu@gmail.com>
@about : model of TODO application
"""
from __future__ import print_function
from argparse import ArgumentParser
from .sqlite import Table, Field
__revision__ = 11
__project__ = "Todo"
def getRevision():
"""Callback method for -r/--revision option"""
return str(__revision__)
class Priority(Table):
"""Priority model class"""
_fields = [
( "code" , Field(fieldtype="integer", notnull=True, primary=True) ),
( "name" , Field(notnull=True) ),
( "created", Field(fieldtype="timestamp", default="(datetime('now', 'localtime'))") ),
]
def __init__(self, db):
self.__class__._tableName = __project__ + self.__class__.__name__
super(Priority, self).__init__(db)
def setDefaults(self):
self.exec( "insert into {} (code, name) values(?, ?)".format(self._tableName), (1, "Low") )
self.exec( "insert into {} (code, name) values(?, ?)".format(self._tableName), (2, "Medium") )
self.exec( "insert into {} (code, name) values(?, ?)".format(self._tableName), (3, "High") )
self.db.commit()
def getCode(self, name):
row = self.select( "select code from {} where name=?;".format(self._tableName), (name,) )[0]
return row["code"]
def getName(self, _id):
return self.getValue(_id, "name")[0]
def listNames(self):
rows = self.select( "select name from {};".format(self._tableName) )
return [row["name"] for row in rows]
class Task(Table):
"""Task model class"""
_fields = [
( "name" , Field(notnull=True) ),
( "priority" , Field(fieldtype="integer", default=2, foreignkey="TodoPriority(code)") ),
( "deadline" , Field(fieldtype="date", notnull=True, default="(date('now', 'localtime'))") ),
# status may be 0 or 1, if 1 - task completed
( "status" , Field(fieldtype="integer", default=0, index=True) ),
( "completed", Field(fieldtype="timestamp") ),
( "created" , Field(fieldtype="timestamp", default="(datetime('now', 'localtime'))") ),
]
def __init__(self, db):
self.__class__._tableName = __project__ + self.__class__.__name__
super(Task, self).__init__(db)
if __name__ == '__main__':
# setup global parser
parser = ArgumentParser(description='Program description goes here...')
parser.add_argument('-r', '--revision', action='version', version='%(prog)s revision: ' + getRevision())
args = parser.parse_args()
# end of model.py
|
mit
| -2,385,009,913,102,353,000
| 30.011364
| 108
| 0.584463
| false
| 3.441362
| false
| false
| false
|
trevor/calendarserver
|
txdav/caldav/datastore/scheduling/test/test_utils.py
|
1
|
7711
|
##
# Copyright (c) 2013-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for txdav.caldav.datastore.utils
"""
from pycalendar.datetime import DateTime
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from txdav.caldav.datastore.scheduling.utils import getCalendarObjectForRecord, \
extractEmailDomain, uidFromCalendarUserAddress
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
now = DateTime.getToday().getYear()
ORGANIZER_ICS = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Apple Inc.//iCal 4.0.1//EN
CALSCALE:GREGORIAN
BEGIN:VEVENT
CREATED:20100303T181216Z
UID:685BC3A1-195A-49B3-926D-388DDACA78A6
TRANSP:OPAQUE
SUMMARY:Ancient event
DTSTART:%(year)s0307T111500Z
DURATION:PT1H
DTSTAMP:20100303T181220Z
ORGANIZER:urn:uuid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:uuid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:uuid:user02
SEQUENCE:2
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n") % {"year": now + 1}
ATTENDEE_ICS = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Apple Inc.//iCal 4.0.1//EN
CALSCALE:GREGORIAN
BEGIN:VEVENT
CREATED:20100303T181216Z
UID:685BC3A1-195A-49B3-926D-388DDACA78A6
TRANSP:OPAQUE
SUMMARY:Ancient event
DTSTART:%(year)s0307T111500Z
DURATION:PT1H
DTSTAMP:20100303T181220Z
ORGANIZER:urn:uuid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:uuid:user01
ATTENDEE;PARTSTAT=ACCEPTED:urn:uuid:user02
SEQUENCE:2
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n") % {"year": now + 1}
class RecipientCopy(CommonCommonTests, unittest.TestCase):
"""
    Tests for the scheduling utility functions: getCalendarObjectForRecord,
    uidFromCalendarUserAddress and extractEmailDomain
"""
metadata = {
"accessMode": "PUBLIC",
"isScheduleObject": True,
"scheduleTag": "abc",
"scheduleEtags": (),
"hasPrivateComment": False,
}
requirements = {
"user01" : {
"calendar1" : {
"1.ics" : (ORGANIZER_ICS, metadata,),
}
},
"user02" : {
"calendar2" : {
"2.ics" : (ATTENDEE_ICS, metadata,),
},
"calendar3" : {
"3.ics" : (ATTENDEE_ICS, metadata,),
}
}
}
@inlineCallbacks
def setUp(self):
yield super(RecipientCopy, self).setUp()
yield self.buildStoreAndDirectory()
yield self.populate()
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
def storeUnderTest(self):
"""
Create and return a L{CalendarStore} for testing.
"""
return self._sqlCalendarStore
@inlineCallbacks
def test_getCalendarObjectForRecord(self):
"""
Test that L{txdav.caldav.datastore.scheduling.utils.getCalendarObjectForRecord} detects and removes
resources with duplicate UIDs in the same calendar home.
"""
# Check that expected resources are present
txn = self.transactionUnderTest()
for home_uid, calendar_name, resource_name in (
("user01", "calendar1", "1.ics",),
("user02", "calendar2", "2.ics",),
("user02", "calendar3", "3.ics",),
):
resource = (yield self.calendarObjectUnderTest(txn, name=resource_name, calendar_name=calendar_name, home=home_uid))
self.assertNotEqual(resource, None)
yield self.commit()
# Look up resource by UID in home where only one exists
principal = yield self.directory.recordWithUID(u"user01")
txn = self.transactionUnderTest()
resource = (yield getCalendarObjectForRecord(txn, principal, "685BC3A1-195A-49B3-926D-388DDACA78A6"))
self.assertEqual(resource.name(), "1.ics")
self.assertEqual(resource._parentCollection.name(), "calendar1")
self.assertEqual(resource._parentCollection.viewerHome().uid(), "user01")
yield self.commit()
# Check that expected resources are still present
txn = self.transactionUnderTest()
for home_uid, calendar_name, resource_name in (
("user01", "calendar1", "1.ics",),
("user02", "calendar2", "2.ics",),
("user02", "calendar3", "3.ics",),
):
resource = (yield self.calendarObjectUnderTest(txn, name=resource_name, calendar_name=calendar_name, home=home_uid))
self.assertNotEqual(resource, None)
yield self.commit()
# Look up resource by UID in home where two exists
principal = yield self.directory.recordWithUID("user02")
txn = self.transactionUnderTest()
resource = (yield getCalendarObjectForRecord(txn, principal, "685BC3A1-195A-49B3-926D-388DDACA78A6"))
self.assertTrue(resource.name() in ("2.ics", "3.ics",))
self.assertTrue(resource._parentCollection.name() in ("calendar2", "calendar3",))
self.assertEqual(resource._parentCollection.viewerHome().uid(), "user02")
yield self.commit()
# Check that expected resources are still present, but the duplicate missing
txn = self.transactionUnderTest()
resource = (yield self.calendarObjectUnderTest(txn, name="1.ics", calendar_name="calendar1", home="user01"))
self.assertNotEqual(resource, None)
resource2 = (yield self.calendarObjectUnderTest(txn, name="2.ics", calendar_name="calendar2", home="user02"))
resource3 = (yield self.calendarObjectUnderTest(txn, name="3.ics", calendar_name="calendar3", home="user02"))
self.assertTrue((resource2 is not None) ^ (resource3 is not None))
yield self.commit()
# Look up resource where principal exists but home does not
principal = yield self.directory.recordWithUID("user102") # ASKCYRUS: but user102 doesn't exist
txn = self.transactionUnderTest()
resource = (yield getCalendarObjectForRecord(txn, principal, "685BC3A1-195A-49B3-926D-388DDACA78A6"))
self.assertTrue(resource is None)
yield self.commit()
def test_uidFromCalendarUserAddress(self):
"""
Test that L{uidFromCalendarUserAddress} returns the expected results.
"""
data = (
("urn:x-uid:foobar", "foobar"),
("urn:uuid:foobar", "foobar"),
("urn:uuid:49DE7436-F01C-4AD8-B685-A94303F40301", "49DE7436-F01C-4AD8-B685-A94303F40301"),
("/principals/__uids__/foobar", "foobar"),
("/principals/users/foobar", None),
("/principals/groups/foobar", None),
("mailto:foo@example.com", None),
)
for cuaddr, uid in data:
self.assertEqual(uidFromCalendarUserAddress(cuaddr), uid)
def test_extractEmailDomain(self):
"""
Test that L{extractEmailDomain} returns the expected results.
"""
data = (
("mailto:foo@example.com", "example.com"),
("mailto:foo@example.com?subject=bar", "example.com"),
("mailto:foo", ""),
("mailto:foo@", ""),
("http://foobar.com", ""),
)
for mailto, domain in data:
self.assertEqual(extractEmailDomain(mailto), domain)
|
apache-2.0
| 683,524,874,323,820,200
| 33.734234
| 128
| 0.652315
| false
| 3.62359
| true
| false
| false
|
synctree/synctree-awsebcli
|
ebcli/lib/aws.py
|
1
|
10403
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import random
import warnings
import os
import botocore
import botocore.session
import botocore.exceptions
from botocore.loaders import Loader
from cement.utils.misc import minimal_logger
from ebcli import __version__
from ..objects.exceptions import ServiceError, NotAuthorizedError, \
    CredentialsError, NoRegionError, ValidationError, \
    InvalidProfileError, ConnectionError, AlreadyExistsError, NotFoundError, \
    ThrottlingError, MaxRetriesError
from .utils import static_var
from .botopatch import apply_patches
LOG = minimal_logger(__name__)
BOTOCORE_DATA_FOLDER_NAME = 'botocoredata'
_api_clients = {}
_profile = None
_profile_env_var = 'AWS_EB_PROFILE'
_id = None
_key = None
_region_name = None
_verify_ssl = True
_endpoint_url = None
_debug = False
apply_patches()
def _flush():
# Should be used for resetting tests only
global _api_clients, _profile, _id, _key, _region_name, _verify_ssl
_api_clients = {}
_get_botocore_session.botocore_session = None
_profile = None
_id = None
_key = None
_region_name = None
_verify_ssl = True
def set_session_creds(id, key):
global _api_clients, _id, _key
_id = id
_key = key
# invalidate all old clients
_api_clients = {}
def set_profile(profile):
global _profile, _api_clients
_profile = profile
# Invalidate session and old clients
_get_botocore_session.botocore_session = None
_api_clients = {}
def set_region(region_name):
global _region_name
_region_name = region_name
# Invalidate session and old clients
_get_botocore_session.botocore_session = None
_api_clients = {}
def set_endpoint_url(endpoint_url):
global _endpoint_url
_endpoint_url = endpoint_url
def no_verify_ssl():
global _verify_ssl
_verify_ssl = False
def set_profile_override(profile):
global _profile_env_var
set_profile(profile)
_profile_env_var = None
def set_debug():
global _debug
_debug = True
def _set_user_agent_for_session(session):
session.user_agent_name = 'eb-cli'
session.user_agent_version = __version__
def _get_data_loader():
# Creates a botocore data loader that loads custom data files
# FIRST, creating a precedence for custom files.
data_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
BOTOCORE_DATA_FOLDER_NAME)
return Loader(extra_search_paths=[data_folder, Loader.BUILTIN_DATA_PATH],
include_default_search_paths=False)
def _get_client(service_name):
aws_access_key_id = _id
aws_secret_key = _key
if service_name in _api_clients:
return _api_clients[service_name]
session = _get_botocore_session()
if service_name == 'elasticbeanstalk':
endpoint_url = _endpoint_url
else:
endpoint_url = None
try:
LOG.debug('Creating new Botocore Client for ' + str(service_name))
client = session.create_client(service_name,
endpoint_url=endpoint_url,
# region_name=_region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_key,
verify=_verify_ssl)
except botocore.exceptions.ProfileNotFound as e:
raise InvalidProfileError(e)
LOG.debug('Successfully created session for ' + service_name)
_api_clients[service_name] = client
return client
@static_var('botocore_session', None)
def _get_botocore_session():
global _region_name
if _get_botocore_session.botocore_session is None:
LOG.debug('Creating new Botocore Session')
LOG.debug('Botocore version: {0}'.format(botocore.__version__))
session = botocore.session.get_session({
'profile': (None, _profile_env_var, _profile, None),
})
if _region_name or not session.get_config_variable('region'):
session.set_config_variable('region', _region_name)
_region_name = session.get_config_variable('region')
session.register_component('data_loader', _get_data_loader())
_set_user_agent_for_session(session)
_get_botocore_session.botocore_session = session
if _debug:
session.set_debug_logger()
return _get_botocore_session.botocore_session
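# Note added for clarity: the static_var decorator above appears to attach a
# 'botocore_session' attribute (initialized to None) to this function object --
# _flush() above resets the same attribute -- and that attribute is the
# memoization slot read and written inside _get_botocore_session.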
def get_region_name():
return _region_name
def make_api_call(service_name, operation_name, **operation_options):
try:
client = _get_client(service_name)
except botocore.exceptions.UnknownEndpointError as e:
raise NoRegionError(e)
except botocore.exceptions.PartialCredentialsError as e:
LOG.debug('Credentials incomplete')
raise CredentialsError('Your credentials are not complete. Error: {0}'
.format(e))
except botocore.exceptions.NoRegionError:
raise NoRegionError()
if not _verify_ssl:
warnings.filterwarnings("ignore")
operation = getattr(client, operation_name)
region = _region_name
if not region:
region = 'default'
MAX_ATTEMPTS = 10
attempt = 0
while True:
attempt += 1
if attempt > 1:
LOG.debug('Retrying -- attempt #' + str(attempt))
delay = _get_delay(attempt)
time.sleep(delay)
try:
LOG.debug('Making api call: (' +
service_name + ', ' + operation_name +
') to region: ' + region + ' with args:' + str(operation_options))
response_data = operation(**operation_options)
status = response_data['ResponseMetadata']['HTTPStatusCode']
LOG.debug('API call finished, status = ' + str(status))
if response_data:
LOG.debug('Response: ' + str(response_data))
return response_data
except botocore.exceptions.ClientError as e:
response_data = e.response
LOG.debug('Response: ' + str(response_data))
status = response_data['ResponseMetadata']['HTTPStatusCode']
LOG.debug('API call finished, status = ' + str(status))
try:
message = str(response_data['Error']['Message'])
except KeyError:
message = ""
if status == 400:
# Convert to correct 400 error
error = _get_400_error(response_data, message)
if isinstance(error, ThrottlingError):
LOG.debug('Received throttling error')
if attempt > MAX_ATTEMPTS:
raise MaxRetriesError('Max retries exceeded for '
'throttling error')
else:
raise error
elif status == 403:
LOG.debug('Received a 403')
if not message:
message = 'Are your permissions correct?'
raise NotAuthorizedError('Operation Denied. ' + message)
elif status == 404:
LOG.debug('Received a 404')
raise NotFoundError(message)
elif status == 409:
LOG.debug('Received a 409')
raise AlreadyExistsError(message)
elif status in (500, 503, 504):
LOG.debug('Received 5XX error')
if attempt > MAX_ATTEMPTS:
raise MaxRetriesError('Max retries exceeded for '
'service error (5XX)')
else:
raise ServiceError('API Call unsuccessful. '
'Status code returned ' + str(status))
except botocore.exceptions.NoCredentialsError:
LOG.debug('No credentials found')
raise CredentialsError('Operation Denied. You appear to have no'
' credentials')
except botocore.exceptions.PartialCredentialsError as e:
LOG.debug('Credentials incomplete')
raise CredentialsError(str(e))
except (botocore.exceptions.ValidationError,
botocore.exceptions.ParamValidationError) as e:
raise ValidationError(str(e))
except botocore.exceptions.BotoCoreError as e:
LOG.error('Botocore Error')
raise
except IOError as error:
if hasattr(error.args[0], 'reason') and str(error.args[0].reason) == \
'[Errno -2] Name or service not known':
raise ConnectionError()
LOG.error('Error while contacting Elastic Beanstalk Service')
LOG.debug('error:' + str(error))
raise ServiceError(error)
def _get_delay(attempt_number):
if attempt_number == 1:
return 0
# Exponential backoff
rand_int = random.randrange(0, 2**attempt_number)
delay = rand_int * 0.05 # delay time is 50 ms
LOG.debug('Sleeping for ' + str(delay) + ' seconds.')
return delay
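# Illustrative values for the exponential backoff above (added comment, not part
# of the original module): with the 50 ms step, attempt 2 sleeps up to ~0.15 s,
# attempt 5 up to ~1.55 s and attempt 10 up to ~51 s; make_api_call stops
# retrying once the attempt count exceeds MAX_ATTEMPTS.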
def _get_400_error(response_data, message):
code = response_data['Error']['Code']
LOG.debug('Received a 400 Error')
if code == 'InvalidParameterValue':
return InvalidParameterValueError(message)
elif code == 'InvalidQueryParameter':
return InvalidQueryParameterError(message)
elif code.startswith('Throttling'):
return ThrottlingError(message)
elif code.startswith('ResourceNotFound'):
return NotFoundError(message)
else:
# Not tracking this error
return ServiceError(message, code=code)
class InvalidParameterValueError(ServiceError):
pass
class InvalidQueryParameterError(ServiceError):
pass
class ThrottlingError(ServiceError):
pass
class MaxRetriesError(ServiceError):
pass
|
apache-2.0
| -8,400,662,935,267,153,000
| 31.108025
| 88
| 0.612804
| false
| 4.230582
| false
| false
| false
|
dekked/dynamodb-mock
|
ddbmock/database/storage/sqlite.py
|
1
|
3773
|
# -*- coding: utf-8 -*-
from ..item import Item
from ddbmock import config
import sqlite3, cPickle as pickle
# I know, using a global "variable" for this kind of state *is* bad. But it helps
# keep execution times at a sane value. In particular, it allows using the
# in-memory version of sqlite.
conn = sqlite3.connect(config.STORAGE_SQLITE_FILE)
class Store(object):
def __init__(self, name):
""" Initialize the sqlite store
By contract, we know the table name will only contain alphanum chars,
'_', '.' or '-' so that this is ~ safe
:param name: Table name.
"""
conn.execute('''CREATE TABLE IF NOT EXISTS `{}` (
`hash_key` blob NOT NULL,
`range_key` blob NOT NULL,
`data` blob NOT NULL,
PRIMARY KEY (`hash_key`,`range_key`)
);'''.format(name))
conn.commit()
self.name = name
def truncate(self):
"""Perform a full table cleanup. Might be a good idea in tests :)"""
conn.execute('DELETE FROM `{}`'.format(self.name))
conn.commit()
def _get_by_hash_range(self, hash_key, range_key):
request = conn.execute('''SELECT `data` FROM `{}`
WHERE `hash_key`=? AND `range_key`=?'''
.format(self.name),
(hash_key, range_key))
item = request.fetchone()
if item is None:
raise KeyError("No item found at ({}, {})".format(hash_key, range_key))
return pickle.loads(str(item[0]))
def _get_by_hash(self, hash_key):
items = conn.execute('''SELECT * FROM `{}`
WHERE `hash_key`=? '''.format(self.name),
(hash_key, ))
ret = {item[1]:pickle.loads(str(item[2])) for item in items}
if not ret:
raise KeyError("No item found at hash_key={}".format(hash_key))
return ret
def __getitem__(self, (hash_key, range_key)):
"""Get item at (``hash_key``, ``range_key``) or the dict at ``hash_key`` if
``range_key`` is None.
:param key: (``hash_key``, ``range_key``) Tuple. If ``range_key`` is None, all keys under ``hash_key`` are returned
:return: Item or item dict
:raise: KeyError
"""
if range_key is None:
return self._get_by_hash(hash_key)
return self._get_by_hash_range(hash_key, range_key)
def __setitem__(self, (hash_key, range_key), item):
"""Set the item at (``hash_key``, ``range_key``). Both keys must be
defined and valid. By convention, ``range_key`` may be ``False`` to
indicate a ``hash_key`` only key.
:param key: (``hash_key``, ``range_key``) Tuple.
:param item: the actual ``Item`` data structure to store
"""
db_item = buffer(pickle.dumps(item, 2))
conn.execute('''INSERT OR REPLACE INTO `{}` (`hash_key`,`range_key`, `data`)
VALUES (?, ?, ?)'''.format(self.name),
(hash_key, range_key, db_item))
conn.commit()
def __delitem__(self, (hash_key, range_key)):
"""Delete item at key (``hash_key``, ``range_key``)
:raises: KeyError if not found
"""
conn.execute('DELETE FROM `{}` WHERE `hash_key`=? AND `range_key`=?'
.format(self.name), (hash_key, range_key))
def __iter__(self):
""" Iterate all over the table, abstracting the ``hash_key`` and
``range_key`` complexity. Mostly used for ``Scan`` implementation.
"""
items = conn.execute('SELECT `data` FROM `{}`'.format(self.name))
for item in items:
yield pickle.loads(str(item[0]))
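# Illustrative usage of Store (added as a comment only, so importing this module
# stays side-effect free; table and key names are hypothetical):
#   store = Store('my-table')
#   store[('alice', False)] = some_item      # hash-only key: range_key is False by convention
#   one = store[('alice', False)]            # single-item lookup
#   per_range = store[('alice', None)]       # dict of range_key -> item
#   del store[('alice', False)]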
|
lgpl-3.0
| 7,474,325,584,043,636,000
| 37.111111
| 123
| 0.536973
| false
| 3.83825
| false
| false
| false
|
uclouvain/osis_louvain
|
base/business/proposal_xls.py
|
1
|
4218
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.utils.translation import ugettext_lazy as _
from osis_common.document import xls_build
from base.business.learning_unit import get_entity_acronym
from base.business.xls import get_name_or_username
WORKSHEET_TITLE = 'Proposals'
XLS_FILENAME = 'Proposals'
XLS_DESCRIPTION = "List_proposals"
PROPOSAL_TITLES = [str(_('requirement_entity_small')), str(_('code')), str(_('title')), str(_('type')),
str(_('proposal_type')), str(_('proposal_status')), str(_('folder_num')),
str(_('type_declaration_vacant')), str(_('periodicity')), str(_('credits')),
str(_('allocation_entity_small')), str(_('proposal_date'))]
def prepare_xls_content(proposals):
return [extract_xls_data_from_proposal(proposal) for proposal in proposals]
def extract_xls_data_from_proposal(proposal):
return [get_entity_acronym(proposal.learning_unit_year.entities.get('REQUIREMENT_ENTITY')),
proposal.learning_unit_year.acronym,
proposal.learning_unit_year.complete_title,
xls_build.translate(proposal.learning_unit_year.learning_container_year.container_type),
xls_build.translate(proposal.type),
xls_build.translate(proposal.state),
proposal.folder,
xls_build.translate(proposal.learning_unit_year.learning_container_year.type_declaration_vacant),
xls_build.translate(proposal.learning_unit_year.periodicity),
proposal.learning_unit_year.credits,
get_entity_acronym(proposal.learning_unit_year.entities.get('ALLOCATION_ENTITY')),
proposal.date.strftime('%d-%m-%Y')]
def prepare_xls_parameters_list(user, working_sheets_data):
return {xls_build.LIST_DESCRIPTION_KEY: _(XLS_DESCRIPTION),
xls_build.FILENAME_KEY: _(XLS_FILENAME),
xls_build.USER_KEY: get_name_or_username(user),
xls_build.WORKSHEETS_DATA:
[{xls_build.CONTENT_KEY: working_sheets_data,
xls_build.HEADER_TITLES_KEY: PROPOSAL_TITLES,
xls_build.WORKSHEET_TITLE_KEY: _(WORKSHEET_TITLE),
}
]}
def create_xls(user, proposals, filters):
working_sheets_data = prepare_xls_content(proposals)
return xls_build.generate_xls(
xls_build.prepare_xls_parameters_list(working_sheets_data, configure_parameters(user)), filters)
def create_xls_proposal(user, proposals, filters):
return xls_build.generate_xls(prepare_xls_parameters_list(prepare_xls_content(proposals),
configure_parameters(user)), filters)
def configure_parameters(user):
return {xls_build.DESCRIPTION: XLS_DESCRIPTION,
xls_build.USER: get_name_or_username(user),
xls_build.FILENAME: XLS_FILENAME,
xls_build.HEADER_TITLES: PROPOSAL_TITLES,
xls_build.WS_TITLE: WORKSHEET_TITLE}
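# Illustrative call (hypothetical caller, not part of this module): a view would
# typically pass the current user, an iterable of proposal objects and the filter
# description expected by xls_build.generate_xls, e.g.
#   create_xls_proposal(request.user, proposals, filters)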
|
agpl-3.0
| -4,826,060,251,302,619,000
| 45.855556
| 109
| 0.64904
| false
| 3.799099
| false
| false
| false
|
digitalocean/netbox
|
netbox/dcim/tests/test_models.py
|
1
|
20944
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from circuits.models import *
from dcim.choices import *
from dcim.models import *
from tenancy.models import Tenant
class RackGroupTestCase(TestCase):
def test_change_rackgroup_site(self):
"""
Check that all child RackGroups and Racks get updated when a RackGroup is moved to a new Site. Topology:
        Site A
          - RackGroup A1
            - RackGroup A2
              - Rack 2
            - Rack 1
"""
site_a = Site.objects.create(name='Site A', slug='site-a')
site_b = Site.objects.create(name='Site B', slug='site-b')
rackgroup_a1 = RackGroup(site=site_a, name='RackGroup A1', slug='rackgroup-a1')
rackgroup_a1.save()
rackgroup_a2 = RackGroup(site=site_a, parent=rackgroup_a1, name='RackGroup A2', slug='rackgroup-a2')
rackgroup_a2.save()
rack1 = Rack.objects.create(site=site_a, group=rackgroup_a1, name='Rack 1')
rack2 = Rack.objects.create(site=site_a, group=rackgroup_a2, name='Rack 2')
powerpanel1 = PowerPanel.objects.create(site=site_a, rack_group=rackgroup_a1, name='Power Panel 1')
# Move RackGroup A1 to Site B
rackgroup_a1.site = site_b
rackgroup_a1.save()
# Check that all objects within RackGroup A1 now belong to Site B
self.assertEqual(RackGroup.objects.get(pk=rackgroup_a1.pk).site, site_b)
self.assertEqual(RackGroup.objects.get(pk=rackgroup_a2.pk).site, site_b)
self.assertEqual(Rack.objects.get(pk=rack1.pk).site, site_b)
self.assertEqual(Rack.objects.get(pk=rack2.pk).site, site_b)
self.assertEqual(PowerPanel.objects.get(pk=powerpanel1.pk).site, site_b)
class RackTestCase(TestCase):
def setUp(self):
self.site1 = Site.objects.create(
name='TestSite1',
slug='test-site-1'
)
self.site2 = Site.objects.create(
name='TestSite2',
slug='test-site-2'
)
self.group1 = RackGroup.objects.create(
name='TestGroup1',
slug='test-group-1',
site=self.site1
)
self.group2 = RackGroup.objects.create(
name='TestGroup2',
slug='test-group-2',
site=self.site2
)
self.rack = Rack.objects.create(
name='TestRack1',
facility_id='A101',
site=self.site1,
group=self.group1,
u_height=42
)
self.manufacturer = Manufacturer.objects.create(
name='Acme',
slug='acme'
)
self.device_type = {
'ff2048': DeviceType.objects.create(
manufacturer=self.manufacturer,
model='FrameForwarder 2048',
slug='ff2048'
),
'cc5000': DeviceType.objects.create(
manufacturer=self.manufacturer,
model='CurrentCatapult 5000',
slug='cc5000',
u_height=0
),
}
self.role = {
'Server': DeviceRole.objects.create(
name='Server',
slug='server',
),
'Switch': DeviceRole.objects.create(
name='Switch',
slug='switch',
),
'Console Server': DeviceRole.objects.create(
name='Console Server',
slug='console-server',
),
'PDU': DeviceRole.objects.create(
name='PDU',
slug='pdu',
),
}
def test_rack_device_outside_height(self):
rack1 = Rack(
name='TestRack2',
facility_id='A102',
site=self.site1,
u_height=42
)
rack1.save()
device1 = Device(
name='TestSwitch1',
device_type=DeviceType.objects.get(manufacturer__slug='acme', slug='ff2048'),
device_role=DeviceRole.objects.get(slug='switch'),
site=self.site1,
rack=rack1,
position=43,
face=DeviceFaceChoices.FACE_FRONT,
)
device1.save()
with self.assertRaises(ValidationError):
rack1.clean()
def test_rack_group_site(self):
rack_invalid_group = Rack(
name='TestRack2',
facility_id='A102',
site=self.site1,
u_height=42,
group=self.group2
)
rack_invalid_group.save()
with self.assertRaises(ValidationError):
rack_invalid_group.clean()
def test_mount_single_device(self):
device1 = Device(
name='TestSwitch1',
device_type=DeviceType.objects.get(manufacturer__slug='acme', slug='ff2048'),
device_role=DeviceRole.objects.get(slug='switch'),
site=self.site1,
rack=self.rack,
position=10,
face=DeviceFaceChoices.FACE_REAR,
)
device1.save()
# Validate rack height
self.assertEqual(list(self.rack.units), list(reversed(range(1, 43))))
# Validate inventory (front face)
rack1_inventory_front = self.rack.get_rack_units(face=DeviceFaceChoices.FACE_FRONT)
self.assertEqual(rack1_inventory_front[-10]['device'], device1)
del(rack1_inventory_front[-10])
for u in rack1_inventory_front:
self.assertIsNone(u['device'])
# Validate inventory (rear face)
rack1_inventory_rear = self.rack.get_rack_units(face=DeviceFaceChoices.FACE_REAR)
self.assertEqual(rack1_inventory_rear[-10]['device'], device1)
del(rack1_inventory_rear[-10])
for u in rack1_inventory_rear:
self.assertIsNone(u['device'])
def test_mount_zero_ru(self):
pdu = Device.objects.create(
name='TestPDU',
device_role=self.role.get('PDU'),
device_type=self.device_type.get('cc5000'),
site=self.site1,
rack=self.rack,
position=None,
face='',
)
self.assertTrue(pdu)
def test_change_rack_site(self):
"""
Check that child Devices get updated when a Rack is moved to a new Site.
"""
site_a = Site.objects.create(name='Site A', slug='site-a')
site_b = Site.objects.create(name='Site B', slug='site-b')
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(
manufacturer=manufacturer, model='Device Type 1', slug='device-type-1'
)
device_role = DeviceRole.objects.create(
name='Device Role 1', slug='device-role-1', color='ff0000'
)
# Create Rack1 in Site A
rack1 = Rack.objects.create(site=site_a, name='Rack 1')
# Create Device1 in Rack1
device1 = Device.objects.create(site=site_a, rack=rack1, device_type=device_type, device_role=device_role)
# Move Rack1 to Site B
rack1.site = site_b
rack1.save()
# Check that Device1 is now assigned to Site B
self.assertEqual(Device.objects.get(pk=device1.pk).site, site_b)
class DeviceTestCase(TestCase):
def setUp(self):
self.site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
self.device_type = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
self.device_role = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
# Create DeviceType components
ConsolePortTemplate(
device_type=self.device_type,
name='Console Port 1'
).save()
ConsoleServerPortTemplate(
device_type=self.device_type,
name='Console Server Port 1'
).save()
ppt = PowerPortTemplate(
device_type=self.device_type,
name='Power Port 1',
maximum_draw=1000,
allocated_draw=500
)
ppt.save()
PowerOutletTemplate(
device_type=self.device_type,
name='Power Outlet 1',
power_port=ppt,
feed_leg=PowerOutletFeedLegChoices.FEED_LEG_A
).save()
InterfaceTemplate(
device_type=self.device_type,
name='Interface 1',
type=InterfaceTypeChoices.TYPE_1GE_FIXED,
mgmt_only=True
).save()
rpt = RearPortTemplate(
device_type=self.device_type,
name='Rear Port 1',
type=PortTypeChoices.TYPE_8P8C,
positions=8
)
rpt.save()
FrontPortTemplate(
device_type=self.device_type,
name='Front Port 1',
type=PortTypeChoices.TYPE_8P8C,
rear_port=rpt,
rear_port_position=2
).save()
DeviceBayTemplate(
device_type=self.device_type,
name='Device Bay 1'
).save()
def test_device_creation(self):
"""
Ensure that all Device components are copied automatically from the DeviceType.
"""
d = Device(
site=self.site,
device_type=self.device_type,
device_role=self.device_role,
name='Test Device 1'
)
d.save()
ConsolePort.objects.get(
device=d,
name='Console Port 1'
)
ConsoleServerPort.objects.get(
device=d,
name='Console Server Port 1'
)
pp = PowerPort.objects.get(
device=d,
name='Power Port 1',
maximum_draw=1000,
allocated_draw=500
)
PowerOutlet.objects.get(
device=d,
name='Power Outlet 1',
power_port=pp,
feed_leg=PowerOutletFeedLegChoices.FEED_LEG_A
)
Interface.objects.get(
device=d,
name='Interface 1',
type=InterfaceTypeChoices.TYPE_1GE_FIXED,
mgmt_only=True
)
rp = RearPort.objects.get(
device=d,
name='Rear Port 1',
type=PortTypeChoices.TYPE_8P8C,
positions=8
)
FrontPort.objects.get(
device=d,
name='Front Port 1',
type=PortTypeChoices.TYPE_8P8C,
rear_port=rp,
rear_port_position=2
)
DeviceBay.objects.get(
device=d,
name='Device Bay 1'
)
def test_multiple_unnamed_devices(self):
device1 = Device(
site=self.site,
device_type=self.device_type,
device_role=self.device_role,
name=''
)
device1.save()
device2 = Device(
site=device1.site,
device_type=device1.device_type,
device_role=device1.device_role,
name=''
)
device2.full_clean()
device2.save()
self.assertEqual(Device.objects.filter(name='').count(), 2)
def test_device_duplicate_names(self):
device1 = Device(
site=self.site,
device_type=self.device_type,
device_role=self.device_role,
name='Test Device 1'
)
device1.save()
device2 = Device(
site=device1.site,
device_type=device1.device_type,
device_role=device1.device_role,
name=device1.name
)
# Two devices assigned to the same Site and no Tenant should fail validation
with self.assertRaises(ValidationError):
device2.full_clean()
tenant = Tenant.objects.create(name='Test Tenant 1', slug='test-tenant-1')
device1.tenant = tenant
device1.save()
device2.tenant = tenant
# Two devices assigned to the same Site and the same Tenant should fail validation
with self.assertRaises(ValidationError):
device2.full_clean()
device2.tenant = None
# Two devices assigned to the same Site and different Tenants should pass validation
device2.full_clean()
device2.save()
class CableTestCase(TestCase):
def setUp(self):
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(
manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'
)
devicerole = DeviceRole.objects.create(
name='Test Device Role 1', slug='test-device-role-1', color='ff0000'
)
self.device1 = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='TestDevice1', site=site
)
self.device2 = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='TestDevice2', site=site
)
self.interface1 = Interface.objects.create(device=self.device1, name='eth0')
self.interface2 = Interface.objects.create(device=self.device2, name='eth0')
self.interface3 = Interface.objects.create(device=self.device2, name='eth1')
self.cable = Cable(termination_a=self.interface1, termination_b=self.interface2)
self.cable.save()
self.power_port1 = PowerPort.objects.create(device=self.device2, name='psu1')
self.patch_pannel = Device.objects.create(
device_type=devicetype, device_role=devicerole, name='TestPatchPannel', site=site
)
self.rear_port1 = RearPort.objects.create(device=self.patch_pannel, name='RP1', type='8p8c')
self.front_port1 = FrontPort.objects.create(
device=self.patch_pannel, name='FP1', type='8p8c', rear_port=self.rear_port1, rear_port_position=1
)
self.rear_port2 = RearPort.objects.create(device=self.patch_pannel, name='RP2', type='8p8c', positions=2)
self.front_port2 = FrontPort.objects.create(
device=self.patch_pannel, name='FP2', type='8p8c', rear_port=self.rear_port2, rear_port_position=1
)
self.rear_port3 = RearPort.objects.create(device=self.patch_pannel, name='RP3', type='8p8c', positions=3)
self.front_port3 = FrontPort.objects.create(
device=self.patch_pannel, name='FP3', type='8p8c', rear_port=self.rear_port3, rear_port_position=1
)
self.rear_port4 = RearPort.objects.create(device=self.patch_pannel, name='RP4', type='8p8c', positions=3)
self.front_port4 = FrontPort.objects.create(
device=self.patch_pannel, name='FP4', type='8p8c', rear_port=self.rear_port4, rear_port_position=1
)
self.provider = Provider.objects.create(name='Provider 1', slug='provider-1')
self.circuittype = CircuitType.objects.create(name='Circuit Type 1', slug='circuit-type-1')
self.circuit = Circuit.objects.create(provider=self.provider, type=self.circuittype, cid='1')
self.circuittermination1 = CircuitTermination.objects.create(circuit=self.circuit, site=site, term_side='A')
self.circuittermination2 = CircuitTermination.objects.create(circuit=self.circuit, site=site, term_side='Z')
def test_cable_creation(self):
"""
When a new Cable is created, it must be cached on either termination point.
"""
interface1 = Interface.objects.get(pk=self.interface1.pk)
interface2 = Interface.objects.get(pk=self.interface2.pk)
self.assertEqual(self.cable.termination_a, interface1)
self.assertEqual(interface1._cable_peer, interface2)
self.assertEqual(self.cable.termination_b, interface2)
self.assertEqual(interface2._cable_peer, interface1)
def test_cable_deletion(self):
"""
        When a Cable is deleted, the `cable` field on its termination points must be nullified, and the
        deleted Cable's str() representation should still carry its original PK rather than rendering as '#None'.
"""
self.cable.delete()
self.assertIsNone(self.cable.pk)
self.assertNotEqual(str(self.cable), '#None')
interface1 = Interface.objects.get(pk=self.interface1.pk)
self.assertIsNone(interface1.cable)
self.assertIsNone(interface1._cable_peer)
interface2 = Interface.objects.get(pk=self.interface2.pk)
self.assertIsNone(interface2.cable)
self.assertIsNone(interface2._cable_peer)
def test_cabletermination_deletion(self):
"""
When a CableTermination object is deleted, its attached Cable (if any) must also be deleted.
"""
self.interface1.delete()
cable = Cable.objects.filter(pk=self.cable.pk).first()
self.assertIsNone(cable)
def test_cable_validates_compatible_types(self):
"""
The clean method should have a check to ensure only compatible port types can be connected by a cable
"""
# An interface cannot be connected to a power port
cable = Cable(termination_a=self.interface1, termination_b=self.power_port1)
with self.assertRaises(ValidationError):
cable.clean()
def test_cable_cannot_have_the_same_terminination_on_both_ends(self):
"""
A cable cannot be made with the same A and B side terminations
"""
cable = Cable(termination_a=self.interface1, termination_b=self.interface1)
with self.assertRaises(ValidationError):
cable.clean()
def test_cable_front_port_cannot_connect_to_corresponding_rear_port(self):
"""
A cable cannot connect a front port to its corresponding rear port
"""
cable = Cable(termination_a=self.front_port1, termination_b=self.rear_port1)
with self.assertRaises(ValidationError):
cable.clean()
def test_cable_cannot_terminate_to_an_existing_connection(self):
"""
Either side of a cable cannot be terminated when that side already has a connection
"""
        # interface1 and interface2 are already joined by self.cable, so a new cable reusing them must fail
cable = Cable(termination_a=self.interface2, termination_b=self.interface1)
with self.assertRaises(ValidationError):
cable.clean()
def test_rearport_connections(self):
"""
Test various combinations of RearPort connections.
"""
# Connecting a single-position RearPort to a multi-position RearPort is ok
Cable(termination_a=self.rear_port1, termination_b=self.rear_port2).full_clean()
# Connecting a single-position RearPort to an Interface is ok
Cable(termination_a=self.rear_port1, termination_b=self.interface3).full_clean()
# Connecting a single-position RearPort to a CircuitTermination is ok
Cable(termination_a=self.rear_port1, termination_b=self.circuittermination1).full_clean()
# Connecting a multi-position RearPort to another RearPort with the same number of positions is ok
Cable(termination_a=self.rear_port3, termination_b=self.rear_port4).full_clean()
# Connecting a multi-position RearPort to an Interface is ok
Cable(termination_a=self.rear_port2, termination_b=self.interface3).full_clean()
# Connecting a multi-position RearPort to a CircuitTermination is ok
Cable(termination_a=self.rear_port2, termination_b=self.circuittermination1).full_clean()
# Connecting a two-position RearPort to a three-position RearPort is NOT ok
with self.assertRaises(
ValidationError,
msg='Connecting a 2-position RearPort to a 3-position RearPort should fail'
):
Cable(termination_a=self.rear_port2, termination_b=self.rear_port3).full_clean()
def test_cable_cannot_terminate_to_a_virtual_interface(self):
"""
A cable cannot terminate to a virtual interface
"""
virtual_interface = Interface(device=self.device1, name="V1", type=InterfaceTypeChoices.TYPE_VIRTUAL)
cable = Cable(termination_a=self.interface2, termination_b=virtual_interface)
with self.assertRaises(ValidationError):
cable.clean()
def test_cable_cannot_terminate_to_a_wireless_interface(self):
"""
A cable cannot terminate to a wireless interface
"""
wireless_interface = Interface(device=self.device1, name="W1", type=InterfaceTypeChoices.TYPE_80211A)
cable = Cable(termination_a=self.interface2, termination_b=wireless_interface)
with self.assertRaises(ValidationError):
cable.clean()
|
apache-2.0
| 6,810,999,451,422,508,000
| 35.551483
| 116
| 0.603419
| false
| 3.795578
| true
| false
| false
|
adamwiggins/cocos2d
|
tools/skeleton/skeleton_editor.py
|
2
|
5454
|
import os, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
#
import math
from math import pi, atan
import cPickle
import glob
from optparse import OptionParser
import pyglet
from pyglet.gl import *
from pyglet.window import key
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos.actions import CallFuncS, CallFunc, IntervalAction
from cocos import euclid
from animator import *
import ui
from cocos.skeleton import Bone, Skeleton, Skin, Animation, Animate, ColorSkin, \
BitmapSkin
class SkinControl(ui.BallWidget):
def __init__(self, skin, idx, bone, delta):
super(SkinControl, self).__init__(7, (0,0,255,255))
self.skin = skin
self.idx = idx
self.bone = bone
self.position = (bone.get_start()+bone.get_end())/2 + delta
def on_dragged(self, dx, dy):
super(SkinControl, self).on_dragged(dx, dy)
self.skin.move(self.idx, dx, dy)
class BonePositionControl(ui.BallWidget):
def __init__(self, bone, delta):
super(BonePositionControl, self).__init__(10, (0,255,0,255))
self.bone = bone
self.position = (bone.get_start()) + delta
def on_dragged(self, dx, dy):
super(BonePositionControl, self).on_dragged(dx, dy)
self.bone.move(dx, dy)
class SkeletonEditorUI(ui.UILayer):
def __init__(self, skeleton, skin):
super(SkeletonEditorUI, self).__init__()
        sk_file = imp.load_source("skeleton", skeleton)  # use the constructor argument rather than the CLI global
if skin is None:
self.user_skin = None
else:
            skin_data = imp.load_source("skin", skin).skin
self.skin_filename = skin
self.user_skin = skin_data
self.skeleton_file = skeleton
self.skeleton = sk_file.skeleton
self.add_skin_for_skeleton(self.skeleton, (255,255,255,255))
def add_skin_for_skeleton(self, skeleton, color, z=-1, editable=False):
if self.user_skin:
skin = BitmapSkin(skeleton, self.user_skin, color[3])
else:
skin = ColorSkin(skeleton, color)
self.skin = skin
self.add( skin, z=z )
xs, ys = director.get_window_size()
skin.position = xs/2-6, ys/2-11
self.generate_control_points()
def on_key_press(self, k, mod):
if k == key.S:
f = open(self.skin_filename, "w")
f.write("\nskin = [\n")
for p in self.skin.skin_parts:
f.write(" %s,\n"%(p,))
f.write(" ]\n")
f.close()
f = open(self.skeleton_file, "w")
f.write("""from cocos.skeleton import Bone, Skeleton\n
def Point2(*args): return args\n
root_bone = %s
skeleton = Skeleton( root_bone )"""%self.skeleton.bone.repr())
f.close()
def update_visual(self):
self.add_skin_for_skeleton(self.skeleton, -1, True)
def clean_control_points(self):
cps = [ cp for cp in self.get_children() if isinstance(cp, ui.BallWidget) ]
for cp in cps:
self.remove(cp)
def clean_skins(self):
skins = [ cp for cp in self.get_children() if isinstance(cp, Skin) ]
for skin in skins:
self.remove(skin)
def on_mouse_release(self, *args):
if self.dragging:
self.clean_control_points()
self.generate_control_points()
super(SkeletonEditorUI, self).on_mouse_release(*args)
def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
if self.hovering:
cps = [ cp for cp in self.get_children()
if isinstance(cp, ui.BallWidget) and cp != self.hovering ]
for cp in cps:
self.remove(cp)
super(SkeletonEditorUI, self).on_mouse_drag(x, y, dx, dy, button, modifiers)
def generate_control_points(self):
skinpos = euclid.Point2(*self.skin.position)
for cp in self.skeleton.get_control_points():
if isinstance(cp, Skeleton):
self.add( SkeletonControl(cp, skinpos), z=3 )
else:
self.add( BoneControl(cp, skinpos), z=4 )
bones = self.skeleton.visit_children(lambda bone: (bone.label, bone))
bones = dict(bones)
for bone in bones.values():
self.add( BonePositionControl( bone, skinpos ), z=2 )
for idx, name in self.skin.get_control_points():
self.add( SkinControl(self.skin, idx, bones[name], skinpos ), z=5)
if __name__ == "__main__":
import sys, imp
director.init()
parser = OptionParser()
parser.add_option("-b", "--background", dest="background",
help="use file as background", default=False, metavar="FILE")
parser.add_option("-s", "--scale", dest="scale",
help="scale image by", default=1, metavar="SCALE")
(options, args) = parser.parse_args()
def usage():
return "python animator.py skeleton.py skin.py"
    if len(args) != 2:
print usage()
print parser.error("incorrect number of arguments")
sys.exit()
animator = cocos.scene.Scene(SkeletonEditorUI(args[0], args[1]))
if options.background:
background = cocos.sprite.Sprite(options.background)
x,y = director.get_window_size()
animator.add( background, z=-10 )
background.position = x/2, y/2
background.scale = float(options.scale)
director.run(animator)
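# Example invocation (illustrative; file and image names are hypothetical):
#   python skeleton_editor.py skeleton.py skin.py -b background.png -s 1.5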
|
bsd-3-clause
| 6,242,549,518,128,177,000
| 31.464286
| 84
| 0.595526
| false
| 3.434509
| false
| false
| false
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/work_group_v20.py
|
1
|
5166
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.external_i_ds_v20 import ExternalIDsV20 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v20 import LastModifiedDateV20 # noqa: F401,E501
from orcid_api_v3.models.work_summary_v20 import WorkSummaryV20 # noqa: F401,E501
class WorkGroupV20(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_modified_date': 'LastModifiedDateV20',
'external_ids': 'ExternalIDsV20',
'work_summary': 'list[WorkSummaryV20]'
}
attribute_map = {
'last_modified_date': 'last-modified-date',
'external_ids': 'external-ids',
'work_summary': 'work-summary'
}
def __init__(self, last_modified_date=None, external_ids=None, work_summary=None): # noqa: E501
"""WorkGroupV20 - a model defined in Swagger""" # noqa: E501
self._last_modified_date = None
self._external_ids = None
self._work_summary = None
self.discriminator = None
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if external_ids is not None:
self.external_ids = external_ids
if work_summary is not None:
self.work_summary = work_summary
@property
def last_modified_date(self):
"""Gets the last_modified_date of this WorkGroupV20. # noqa: E501
:return: The last_modified_date of this WorkGroupV20. # noqa: E501
:rtype: LastModifiedDateV20
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this WorkGroupV20.
:param last_modified_date: The last_modified_date of this WorkGroupV20. # noqa: E501
:type: LastModifiedDateV20
"""
self._last_modified_date = last_modified_date
@property
def external_ids(self):
"""Gets the external_ids of this WorkGroupV20. # noqa: E501
:return: The external_ids of this WorkGroupV20. # noqa: E501
:rtype: ExternalIDsV20
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this WorkGroupV20.
:param external_ids: The external_ids of this WorkGroupV20. # noqa: E501
:type: ExternalIDsV20
"""
self._external_ids = external_ids
@property
def work_summary(self):
"""Gets the work_summary of this WorkGroupV20. # noqa: E501
:return: The work_summary of this WorkGroupV20. # noqa: E501
:rtype: list[WorkSummaryV20]
"""
return self._work_summary
@work_summary.setter
def work_summary(self, work_summary):
"""Sets the work_summary of this WorkGroupV20.
:param work_summary: The work_summary of this WorkGroupV20. # noqa: E501
:type: list[WorkSummaryV20]
"""
self._work_summary = work_summary
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkGroupV20, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkGroupV20):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
mit
| 2,386,846,838,889,565,700
| 30.120482
| 119
| 0.590205
| false
| 3.863874
| false
| false
| false
|
argriffing/arbtkf91
|
repro/data_source.py
|
1
|
1854
|
"""
yield stuff from a hardcoded data source
"""
from __future__ import print_function, division
import os
import numpy as np
from numpy.testing import assert_array_equal, assert_equal
from Bio import SeqIO
__all__ = ['gen_files', 'gen_sequence_pairs']
#mypath = os.path.realpath('../../stamatakis/benchMark_data')
def _normalized_seq(s):
return ''.join(_normalized_chr(c) for c in s)
def _normalized_chr(c):
if c in 'ACGT':
return c
    elif c.isalpha():  # call the method; the bare bound method is always truthy
return 'A'
else:
msg = ('weird character:', c)
raise Exception(msg)
def gen_sequence_pairs(fin, force_acgt=False):
# yield (10 choose 2) = 45 nucleotide sequence pairs
fasta_objects = list(SeqIO.parse(fin, 'fasta'))
sequences = [str(x.seq) for x in fasta_objects]
available = len(sequences)
requested = 10
indices = _select_indices(available, requested)
selection = [sequences[i] for i in indices]
assert_equal(len(selection), requested)
k = 0
for i in range(requested):
for j in range(i):
a = selection[i]
b = selection[j]
if force_acgt:
a = _normalized_seq(a)
b = _normalized_seq(b)
yield a, b
k += 1
assert_equal(k, 45)
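# Illustrative use (hypothetical path): stream the 45 pairs from one file.
#   with open('some_unaligned.fasta') as fin:
#       for a, b in gen_sequence_pairs(fin, force_acgt=True):
#           pass  # align or score the pair here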
def gen_files(data_path):
# yield (name, handle) pairs
for filename in os.listdir(data_path):
if 'unaligned' in filename:
fullpath = os.path.join(data_path, filename)
with open(fullpath) as fin:
yield filename, fin
def _select_indices(available, requested):
incr = available // requested
return [i*incr for i in range(requested)]
def test():
indices = _select_indices(60, 10)
assert_array_equal(indices[:3], [0, 6, 12])
assert_array_equal(indices[-1:], [54])
if __name__ == '__main__':
test()
|
gpl-2.0
| 2,098,660,419,289,982,200
| 24.75
| 61
| 0.600863
| false
| 3.458955
| false
| false
| false
|
Scriptkiddi/Ankipubsub-Client
|
pubsub/gui/auto_gen/ankipubsub_settings.py
|
1
|
2166
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ankipubsub_settings.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(173, 169)
self.Login = QtGui.QPushButton(Form)
self.Login.setGeometry(QtCore.QRect(30, 120, 92, 27))
self.Login.setObjectName(_fromUtf8("Login"))
self.label = QtGui.QLabel(Form)
self.label.setGeometry(QtCore.QRect(10, 20, 171, 16))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(10, 70, 141, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.username = QtGui.QLineEdit(Form)
self.username.setGeometry(QtCore.QRect(20, 40, 113, 25))
self.username.setObjectName(_fromUtf8("username"))
self.password = QtGui.QLineEdit(Form)
self.password.setGeometry(QtCore.QRect(20, 90, 113, 25))
self.password.setObjectName(_fromUtf8("password"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "AnkiPubSub Settings", None))
self.Login.setText(_translate("Form", "Login", None))
self.label.setText(_translate("Form", "Insert your Username:", None))
self.label_2.setText(_translate("Form", "Insert your Password:", None))
self.username.setText(_translate("Form", "Username", None))
self.password.setText(_translate("Form", "Password", None))
|
gpl-3.0
| 377,500,660,763,704,260
| 38.381818
| 79
| 0.672207
| false
| 3.786713
| false
| false
| false
|
Squishymedia/feedingdb
|
src/feeddb/feed/migrations/0005_name2title_drop_name.py
|
1
|
36453
|
from south.db import db
from django.db import models
from feeddb.feed.models import *
class Migration:
def forwards(self, orm):
# Deleting field 'Trial.name'
db.delete_column('feed_trial', 'name')
# Deleting field 'Experiment.name'
db.delete_column('feed_experiment', 'name')
# Deleting field 'Session.name'
db.delete_column('feed_session', 'name')
# Deleting field 'Study.name'
db.delete_column('feed_study', 'name')
# Changing field 'Trial.title'
# (to signature: django.db.models.fields.CharField(default='new Trial - edit this', max_length=255))
db.alter_column('feed_trial', 'title', orm['feed.trial:title'])
# Changing field 'Experiment.title'
# (to signature: django.db.models.fields.CharField(default='new Experiment - edit this', max_length=255))
db.alter_column('feed_experiment', 'title', orm['feed.experiment:title'])
# Changing field 'Session.title'
# (to signature: django.db.models.fields.CharField(default='new Recording Session - edit this', max_length=255))
db.alter_column('feed_session', 'title', orm['feed.session:title'])
# Changing field 'Study.title'
# (to signature: django.db.models.fields.CharField(max_length=255))
db.alter_column('feed_study', 'title', orm['feed.study:title'])
def backwards(self, orm):
# Adding field 'Trial.name'
db.add_column('feed_trial', 'name', orm['feed.trial:name'])
# Adding field 'Experiment.name'
db.add_column('feed_experiment', 'name', orm['feed.experiment:name'])
# Adding field 'Session.name'
db.add_column('feed_session', 'name', orm['feed.session:name'])
# Adding field 'Study.name'
db.add_column('feed_study', 'name', orm['feed.study:name'])
# Changing field 'Trial.title'
# (to signature: django.db.models.fields.CharField(default='new Trial - edit this', max_length=255, null=True))
db.alter_column('feed_trial', 'title', orm['feed.trial:title'])
# Changing field 'Experiment.title'
# (to signature: django.db.models.fields.CharField(default='new Experiment - edit this', max_length=255, null=True))
db.alter_column('feed_experiment', 'title', orm['feed.experiment:title'])
# Changing field 'Session.title'
# (to signature: django.db.models.fields.CharField(default='new Recording Session - edit this', max_length=255, null=True))
db.alter_column('feed_session', 'title', orm['feed.session:title'])
# Changing field 'Study.title'
# (to signature: django.db.models.fields.CharField(max_length=255, null=True))
db.alter_column('feed_study', 'title', orm['feed.study:title'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'feed.anteriorposterioraxis': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anteriorposterioraxis_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.behavior': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'behavior_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.channel': {
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channel_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.channellineup': {
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Channel']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channellineup_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Session']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.depthaxis': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'depthaxis_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.developmentstage': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'developmentstage_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.dorsalventralaxis': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dorsalventralaxis_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.electrodetype': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'electrodetype_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.emgchannel': {
'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'emg_filtering': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Emgfiltering']"}),
'emg_unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Emgunit']"}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.EmgSensor']"})
},
'feed.emgelectrode': {
'axisap': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'axisdv': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emgelectrode_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'electrode_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.ElectrodeType']", 'null': 'True', 'blank': 'True'}),
'emg_filtering': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Emgfiltering']"}),
'emg_unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Emgunit']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Muscle']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Setup']"}),
'side': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Side']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.emgfiltering': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emgfiltering_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.emgsensor': {
'axisap': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'axisdv': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'electrode_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.ElectrodeType']", 'null': 'True', 'blank': 'True'}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Muscle']"}),
'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'}),
'side': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Side']"})
},
'feed.emgsetup': {
'preamplifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
'feed.emgunit': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emgunit_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.experiment': {
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'experiment_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impl_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Study']"}),
'subj_age': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subj_devstage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.DevelopmentStage']"}),
'subj_tooth': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Subject']"}),
'subject_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'new Experiment - edit this'", 'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.illustration': {
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'illustration_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Experiment']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Setup']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Subject']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.muscle': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'muscle_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.restraint': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'restraint_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.sensor': {
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sensor_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.session': {
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['feed.Channel']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'session_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_anesthesia_sedation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_restraint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Restraint']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "'new Recording Session - edit this'", 'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.setup': {
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'setup_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Experiment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'technique': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Technique']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.side': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'side_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.sonochannel': {
'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'crystal1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals1_related'", 'to': "orm['feed.SonoSensor']"}),
'crystal2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals2_related'", 'to': "orm['feed.SonoSensor']"}),
'sono_unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Sonounit']"})
},
'feed.sonosensor': {
'axisap': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'axisdv': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Muscle']"}),
'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'}),
'side': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Side']"})
},
'feed.sonosetup': {
'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'}),
'sonomicrometer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'feed.sonounit': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sonounit_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.study': {
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'approval_secured': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'study_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'funding_agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.studyprivate': {
'approval': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'studyprivate_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'funding': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lab': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pi': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Study']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.subject': {
'breed': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subject_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Study']"}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Taxon']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.taxon': {
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taxon_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.technique': {
'controlled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'technique_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'deprecated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'feed.trial': {
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behavior_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'behavior_primary': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Behavior']"}),
'behavior_secondary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'claimed_duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '4', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trial_related'", 'blank': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'food_property': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feed.Session']"}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_treatment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'new Trial - edit this'", 'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'waveform_picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['feed']
|
gpl-3.0
| -1,834,002,616,285,841,700
| 81.659864
| 189
| 0.551093
| false
| 3.606351
| false
| false
| false
|
dashea/redhat-upgrade-tool
|
redhat_upgrade_tool/sysprep.py
|
1
|
8052
|
# sysprep.py - utility functions for system prep
#
# Copyright (C) 2012 Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Will Woods <wwoods@redhat.com>
import os, glob
from shutil import copy2
from . import _
from . import cachedir, packagedir, packagelist, update_img_dir
from . import upgradeconf, upgradelink, upgraderoot
from .media import write_systemd_unit
from .util import listdir, mkdir_p, rm_f, rm_rf, is_selinux_enabled, kernelver
from .conf import Config
from . import boot
import logging
log = logging.getLogger(__package__+".sysprep")
upgrade_target_requires = "/lib/systemd/system/system-upgrade.target.requires"
def setup_cleanup_post():
    '''Set a flag in upgrade.conf to be read by preupgrade-assistant,
    signalling it to clean up old packages in the post scripts.'''
with Config(upgradeconf) as conf:
conf.set('postupgrade', 'cleanup', 'True')
def link_pkgs(pkgs):
'''link the named pkgs into packagedir, overwriting existing files.
also removes any .rpm files in packagedir that aren't in pkgs.
finally, write a list of packages to upgrade and a list of dirs
to clean up after successful upgrade.'''
log.info("linking required packages into packagedir")
log.info("packagedir = %s", packagedir)
mkdir_p(packagedir)
pkgbasenames = set()
for pkg in pkgs:
pkgpath = pkg.localPkg()
if pkg.remote_url.startswith("file://"):
pkgbasename = "media/%s" % pkg.relativepath
pkgbasenames.add(pkgbasename)
continue
if not os.path.exists(pkgpath):
log.warning("%s missing", pkgpath)
continue
pkgbasename = os.path.basename(pkgpath)
pkgbasenames.add(pkgbasename)
target = os.path.join(packagedir, pkgbasename)
if os.path.exists(target) and os.lstat(pkgpath) == os.lstat(target):
log.info("%s already in packagedir", pkgbasename)
continue
else:
if os.path.isdir(target):
log.info("deleting weirdo directory named %s", pkgbasename)
rm_rf(target)
elif os.path.exists(target):
os.remove(target)
try:
os.link(pkgpath, target)
except OSError as e:
if e.errno == 18:
copy2(pkgpath, target)
else:
raise
# remove spurious / leftover RPMs
for f in os.listdir(packagedir):
if f.endswith(".rpm") and f not in pkgbasenames:
os.remove(os.path.join(packagedir, f))
# write packagelist
with open(packagelist, 'w') as outf:
outf.writelines(p+'\n' for p in pkgbasenames)
# write cleanup data
with Config(upgradeconf) as conf:
# packagedir should probably be last, since it contains upgradeconf
cleanupdirs = [cachedir, packagedir]
conf.set("cleanup", "dirs", ';'.join(cleanupdirs))
def setup_upgradelink():
log.info("setting up upgrade symlink: %s->%s", upgradelink, packagedir)
try:
os.remove(upgradelink)
except OSError:
pass
os.symlink(packagedir, upgradelink)
def setup_media_mount(mnt):
# make a "media" subdir where all the packages are
mountpath = os.path.join(upgradelink, "media")
log.info("setting up mount for %s at %s", mnt.dev, mountpath)
mkdir_p(mountpath)
# make a directory to place a unit
mkdir_p(upgrade_target_requires)
# make a modified mnt entry that puts it at mountpath
mediamnt = mnt._replace(rawmnt=mountpath)
# finally, write out a systemd unit to mount media there
unit = write_systemd_unit(mediamnt, upgrade_target_requires)
log.info("wrote %s", unit)
def setup_upgraderoot():
if os.path.isdir(upgraderoot):
log.info("upgrade root dir %s already exists", upgraderoot)
return
else:
log.info("creating upgraderoot dir: %s", upgraderoot)
os.makedirs(upgraderoot, 0755)
def prep_upgrade(pkgs):
# put packages in packagedir (also writes packagelist)
link_pkgs(pkgs)
# make magic symlink
setup_upgradelink()
# make dir for upgraderoot
setup_upgraderoot()
def modify_bootloader(kernel, initrd):
log.info("adding new boot entry")
args = ["upgrade", "systemd.unit=system-upgrade.target"]
if not is_selinux_enabled():
args.append("selinux=0")
else:
# BLERG. SELinux enforcing will cause problems if the new policy
# disallows something that the previous system did differently.
# See https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=896010
args.append("enforcing=0")
boot.add_entry(kernel, initrd, banner=_("System Upgrade"), kargs=args)
def prep_boot(kernel, initrd):
# check for systems that need mdadm.conf
if boot.need_mdadmconf():
log.info("appending /etc/mdadm.conf to initrd")
boot.initramfs_append_files(initrd, "/etc/mdadm.conf")
# look for updates, and add them to initrd if found
updates = []
try:
updates = list(listdir(update_img_dir))
except (IOError, OSError) as e:
log.info("can't list update img dir %s: %s", update_img_dir, e.strerror)
if updates:
log.info("found updates in %s, appending to initrd", update_img_dir)
boot.initramfs_append_images(initrd, updates)
# make a dir in /lib/modules to hold a copy of the new kernel's modules
# (the initramfs will copy/bind them into place when we reboot)
kv = kernelver(kernel)
if kv:
moddir = os.path.join("/lib/modules", kv)
log.info("creating module dir %s", moddir)
mkdir_p(moddir)
else:
log.warn("can't determine version of kernel image '%s'", kernel)
# set up the boot args
modify_bootloader(kernel, initrd)
def reset_boot():
'''reset bootloader to previous default and remove our boot entry'''
conf = Config(upgradeconf)
kernel = conf.get("boot", "kernel")
if kernel:
boot.remove_entry(kernel)
def remove_boot():
'''remove boot images'''
conf = Config(upgradeconf)
kernel = conf.get("boot", "kernel")
initrd = conf.get("boot", "initrd")
if kernel:
rm_f(kernel)
if initrd:
rm_f(initrd)
def remove_cache():
'''remove our cache dirs'''
conf = Config(upgradeconf)
cleanup = conf.get("cleanup", "dirs") or ''
cleanup = cleanup.split(';')
cleanup += [cachedir, packagedir] # just to be sure
for d in cleanup:
log.info("removing %s", d)
rm_rf(d)
def misc_cleanup(clean_all_repos=True):
log.info("removing symlink %s", upgradelink)
rm_f(upgradelink)
for d in (upgraderoot, upgrade_target_requires):
log.info("removing %s", d)
rm_rf(d)
repodir = '/etc/yum.repos.d'
log.info("removing repo files")
# If clean_all_repos is false, leave behind the repos with regular
# URLs and just clean the ones with file:// URLs (i.e., repos added
# for upgrades from cdrom or other non-network sources)
for repo in glob.glob(repodir + '/redhat-upgrade-*.repo'):
rmrepo=True
if not clean_all_repos:
with open(repo, "r") as repofile:
for line in repofile:
if line.strip().startswith('baseurl') and 'file://' not in line:
rmrepo=False
break
if rmrepo:
rm_rf(repo)
|
gpl-2.0
| 5,812,619,654,188,032,000
| 34.315789
| 84
| 0.643939
| false
| 3.75735
| true
| false
| false
|
jefftc/changlab
|
Betsy/Betsy/modules/preprocess_mas5.py
|
1
|
1766
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
"""preprocess the inputfile with MAS5
using preprocess.py will generate a output file"""
import os
import subprocess
from Betsy import module_utils as mlib
in_data = antecedents
#preprocess the cel file to text signal file
PREPROCESS_BIN = mlib.get_config("preprocess", which_assert_file=True)
#PREPROCESS_path = config.preprocess
#PREPROCESS_BIN = module_utils.which(PREPROCESS_path)
#assert PREPROCESS_BIN, 'cannot find the %s' % PREPROCESS_path
command = ['python', PREPROCESS_BIN, 'MAS5', in_data.identifier]
process = subprocess.Popen(
command, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error_message = process.communicate()[1]
if error_message:
if not "Loading required package: Biobase" in error_message:
raise ValueError(error_message)
outputfiles = os.listdir(".")
outputfile = None
for i in outputfiles:
if i.endswith('.mas5') and not i.endswith('.l2.mas5'):
outputfile = i
assert outputfile, "No output file created."
os.rename(outputfile, outfile)
def name_outfile(self, antecedents, user_options):
#from Betsy import module_utils
#original_file = module_utils.get_inputid(antecedents.identifier)
#filename = 'signal_mas5_' + original_file + '.jeffs'
#return filename
return "signal.txt"
|
mit
| -8,856,985,929,441,454,000
| 35.791667
| 78
| 0.620612
| false
| 4.14554
| false
| false
| false
|
MissionCriticalCloud/marvin
|
marvin/cloudstackAPI/updateNetwork.py
|
1
|
11415
|
"""Updates a network"""
from baseCmd import *
from baseResponse import *
class updateNetworkCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""the ID of the network"""
"""Required"""
self.id = None
self.typeInfo['id'] = 'uuid'
"""Force update even if CIDR type is different"""
self.changecidr = None
self.typeInfo['changecidr'] = 'boolean'
"""an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only"""
self.customid = None
self.typeInfo['customid'] = 'string'
"""an optional field, whether to the display the network to the end user or not."""
self.displaynetwork = None
self.typeInfo['displaynetwork'] = 'boolean'
"""the new display text for the network"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""The first DNS server of the network"""
self.dns1 = None
self.typeInfo['dns1'] = 'string'
"""The second DNS server of the network"""
self.dns2 = None
self.typeInfo['dns2'] = 'string'
"""CIDR for guest VMs, CloudStack allocates IPs to guest VMs only from this CIDR"""
self.guestvmcidr = None
self.typeInfo['guestvmcidr'] = 'string'
"""IP exclusion list for private networks"""
self.ipexclusionlist = None
self.typeInfo['ipexclusionlist'] = 'string'
"""the new name for the network"""
self.name = None
self.typeInfo['name'] = 'string'
"""network domain"""
self.networkdomain = None
self.typeInfo['networkdomain'] = 'string'
"""network offering ID"""
self.networkofferingid = None
self.typeInfo['networkofferingid'] = 'uuid'
self.required = ["id", ]
class updateNetworkResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the id of the network"""
self.id = None
self.typeInfo['id'] = 'string'
"""the owner of the network"""
self.account = None
self.typeInfo['account'] = 'string'
"""ACL Id associated with the VPC network"""
self.aclid = None
self.typeInfo['aclid'] = 'string'
"""acl type - access type to the network"""
self.acltype = None
self.typeInfo['acltype'] = 'string'
"""Broadcast domain type of the network"""
self.broadcastdomaintype = None
self.typeInfo['broadcastdomaintype'] = 'string'
"""broadcast uri of the network. This parameter is visible to ROOT admins only"""
self.broadcasturi = None
self.typeInfo['broadcasturi'] = 'string'
"""list networks available for vm deployment"""
self.canusefordeploy = None
self.typeInfo['canusefordeploy'] = 'boolean'
"""Cloudstack managed address space, all CloudStack managed VMs get IP address from CIDR"""
self.cidr = None
self.typeInfo['cidr'] = 'string'
"""an optional field, whether to the display the network to the end user or not."""
self.displaynetwork = None
self.typeInfo['displaynetwork'] = 'boolean'
"""the displaytext of the network"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""the first DNS for the network"""
self.dns1 = None
self.typeInfo['dns1'] = 'string'
"""the second DNS for the network"""
self.dns2 = None
self.typeInfo['dns2'] = 'string'
"""the domain name of the network owner"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the domain id of the network owner"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the network's gateway"""
self.gateway = None
self.typeInfo['gateway'] = 'string'
"""the cidr of IPv6 network"""
self.ip6cidr = None
self.typeInfo['ip6cidr'] = 'string'
"""the gateway of IPv6 network"""
self.ip6gateway = None
self.typeInfo['ip6gateway'] = 'string'
"""list of ip addresses and/or ranges of addresses to be excluded from the network for assignment"""
self.ipexclusionlist = None
self.typeInfo['ipexclusionlist'] = 'string'
"""true if network is default, false otherwise"""
self.isdefault = None
self.typeInfo['isdefault'] = 'boolean'
"""list networks that are persistent"""
self.ispersistent = None
self.typeInfo['ispersistent'] = 'boolean'
"""true if network is system, false otherwise"""
self.issystem = None
self.typeInfo['issystem'] = 'boolean'
"""the name of the network"""
self.name = None
self.typeInfo['name'] = 'string'
"""the network's netmask"""
self.netmask = None
self.typeInfo['netmask'] = 'string'
"""the network CIDR of the guest network configured with IP reservation. It is the summation of CIDR and RESERVED_IP_RANGE"""
self.networkcidr = None
self.typeInfo['networkcidr'] = 'string'
"""the network domain"""
self.networkdomain = None
self.typeInfo['networkdomain'] = 'string'
"""availability of the network offering the network is created from"""
self.networkofferingavailability = None
self.typeInfo['networkofferingavailability'] = 'string'
"""true if network offering is ip conserve mode enabled"""
self.networkofferingconservemode = None
self.typeInfo['networkofferingconservemode'] = 'boolean'
"""display text of the network offering the network is created from"""
self.networkofferingdisplaytext = None
self.typeInfo['networkofferingdisplaytext'] = 'string'
"""network offering id the network is created from"""
self.networkofferingid = None
self.typeInfo['networkofferingid'] = 'string'
"""name of the network offering the network is created from"""
self.networkofferingname = None
self.typeInfo['networkofferingname'] = 'string'
"""the physical network id"""
self.physicalnetworkid = None
self.typeInfo['physicalnetworkid'] = 'string'
"""the project name of the address"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the ipaddress"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""related to what other network configuration"""
self.related = None
self.typeInfo['related'] = 'string'
"""the network's IP range not to be used by CloudStack guest VMs and can be used for non CloudStack purposes"""
self.reservediprange = None
self.typeInfo['reservediprange'] = 'string'
"""true network requires restart"""
self.restartrequired = None
self.typeInfo['restartrequired'] = 'boolean'
"""true if network supports specifying ip ranges, false otherwise"""
self.specifyipranges = None
self.typeInfo['specifyipranges'] = 'boolean'
"""state of the network"""
self.state = None
self.typeInfo['state'] = 'string'
"""true if network can span multiple zones"""
self.strechedl2subnet = None
self.typeInfo['strechedl2subnet'] = 'boolean'
"""true if users from subdomains can access the domain level network"""
self.subdomainaccess = None
self.typeInfo['subdomainaccess'] = 'boolean'
"""the traffic type of the network"""
self.traffictype = None
self.typeInfo['traffictype'] = 'string'
"""the type of the network"""
self.type = None
self.typeInfo['type'] = 'string'
"""The vlan of the network. This parameter is visible to ROOT admins only"""
self.vlan = None
self.typeInfo['vlan'] = 'string'
"""VPC the network belongs to"""
self.vpcid = None
self.typeInfo['vpcid'] = 'string'
"""zone id of the network"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the name of the zone the network belongs to"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""If a network is enabled for 'streched l2 subnet' then represents zones on which network currently spans"""
self.zonesnetworkspans = None
self.typeInfo['zonesnetworkspans'] = 'set'
"""the list of services"""
self.service = []
"""the list of resource tags associated with network"""
self.tags = []
class capability:
def __init__(self):
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
class provider:
def __init__(self):
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
class service:
def __init__(self):
""""the service name"""
self.name = None
""""the list of capabilities"""
self.capability = []
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
""""the service provider name"""
self.provider = []
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
|
apache-2.0
| -8,208,851,891,698,272,000
| 40.061151
| 133
| 0.599124
| false
| 4.37524
| false
| false
| false
|
Eigenlabs/EigenD
|
plg_sampler2/sf2.py
|
1
|
7476
|
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import piw
from pi import riff
from pi.riff import List,Struct,StructList,String,Root
import math
import copy
import struct
def read_samples_indirect(name,pos,len):
return piw.create_samplearray(name,pos,len)
class Sample:
def read(self,c):
return c.read_indirect(read_samples_indirect)
GEN_STARTADDROFS=0
GEN_ENDADDROFS=1
GEN_STARTLOOPADDROFS=2
GEN_ENDLOOPADDROFS=3
GEN_STARTADDRCOARSEOFS=4
GEN_ENDADDRCOARSEOFS=12
GEN_PAN=17
GEN_INSTRUMENT=41
GEN_KEYRANGE=43
GEN_VELRANGE=44
GEN_INITIALATTENUATION=48
GEN_STARTLOOPADDRCOARSEOFS=45
GEN_ENDLOOPADDRCOARSEOFS=50
GEN_COARSETUNE=51
GEN_FINETUNE=52
GEN_SAMPLEID=53
GEN_SAMPLEMODE=54
GEN_OVERRIDEROOTKEY=58
GEN_AHDSR_DELAY=33
GEN_AHDSR_ATTACK=34
GEN_AHDSR_HOLD=35
GEN_AHDSR_DECAY=36
GEN_AHDSR_SUSTAIN=37
GEN_AHDSR_RELEASE=38
gen_defaults = {
GEN_AHDSR_DELAY: -12000.0,
GEN_AHDSR_ATTACK: -12000.0,
GEN_AHDSR_HOLD: -12000.0,
GEN_AHDSR_SUSTAIN: 0.0,
GEN_AHDSR_RELEASE: -12000.0,
GEN_AHDSR_DECAY: -12000.0,
GEN_COARSETUNE: 0.0,
GEN_FINETUNE: 0.0,
GEN_KEYRANGE: (0,127),
GEN_VELRANGE: (0,127),
}
gen_preset_ignore = ( GEN_KEYRANGE, GEN_VELRANGE )
class GenList:
def read1(self,r):
id = struct.unpack('<H',r[0:2])[0]
if id in (GEN_KEYRANGE,GEN_VELRANGE):
return (id,struct.unpack('BB',r[2:4]))
else:
return (id,struct.unpack('<h',r[2:4])[0])
def read(self,c):
s = []
while c.remain>=4:
s.append(self.read1(c.read(4)))
return s
SF2 = Root('sfbk', List(
INFO=List(
ifil=Struct('<HH'), isng=String(), INAM=String(), irom=String(),
iver=Struct('<HH'), ICRD=String(), IENG=String(), IPRD=String(),
ICOP=String(), ICMT=String(65536), ISFT=String()),
sdta=List(smpl=Sample()),
pdta=List(
phdr=StructList('<20sHHHLLL'), pbag=StructList('<HH'), pmod=StructList('<HHhHH'), pgen=GenList(),
inst=StructList('<20sH'), ibag=StructList('<HH'), imod=StructList('<HHhHH'), igen=GenList(),
shdr=StructList('<20sLLLLLBbHH'))))
def mtof(m,transpose):
m = float(m)-float(transpose)
return 440.0*math.pow(2.0,(m-69.0)/12.0)
def mtov(m):
return float(m)/127.0
def etot(m):
m = pow(2.0,float(m)/1200.0)
if m<0.01: m=0
return m
def etos(m):
if m <= 0: return 1.0
if m >= 1000: return 0.0
return 1.0-(float(m)/1000.0)
def etop(m):
return float(m)/500.0
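# Illustrative values for the helpers above (not part of the original module,
# derived directly from the formulas): MIDI note 69 is concert A, SoundFont
# envelope times are in timecents and attenuation is in centibels.
#   mtof(69, 0)  -> 440.0 Hz    mtof(69, 12) -> 220.0 Hz (transposed down an octave)
#   etot(0)      -> 1.0 s       etot(-12000) -> 0 (clamped below 0.01 s)
#   etos(500)    -> 0.5         etos(1000)   -> 0.0 sustain level
#   60 centibels of attenuation scale amplitude by about 0.5 (10 ** (-60/200.0))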
class ZoneBuilder:
def __init__(self,bag,gen,mod,index,base=None,add=None):
self.gen = {}
if base:
self.gen.update(base.gen)
gs,ms = bag[index]
ge,me = bag[index+1]
for g in range(gs,ge): self.__addgen(gen[g])
for m in range(ms,me): self.__addmod(mod[m])
if add:
for k,v in add.gen.iteritems():
if k in gen_preset_ignore:
continue
o = self.genget(k)
if type(v)==tuple:
#self.gen[k] = (max(o[0],v[0]),min(o[1],v[1]))
self.gen[k] = v
else:
self.gen[k] = o+v
def __addgen(self,g):
self.gen[g[0]] = g[1]
def __addmod(self,m):
pass
def __adjustpos(self,val,fg,cg):
return val+self.genget(fg)+(32768*self.genget(cg))
def genget(self,k):
return self.gen.get(k,gen_defaults.get(k,0))
def zone(self,smpl,shdr,transpose):
kr = self.genget(GEN_KEYRANGE)
vr = self.genget(GEN_VELRANGE)
de = etot(self.genget(GEN_AHDSR_DELAY))
a = etot(self.genget(GEN_AHDSR_ATTACK))
h = etot(self.genget(GEN_AHDSR_HOLD))
dc = etot(self.genget(GEN_AHDSR_DECAY))
sus = etos(self.genget(GEN_AHDSR_SUSTAIN))
r = etot(self.genget(GEN_AHDSR_RELEASE))
p = etop(self.genget(GEN_PAN))
n,s,e,ls,le,sr,op,_,_,_ = shdr[self.gen[GEN_SAMPLEID]]
rk = float(self.gen.get(GEN_OVERRIDEROOTKEY,op))
rk -= float(self.genget(GEN_COARSETUNE))
rk -= (float(self.genget(GEN_FINETUNE))/100.0)
rf = mtof(rk,transpose)
looping = False
if self.gen.has_key(GEN_SAMPLEMODE):
if self.gen[GEN_SAMPLEMODE] != 0:
looping = True
start = self.__adjustpos(s,GEN_STARTADDROFS,GEN_STARTADDRCOARSEOFS)
end = self.__adjustpos(e,GEN_ENDADDROFS,GEN_ENDADDRCOARSEOFS)
if looping:
loopstart = self.__adjustpos(ls,GEN_STARTLOOPADDROFS,GEN_STARTLOOPADDRCOARSEOFS)
loopend = self.__adjustpos(le,GEN_ENDLOOPADDROFS,GEN_ENDLOOPADDRCOARSEOFS)
else:
loopstart = 0
loopend = 0
attcb = float(self.gen.get(GEN_INITIALATTENUATION,0))
att = math.pow(10.0,-attcb/200.0)
smpl = piw.create_sample(smpl,start,end,loopstart,loopend,sr,rf,att)
zz = piw.create_zone(mtof(float(kr[0])-0.5,transpose), mtof(float(kr[1])+0.5,transpose), mtov(float(vr[0])-0.5), mtov(float(vr[1])+0.5),de,a,h,dc,sus,r,p,smpl)
return zz
def __str__(self):
return str(self.gen)
def load_soundfont(file,bk,pre,transpose):
print 'loading bank',bk,'preset',pre,'from',file
f = open(file,'rb',0)
sf = SF2.read(f,name=file)
f.close()
pbs = None
pbe = None
for (n,p,b,i,l,g,m) in sf['pdta']['phdr']:
if pbs is not None:
pbe = i
break
if p==pre and b==bk:
pbs = i
if pbs is None or pbe is None:
raise RuntimeError('preset %d bank %d not found in soundfont %s' % (pre,bk,file))
p = piw.create_preset()
gpzb = None
gizb = None
for pi in range(pbs,pbe):
pzb = ZoneBuilder(sf['pdta']['pbag'],sf['pdta']['pgen'],sf['pdta']['pmod'],pi,base=gpzb)
inst = pzb.gen.get(GEN_INSTRUMENT)
if inst is not None:
for ii in range(sf['pdta']['inst'][inst][1],sf['pdta']['inst'][inst+1][1]):
izb = ZoneBuilder(sf['pdta']['ibag'],sf['pdta']['igen'],sf['pdta']['imod'],ii,base=gizb,add=pzb)
if izb.gen.has_key(GEN_SAMPLEID):
p.add_zone(izb.zone(sf['sdta']['smpl'],sf['pdta']['shdr'],transpose))
else:
if gizb is None:
gizb = izb
else:
if gpzb is None:
gpzb = pzb
return p
SF2info = Root('sfbk', List(pdta=List(phdr=StructList('<20sHH14x'))))
def __trim(s):
if s.count('\0'):
return s[:s.index('\0')]
return s
def sf_info(file):
file = open(file,'rb',0)
data = SF2info.read(file)
file.close()
for n,p,b in data['pdta']['phdr'][:-1]:
yield __trim(n),p,b
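# Illustrative use of sf_info (the filename is hypothetical): list the name,
# preset number and bank of every preset in a SoundFont.
#   for name, preset, bank in sf_info('example.sf2'):
#       print name, preset, bank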
|
gpl-3.0
| 9,204,759,465,939,505,000
| 27.753846
| 167
| 0.58534
| false
| 2.820068
| false
| false
| false
|
qiyuangong/APA
|
anatomizer.py
|
1
|
3783
|
import random, heapq
import pdb
# by Qiyuan Gong
# qiyuangong@gmail.com
# @INPROCEEDINGS{
# author = {Xiao, Xiaokui and Tao, Yufei},
# title = {Anatomy: simple and effective privacy preservation},
# booktitle = {Proceedings of the 32nd international conference on Very large data
# bases},
# year = {2006},
# series = {VLDB '06},
# pages = {139--150},
# publisher = {VLDB Endowment},
# acmid = {1164141},
# location = {Seoul, Korea},
# numpages = {12}
# }
_DEBUG = True
class SABucket(object):
def __init__(self, data_index, index):
self.member_index = data_index[:]
self.index = index
def pop_element(self):
"""pop an element from SABucket
"""
return self.member_index.pop()
class Group(object):
def __init__(self):
self.index = 0
self.member_index = []
self.checklist = set()
def add_element(self, record_index, index):
"""add element pair (record, index) to Group
"""
self.member_index.append(record_index)
self.checklist.add(index)
def check_index(self, index):
"""Check if index is in checklist
"""
if index in self.checklist:
return True
return False
def list_to_str(value_list, sep=';'):
"""covert sorted str list (sorted by default) to str
value (splited by sep). This fuction is value safe, which means
value_list will not be changed.
"""
temp = value_list[:]
return sep.join(temp)
def anatomizer(data, L):
"""
    Only one SA (sensitive attribute) is supported in anatomy.
    Separates the grouped members into QIT (quasi-identifier table) and
    SAT (sensitive attribute table). Uses a heap to pick the l largest buckets.
    L denotes the l in l-diversity.
    data is a list of records, i.e. [qi1, qi2, sa]
"""
groups = []
buckets = {}
result = []
suppress = []
h = []
if _DEBUG:
print '*' * 10
print "Begin Anatomizer!"
print "L=%d" % L
# Assign SA into buckets
for i, temp in enumerate(data):
# convert list to str
list_temp = list_to_str(temp[-1])
try:
buckets[list_temp].append(i)
        except KeyError:
buckets[list_temp] = [i]
# group stage
# use heap to sort buckets
for i, temp in enumerate(buckets.values()):
        # push negated length so heapq (a min-heap) pops the largest bucket first
pos = len(temp) * -1
if pos == 0:
continue
heapq.heappush(h, (pos, SABucket(temp, i)))
while len(h) >= L:
newgroup = Group()
length_list = []
SAB_list = []
        # choose the l largest buckets
for i in range(L):
(length, temp) = heapq.heappop(h)
length_list.append(length)
SAB_list.append(temp)
        # pop an element from chosen buckets
for i in range(L):
temp = SAB_list[i]
length = length_list[i]
newgroup.add_element(temp.pop_element(), temp.index)
length += 1
if length == 0:
continue
# push new tuple to heap
heapq.heappush(h, (length, temp))
groups.append(newgroup)
# residue-assign stage
while len(h):
(length, temp) = heapq.heappop(h)
index = temp.index
while temp.member_index:
for g in groups:
if g.check_index(index) == False:
g.add_element(temp.pop_element(), index)
break
else:
suppress.extend(temp.member_index[:])
break
# transform result
for i, t in enumerate(groups):
t.index = i
result.append(t.member_index[:])
if _DEBUG:
print 'NO. of Suppress after anatomy = %d' % len(suppress)
print 'NO. of groups = %d' % len(result)
return result
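if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original module):
    # each record is [qi1, qi2, sa] where sa is itself a list of values.
    toy = [['25', 'm', ['flu']],
           ['30', 'f', ['cold']],
           ['35', 'm', ['flu']],
           ['40', 'f', ['cancer']]]
    print anatomizer(toy, 2)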
|
mit
| 1,053,349,836,134,681,500
| 25.089655
| 84
| 0.548506
| false
| 3.562147
| false
| false
| false
|
xlqian/navitia
|
source/jormungandr/jormungandr/scenarios/ridesharing/ridesharing_journey.py
|
1
|
2827
|
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
class Gender(object):
"""
Used as an enum
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
class Individual(object):
# https://stackoverflow.com/a/28059785/1614576
__slots__ = ('alias', 'gender', 'image', 'rate', 'rate_count')
def __init__(self, alias, gender, image, rate, rate_count):
self.alias = alias
self.gender = gender
self.image = image
self.rate = rate
self.rate_count = rate_count
class Place(object):
__slots__ = ('addr', 'lat', 'lon')
def __init__(self, addr, lat, lon):
self.addr = addr
self.lat = lat
self.lon = lon
class MetaData(object):
__slots__ = ('system_id', 'network', 'rating_scale_min', 'rating_scale_max')
def __init__(self, system_id, network, rating_scale_min, rating_scale_max):
self.system_id = system_id
self.network = network
self.rating_scale_min = rating_scale_min
self.rating_scale_max = rating_scale_max
class RidesharingJourney(object):
__slots__ = (
'metadata',
'distance',
'shape', # a list of type_pb2.GeographicalCoord()
'ridesharing_ad',
'pickup_place',
'dropoff_place',
'pickup_date_time',
'dropoff_date_time',
# driver will be Individual
'driver',
'price',
'currency', # "centime" (EURO cents) is the preferred currency (price is filled accordingly)
'total_seats',
'available_seats',
)
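# Illustrative construction (all field values below are made up, not real data):
#   journey = RidesharingJourney()
#   journey.metadata = MetaData('rs_system_01', 'ExampleNetwork', 1, 5)
#   journey.driver = Individual('alice', Gender.FEMALE, None, 4.5, 12)
#   journey.pickup_place = Place('1 rue de Rivoli, Paris', 48.8558, 2.3594)
#   journey.price, journey.currency = 250.0, 'centime'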
|
agpl-3.0
| -8,374,270,853,840,085,000
| 30.411111
| 101
| 0.651928
| false
| 3.583016
| false
| false
| false
|
acca90/django-tests
|
cello/produto/migrations/0001_initial.py
|
1
|
1354
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-11 00:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Produto',
fields=[
('prod_codigo', models.BigIntegerField(db_column='prod_codigo', primary_key=True, serialize=False)),
('prod_alias', models.CharField(db_column='prod_alias', max_length=50)),
('prod_descricao', models.CharField(db_column='prod_descricao', max_length=255)),
('prod_valor_venda', models.DecimalField(db_column='prod_valor_venda', decimal_places=2, max_digits=13)),
('prod_valor_compra', models.DecimalField(db_column='prod_valor_compra', decimal_places=2, max_digits=13)),
('prod_peso_b', models.DecimalField(db_column='prod_peso_b', decimal_places=2, max_digits=13)),
('prod_peso_l', models.DecimalField(db_column='prod_peso_l', decimal_places=2, max_digits=13)),
],
options={
'verbose_name': 'Produto',
'verbose_name_plural': 'Produtos',
'managed': True,
'db_table': 'produto',
},
),
]
|
mit
| 7,758,737,332,258,354,000
| 38.823529
| 123
| 0.573855
| false
| 3.699454
| false
| false
| false
|
coxmediagroup/dolphin
|
dolphin/testutils.py
|
1
|
4503
|
import os
from sys import stdout, stderr
from contextlib import contextmanager
from django.db.models import get_apps
from django.utils import simplejson as sj
from django.core import serializers
from django.conf import settings
from django.utils.itercompat import product
from .middleware import LocalStoreMiddleware
@contextmanager
def set_active(key, val):
"""Allows a flag to be switched to enabled"""
overrides = LocalStoreMiddleware.local.setdefault('overrides', {})
overrides[key] = val
yield
del overrides[key]
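# Example usage (illustrative; the flag name is hypothetical): force a flag on
# for the duration of a block, e.g. inside a test.
#
#   with set_active('new_checkout_flow', True):
#       ...  # code here sees the flag enabled via the override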
def load_redis_fixtures(fixture_labels, backend):
    # taken and modified from django.core.management.commands.loaddata
# Keep a count of the installed objects and fixtures
# changes marked by # + or # - and endchanges for +
# - removed intro code
fixture_count = 0
loaded_object_count = 0
fixture_object_count = 0
models = set()
humanize = lambda dirname: "'%s'" % dirname if dirname else 'absolute path'
# - removed cursor code, compression types
app_module_paths = []
for app in get_apps():
if hasattr(app, '__path__'):
# It's a 'models/' subpackage
for path in app.__path__:
app_module_paths.append(path)
else:
# It's a models.py module
app_module_paths.append(app.__file__)
app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
# - remove try, connection constraint
for fixture_label in fixture_labels:
parts = fixture_label.split('.')
if len(parts) == 1: # - remove compression
fixture_name = parts[0]
formats = serializers.get_public_serializer_formats()
else:
fixture_name, format = '.'.join(parts[:-1]), parts[-1]
if format in serializers.get_public_serializer_formats():
formats = [format]
else:
formats = []
# - remove formats
if os.path.isabs(fixture_name):
fixture_dirs = [fixture_name]
else:
fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
for fixture_dir in fixture_dirs:
# - remove verbosity
label_found = False
# - remove compression formats, verbosity
for format in formats:
file_name = '.'.join(
p for p in [
fixture_name, format
]
if p
)
full_path = os.path.join(fixture_dir, file_name)
# - remove compression method
try:
fixture = open(full_path, 'r')
except IOError:
# - remove verbosity
pass
else:
try:
if label_found:
stderr.write("Multiple fixtures named '%s' in %s. Aborting.\n" %
(fixture_name, humanize(fixture_dir)))
# - remove commit
return
fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
# - remove verbosity
# - remove generalized loading of fixture
# + customized loading of fixture
objects = sj.load(fixture)
for obj in objects:
objects_in_fixture += 1
#begin customization
if obj['model'] == 'dolphin.featureflag':
fields = obj['fields']
key = fields['name']
backend.update(key, fields)
#endchanges
loaded_object_count += loaded_objects_in_fixture
fixture_object_count += objects_in_fixture
label_found = True
finally:
fixture.close()
# If the fixture we loaded contains 0 objects, assume that an
# error was encountered during fixture loading.
if objects_in_fixture == 0:
# - remove verbosity
return
# - remove everything else
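# Illustrative call (sketch only; the fixture name is hypothetical and `backend`
# is assumed to expose update(key, fields) as used above):
#   load_redis_fixtures(['dolphin_flags.json'], backend)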
|
mit
| 5,224,790,747,942,182,000
| 34.456693
| 97
| 0.50433
| false
| 5.093891
| false
| false
| false
|
cmollet/macro-chef
|
yummly.py
|
1
|
3288
|
import requests
import time
try:
import simplejson as json
except ImportError:
import json
from pymongo import Connection
from paleo import paleo_ingredients, excludedIngredients
app_key = open('app.key').read().strip()
app_id = open('app_id.key').read().strip()
def mongoconn(name='bespin'):
return Connection()[name]
# Constants
BMR = 2142.87
multipliers = {
'sedentary': 1.2,
'lightly active': 1.375,
'moderately active': 1.55,
'very active': 1.725,
'extra active': 1.9
}
def genmacros():
macros = dict()
macros['max_carbs'] = 150
macros['max_sugar'] = 50
macros['min_fiber'] = 50
macros['min_protein'] = 200
macros['max_cals'] = BMR * multipliers['moderately active']
macros['rest_cals'] = 2630
macros['rest_carbs'] = 226.3
macros['rest_fat'] = 100.6
macros['workout_cals'] = 3945
macros['workout_carbs'] = 390.6
macros['workout_fat'] = 173.6
return macros
def makeurl(q, course='Main+Dishes'):
URL = 'http://api.yummly.com/v1/api/recipes?_app_id=%s&_app_key=%s&allowedAllergy[]=Gluten-Free&allowedAllergy[]=Dairy-Free&allowedAllergy[]=Peanut-Free&allowedAllergy[]=Soy-Free&allowedAllergy[]=Wheat-Free' % (app_id, app_key)
for i in excludedIngredients:
URL += '&excludedIngredient[]=%s' % i
URL += '&allowedCourse[]=%s' % course
URL += '&q=%s' % q
return URL
def load_recipes(q):
# Generate URL based on query and course type
# Get (up to) 10 recipes
# run getrecipe()
r = requests.get(url=makeurl(q))
matches = json.loads(r.text)
recipes = []
for i in matches['matches']:
time.sleep(2)
r = getrecipe(i['id'])
if r:
recipes.append(r)
return recipes
def getrecipe(recipe_id):
URL = 'http://api.yummly.com/v1/api/recipe/'+ recipe_id + '?app_id=%s&_app_key=%s' % (app_id, app_key)
r = requests.get(URL, headers = {'X-Yummly-App-ID': app_id, 'X-Yummly-App-Key': app_key})
recipe = json.loads(r.text)
if recipe['nutritionEstimates']:
return recipe
def nutristats(recipe):
macros = { 'cals': 0, 'protein': 0, 'carbs': 0, 'fat': 0, 'fiber' : 0}
for i in recipe['nutritionEstimates']:
if i['attribute'] == 'ENERC_KCAL':
macros['cals'] += recipe['numberOfServings'] * i['value']
elif i['attribute'] == 'PROCNT':
macros['protein'] += recipe['numberOfServings'] * i['value']
elif i['attribute'] == 'CHOCDF':
macros['carbs'] += recipe['numberOfServings'] * i['value']
elif i['attribute'] == 'FAT':
macros['fat'] += recipe['numberOfServings'] * i['value']
elif i['attribute'] == 'FIBTG':
macros['fiber'] += recipe['numberOfServings'] * i['value']
return macros
def macro_resolve(recipe, day):
macros = genmacros()
rec_macros = nutristats(recipe)
new_macros = {}
new_macros['cals'] = macros[day+'_cals'] - rec_macros['cals']
new_macros['protein'] = macros['min_protein'] - rec_macros['protein']
new_macros['carbs'] = macros[day+'_carbs'] - rec_macros['carbs']
new_macros['fat'] = macros[day+'_fat'] - rec_macros['fat']
new_macros['fiber'] = macros['min_fiber'] - rec_macros['fiber']
return new_macros
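if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; the recipe payload below is made
    # up rather than a real Yummly API response).
    toy_recipe = {'numberOfServings': 2,
                  'nutritionEstimates': [{'attribute': 'ENERC_KCAL', 'value': 350.0},
                                         {'attribute': 'PROCNT', 'value': 30.0}]}
    print nutristats(toy_recipe)             # totals scaled by servings
    print macro_resolve(toy_recipe, 'rest')  # macros left for a rest day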
|
mit
| -4,144,650,630,414,836,000
| 32.896907
| 231
| 0.601277
| false
| 3.058605
| false
| false
| false
|
shendo/peerz
|
peerz/examples/visualise.py
|
1
|
1704
|
# Peerz - P2P python library using ZeroMQ sockets and gevent
# Copyright (C) 2014-2015 Steve Henderson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import tempfile
import urllib
import urllib2
import webbrowser
from peerz.persistence import LocalStorage
def get_tree(root, port):
local = LocalStorage(root, port)
return local.fetch('nodetree')
def render_graph(dot):
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as tmp:
data = urllib.urlencode({'cht': 'gv:dot', 'chl': dot})
print data
u = urllib2.urlopen('http://chart.apis.google.com/chart', data)
tmp.write(u.read())
tmp.close()
webbrowser.open_new_tab(tmp.name)
if __name__ == '__main__':
"""
Simple tool to read the state files from running helloworld example
and plot the routing tree for the chosen node using google charts.
"""
root = '/tmp/testing'
port = 7111
if len(sys.argv) > 2:
port = int(sys.argv[2])
if len(sys.argv) > 1:
root = sys.argv[1]
dot = get_tree(root, port).visualise()
render_graph(dot)
|
gpl-3.0
| 1,749,261,066,399,993,300
| 33.08
| 73
| 0.692488
| false
| 3.728665
| false
| false
| false
|
nathanielvarona/airflow
|
tests/providers/qubole/operators/test_qubole_check.py
|
1
|
7409
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from datetime import datetime
from unittest import mock
import pytest
from qds_sdk.commands import HiveCommand
from airflow.exceptions import AirflowException
from airflow.models import DAG
from airflow.providers.qubole.hooks.qubole import QuboleHook
from airflow.providers.qubole.hooks.qubole_check import QuboleCheckHook
from airflow.providers.qubole.operators.qubole_check import (
QuboleCheckOperator,
QuboleValueCheckOperator,
SQLCheckOperator,
SQLValueCheckOperator,
_QuboleCheckOperatorMixin,
)
# pylint: disable=unused-argument
@pytest.mark.parametrize(
"operator_class, kwargs, parent_check_operator",
[
(QuboleCheckOperator, dict(sql='Select * from test_table'), SQLCheckOperator),
(
QuboleValueCheckOperator,
dict(sql='Select * from test_table', pass_value=95),
SQLValueCheckOperator,
),
],
)
class TestQuboleCheckMixin:
def setup(self):
self.task_id = 'test_task'
def __construct_operator(self, operator_class, **kwargs):
dag = DAG('test_dag', start_date=datetime(2017, 1, 1))
return operator_class(task_id=self.task_id, dag=dag, command_type='hivecmd', **kwargs)
def test_get_hook_with_context(self, operator_class, kwargs, parent_check_operator):
operator = self.__construct_operator(operator_class=operator_class, **kwargs)
assert isinstance(operator.get_hook(), QuboleCheckHook)
context = {'exec_date': 'today'}
operator._hook_context = context
hook = operator.get_hook()
assert hook.context == context
@mock.patch.object(_QuboleCheckOperatorMixin, "get_db_hook")
@mock.patch.object(_QuboleCheckOperatorMixin, "get_hook")
def test_get_db_hook(
self, mock_get_hook, mock_get_db_hook, operator_class, kwargs, parent_check_operator
):
operator = self.__construct_operator(operator_class=operator_class, **kwargs)
operator.get_db_hook()
mock_get_db_hook.assert_called_once()
operator.get_hook()
mock_get_hook.assert_called_once()
def test_execute(self, operator_class, kwargs, parent_check_operator):
operator = self.__construct_operator(operator_class=operator_class, **kwargs)
with mock.patch.object(parent_check_operator, 'execute') as mock_execute:
operator.execute()
mock_execute.assert_called_once()
@mock.patch('airflow.providers.qubole.operators.qubole_check.handle_airflow_exception')
def test_execute_fail(self, mock_handle_airflow_exception, operator_class, kwargs, parent_check_operator):
operator = self.__construct_operator(operator_class=operator_class, **kwargs)
with mock.patch.object(parent_check_operator, 'execute') as mock_execute:
mock_execute.side_effect = AirflowException()
operator.execute()
mock_execute.assert_called_once()
mock_handle_airflow_exception.assert_called_once()
class TestQuboleValueCheckOperator(unittest.TestCase):
def setUp(self):
self.task_id = 'test_task'
self.conn_id = 'default_conn'
def __construct_operator(self, query, pass_value, tolerance=None, results_parser_callable=None):
dag = DAG('test_dag', start_date=datetime(2017, 1, 1))
return QuboleValueCheckOperator(
dag=dag,
task_id=self.task_id,
conn_id=self.conn_id,
query=query,
pass_value=pass_value,
results_parser_callable=results_parser_callable,
command_type='hivecmd',
tolerance=tolerance,
)
def test_pass_value_template(self):
pass_value_str = "2018-03-22"
operator = self.__construct_operator('select date from tab1;', "{{ ds }}")
result = operator.render_template(operator.pass_value, {'ds': pass_value_str})
assert operator.task_id == self.task_id
assert result == pass_value_str
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_pass(self, mock_get_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [10]
mock_get_hook.return_value = mock_hook
query = 'select value from tab1 limit 1;'
operator = self.__construct_operator(query, 5, 1)
operator.execute(None)
mock_hook.get_first.assert_called_once_with(query)
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_assertion_fail(self, mock_get_hook):
mock_cmd = mock.Mock()
mock_cmd.status = 'done'
mock_cmd.id = 123
mock_cmd.is_success = mock.Mock(return_value=HiveCommand.is_success(mock_cmd.status))
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [11]
mock_hook.cmd = mock_cmd
mock_get_hook.return_value = mock_hook
operator = self.__construct_operator('select value from tab1 limit 1;', 5, 1)
with pytest.raises(AirflowException, match='Qubole Command Id: ' + str(mock_cmd.id)):
operator.execute()
mock_cmd.is_success.assert_called_once_with(mock_cmd.status)
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_assert_query_fail(self, mock_get_hook):
mock_cmd = mock.Mock()
mock_cmd.status = 'error'
mock_cmd.id = 123
mock_cmd.is_success = mock.Mock(return_value=HiveCommand.is_success(mock_cmd.status))
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [11]
mock_hook.cmd = mock_cmd
mock_get_hook.return_value = mock_hook
operator = self.__construct_operator('select value from tab1 limit 1;', 5, 1)
with pytest.raises(AirflowException) as ctx:
operator.execute()
assert 'Qubole Command Id: ' not in str(ctx.value)
mock_cmd.is_success.assert_called_once_with(mock_cmd.status)
@mock.patch.object(QuboleCheckHook, 'get_query_results')
@mock.patch.object(QuboleHook, 'execute')
def test_results_parser_callable(self, mock_execute, mock_get_query_results):
mock_execute.return_value = None
pass_value = 'pass_value'
mock_get_query_results.return_value = pass_value
results_parser_callable = mock.Mock()
results_parser_callable.return_value = [pass_value]
operator = self.__construct_operator(
'select value from tab1 limit 1;', pass_value, None, results_parser_callable
)
operator.execute()
results_parser_callable.assert_called_once_with([pass_value])
|
apache-2.0
| 7,868,135,222,770,688,000
| 36.419192
| 110
| 0.671751
| false
| 3.702649
| true
| false
| false
|
AvishaySebban/NTM-Monitoring
|
ansible/psutil-3.0.1/psutil/__init__.py
|
1
|
66350
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""psutil is a cross-platform library for retrieving information on
running processes and system utilization (CPU, memory, disks, network)
in Python.
"""
from __future__ import division
import collections
import errno
import functools
import os
import signal
import subprocess
import sys
import time
try:
import pwd
except ImportError:
pwd = None
from . import _common
from ._common import memoize
from ._compat import callable, long
from ._compat import PY3 as _PY3
from ._common import (STATUS_RUNNING, # NOQA
STATUS_SLEEPING,
STATUS_DISK_SLEEP,
STATUS_STOPPED,
STATUS_TRACING_STOP,
STATUS_ZOMBIE,
STATUS_DEAD,
STATUS_WAKING,
STATUS_LOCKED,
STATUS_IDLE, # bsd
STATUS_WAITING) # bsd
from ._common import (CONN_ESTABLISHED,
CONN_SYN_SENT,
CONN_SYN_RECV,
CONN_FIN_WAIT1,
CONN_FIN_WAIT2,
CONN_TIME_WAIT,
CONN_CLOSE,
CONN_CLOSE_WAIT,
CONN_LAST_ACK,
CONN_LISTEN,
CONN_CLOSING,
CONN_NONE)
from ._common import (NIC_DUPLEX_FULL, # NOQA
NIC_DUPLEX_HALF,
NIC_DUPLEX_UNKNOWN)
if sys.platform.startswith("linux"):
from . import _pslinux as _psplatform
from ._pslinux import (IOPRIO_CLASS_NONE, # NOQA
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE)
# Linux >= 2.6.36
if _psplatform.HAS_PRLIMIT:
from ._psutil_linux import (RLIM_INFINITY, # NOQA
RLIMIT_AS,
RLIMIT_CORE,
RLIMIT_CPU,
RLIMIT_DATA,
RLIMIT_FSIZE,
RLIMIT_LOCKS,
RLIMIT_MEMLOCK,
RLIMIT_NOFILE,
RLIMIT_NPROC,
RLIMIT_RSS,
RLIMIT_STACK)
# Kinda ugly but considerably faster than using hasattr() and
# setattr() against the module object (we are at import time:
# speed matters).
from . import _psutil_linux
try:
RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
except AttributeError:
pass
try:
RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
except AttributeError:
pass
try:
RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
except AttributeError:
pass
try:
RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
except AttributeError:
pass
try:
RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
except AttributeError:
pass
del _psutil_linux
elif sys.platform.startswith("win32"):
from . import _pswindows as _psplatform
from ._psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS, # NOQA
BELOW_NORMAL_PRIORITY_CLASS,
HIGH_PRIORITY_CLASS,
IDLE_PRIORITY_CLASS,
NORMAL_PRIORITY_CLASS,
REALTIME_PRIORITY_CLASS)
from ._pswindows import CONN_DELETE_TCB # NOQA
elif sys.platform.startswith("darwin"):
from . import _psosx as _psplatform
elif sys.platform.startswith("freebsd"):
from . import _psbsd as _psplatform
elif sys.platform.startswith("sunos"):
from . import _pssunos as _psplatform
from ._pssunos import (CONN_IDLE, # NOQA
CONN_BOUND)
else:
raise NotImplementedError('platform %s is not supported' % sys.platform)
__all__ = [
# exceptions
"Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
"TimeoutExpired",
# constants
"version_info", "__version__",
"STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
"STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
"STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
"AF_LINK",
"NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
# classes
"Process", "Popen",
# functions
"pid_exists", "pids", "process_iter", "wait_procs", # proc
"virtual_memory", "swap_memory", # memory
"cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
"net_io_counters", "net_connections", "net_if_addrs", # network
"net_if_stats",
"disk_io_counters", "disk_partitions", "disk_usage", # disk
"users", "boot_time", # others
]
__all__.extend(_psplatform.__extra__all__)
__author__ = "Giampaolo Rodola'"
__version__ = "3.0.1"
version_info = tuple([int(num) for num in __version__.split('.')])
AF_LINK = _psplatform.AF_LINK
_TOTAL_PHYMEM = None
_POSIX = os.name == 'posix'
_WINDOWS = os.name == 'nt'
_timer = getattr(time, 'monotonic', time.time)
# Sanity check in case the user messed up with psutil installation
# or did something weird with sys.path. In this case we might end
# up importing a python module using a C extension module which
# was compiled for a different version of psutil.
# We want to prevent that by failing sooner rather than later.
# See: https://github.com/giampaolo/psutil/issues/564
if (int(__version__.replace('.', '')) !=
getattr(_psplatform.cext, 'version', None)):
msg = "version conflict: %r C extension module was built for another " \
"version of psutil (different than %s)" % (_psplatform.cext.__file__,
__version__)
raise ImportError(msg)
# =====================================================================
# --- exceptions
# =====================================================================
class Error(Exception):
"""Base exception class. All other psutil exceptions inherit
from this one.
"""
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return self.msg
class NoSuchProcess(Error):
"""Exception raised when a process with a certain PID doesn't
or no longer exists.
"""
def __init__(self, pid, name=None, msg=None):
Error.__init__(self, msg)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if name:
details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
else:
details = "(pid=%s)" % self.pid
self.msg = "process no longer exists " + details
class ZombieProcess(NoSuchProcess):
"""Exception raised when querying a zombie process. This is
raised on OSX, BSD and Solaris only, and not always: depending
on the query the OS may be able to succeed anyway.
On Linux all zombie processes are queryable (hence this is never
raised). Windows doesn't have zombie processes.
"""
def __init__(self, pid, name=None, ppid=None, msg=None):
Error.__init__(self, msg)
self.pid = pid
self.ppid = ppid
self.name = name
self.msg = msg
if msg is None:
if name and ppid:
details = "(pid=%s, name=%s, ppid=%s)" % (
self.pid, repr(self.name), self.ppid)
elif name:
details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
else:
details = "(pid=%s)" % self.pid
self.msg = "process still exists but it's a zombie " + details
class AccessDenied(Error):
"""Exception raised when permission to perform an action is denied."""
def __init__(self, pid=None, name=None, msg=None):
Error.__init__(self, msg)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if (pid is not None) and (name is not None):
self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
elif (pid is not None):
self.msg = "(pid=%s)" % self.pid
else:
self.msg = ""
class TimeoutExpired(Error):
"""Raised on Process.wait(timeout) if timeout expires and process
is still alive.
"""
def __init__(self, seconds, pid=None, name=None):
Error.__init__(self, "timeout after %s seconds" % seconds)
self.seconds = seconds
self.pid = pid
self.name = name
if (pid is not None) and (name is not None):
self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
elif (pid is not None):
self.msg += " (pid=%s)" % self.pid
# push exception classes into platform specific module namespace
_psplatform.NoSuchProcess = NoSuchProcess
_psplatform.ZombieProcess = ZombieProcess
_psplatform.AccessDenied = AccessDenied
_psplatform.TimeoutExpired = TimeoutExpired
# =====================================================================
# --- Process class
# =====================================================================
def _assert_pid_not_reused(fun):
"""Decorator which raises NoSuchProcess in case a process is no
longer running or its PID has been reused.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
return fun(self, *args, **kwargs)
return wrapper
class Process(object):
"""Represents an OS process with the given PID.
If PID is omitted current process PID (os.getpid()) is used.
Raise NoSuchProcess if PID does not exist.
Note that most of the methods of this class do not make sure
the PID of the process being queried has been reused over time.
That means you might end up retrieving an information referring
to another process in case the original one this instance
refers to is gone in the meantime.
The only exceptions for which process identity is pre-emptively
checked and guaranteed are:
- parent()
- children()
- nice() (set)
- ionice() (set)
- rlimit() (set)
- cpu_affinity (set)
- suspend()
- resume()
- send_signal()
- terminate()
- kill()
To prevent this problem for all other methods you can:
- use is_running() before querying the process
- if you're continuously iterating over a set of Process
instances use process_iter() which pre-emptively checks
process identity for every yielded instance
"""
def __init__(self, pid=None):
self._init(pid)
def _init(self, pid, _ignore_nsp=False):
if pid is None:
pid = os.getpid()
else:
if not _PY3 and not isinstance(pid, (int, long)):
raise TypeError('pid must be an integer (got %r)' % pid)
if pid < 0:
raise ValueError('pid must be a positive integer (got %s)'
% pid)
self._pid = pid
self._name = None
self._exe = None
self._create_time = None
self._gone = False
self._hash = None
# used for caching on Windows only (on POSIX ppid may change)
self._ppid = None
# platform-specific modules define an _psplatform.Process
# implementation class
self._proc = _psplatform.Process(pid)
self._last_sys_cpu_times = None
self._last_proc_cpu_times = None
# cache creation time for later use in is_running() method
try:
self.create_time()
except AccessDenied:
# we should never get here as AFAIK we're able to get
# process creation time on all platforms even as a
# limited user
pass
except ZombieProcess:
# Let's consider a zombie process as legitimate as
# technically it's still alive (it can be queried,
# although not always, and it's returned by pids()).
pass
except NoSuchProcess:
if not _ignore_nsp:
msg = 'no process found with pid %s' % pid
raise NoSuchProcess(pid, None, msg)
else:
self._gone = True
# This pair is supposed to identify a Process instance
# uniquely over time (the PID alone is not enough as
# it might refer to a process whose PID has been reused).
# This will be used later in __eq__() and is_running().
self._ident = (self.pid, self._create_time)
def __str__(self):
try:
pid = self.pid
name = repr(self.name())
except ZombieProcess:
details = "(pid=%s (zombie))" % self.pid
except NoSuchProcess:
details = "(pid=%s (terminated))" % self.pid
except AccessDenied:
details = "(pid=%s)" % (self.pid)
else:
details = "(pid=%s, name=%s)" % (pid, name)
return "%s.%s%s" % (self.__class__.__module__,
self.__class__.__name__, details)
def __repr__(self):
return "<%s at %s>" % (self.__str__(), id(self))
def __eq__(self, other):
# Test for equality with another Process object based
# on PID and creation time.
if not isinstance(other, Process):
return NotImplemented
return self._ident == other._ident
def __ne__(self, other):
return not self == other
def __hash__(self):
if self._hash is None:
self._hash = hash(self._ident)
return self._hash
# --- utility methods
def as_dict(self, attrs=None, ad_value=None):
"""Utility method returning process information as a
hashable dictionary.
If 'attrs' is specified it must be a list of strings
reflecting available Process class' attribute names
(e.g. ['cpu_times', 'name']) else all public (read
only) attributes are assumed.
'ad_value' is the value which gets assigned in case
AccessDenied or ZombieProcess exception is raised when
retrieving that particular process information.
"""
excluded_names = set(
['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
'is_running', 'as_dict', 'parent', 'children', 'rlimit'])
retdict = dict()
ls = set(attrs or [x for x in dir(self)])
for name in ls:
if name.startswith('_'):
continue
if name in excluded_names:
continue
try:
attr = getattr(self, name)
if callable(attr):
ret = attr()
else:
ret = attr
except (AccessDenied, ZombieProcess):
ret = ad_value
except NotImplementedError:
# in case of not implemented functionality (may happen
# on old or exotic systems) we want to crash only if
# the user explicitly asked for that particular attr
if attrs:
raise
continue
retdict[name] = ret
return retdict
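# Illustrative usage sketch (added for clarity, not part of upstream psutil;
# the returned values are hypothetical):
# >>> p = Process()
# >>> p.as_dict(attrs=['pid', 'name', 'username'], ad_value=None)
# {'pid': 1234, 'name': 'python', 'username': 'giampaolo'}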
def parent(self):
"""Return the parent process as a Process object pre-emptively
checking whether PID has been reused.
If no parent is known return None.
"""
ppid = self.ppid()
if ppid is not None:
ctime = self.create_time()
try:
parent = Process(ppid)
if parent.create_time() <= ctime:
return parent
# ...else ppid has been reused by another process
except NoSuchProcess:
pass
def is_running(self):
"""Return whether this process is running.
It also checks if PID has been reused by another process in
which case return False.
"""
if self._gone:
return False
try:
# Checking if PID is alive is not enough as the PID might
# have been reused by another process: we also want to
# check process identity.
# Process identity / uniqueness over time is guaranteed by
# (PID + creation time) and that is verified in __eq__.
return self == Process(self.pid)
except NoSuchProcess:
self._gone = True
return False
# --- actual API
@property
def pid(self):
"""The process PID."""
return self._pid
def ppid(self):
"""The process parent PID.
On Windows the return value is cached after first call.
"""
# On POSIX we don't want to cache the ppid as it may unexpectedly
# change to 1 (init) in case this process turns into a zombie:
# https://github.com/giampaolo/psutil/issues/321
# http://stackoverflow.com/questions/356722/
# XXX should we check creation time here rather than in
# Process.parent()?
if _POSIX:
return self._proc.ppid()
else:
self._ppid = self._ppid or self._proc.ppid()
return self._ppid
def name(self):
"""The process name. The return value is cached after first call."""
if self._name is None:
name = self._proc.name()
if _POSIX and len(name) >= 15:
# On UNIX the name gets truncated to the first 15 characters.
# If it matches the first part of the cmdline we return that
# one instead because it's usually more explicative.
# Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
try:
cmdline = self.cmdline()
except AccessDenied:
pass
else:
if cmdline:
extended_name = os.path.basename(cmdline[0])
if extended_name.startswith(name):
name = extended_name
self._proc._name = name
self._name = name
return self._name
def exe(self):
"""The process executable as an absolute path.
May also be an empty string.
The return value is cached after first call.
"""
def guess_it(fallback):
# try to guess exe from cmdline[0] in absence of a native
# exe representation
cmdline = self.cmdline()
if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
exe = cmdline[0] # the possible exe
# Attempt to guess only in case of an absolute path.
# It is not safe otherwise as the process might have
# changed cwd.
if (os.path.isabs(exe) and
os.path.isfile(exe) and
os.access(exe, os.X_OK)):
return exe
if isinstance(fallback, AccessDenied):
raise fallback
return fallback
if self._exe is None:
try:
exe = self._proc.exe()
except AccessDenied as err:
return guess_it(fallback=err)
else:
if not exe:
# underlying implementation can legitimately return an
# empty string; if that's the case we don't want to
# raise AD while guessing from the cmdline
try:
exe = guess_it(fallback=exe)
except AccessDenied:
pass
self._exe = exe
return self._exe
def cmdline(self):
"""The command line this process has been called with."""
return self._proc.cmdline()
def status(self):
"""The process current status as a STATUS_* constant."""
try:
return self._proc.status()
except ZombieProcess:
return STATUS_ZOMBIE
def username(self):
"""The name of the user that owns the process.
On UNIX this is calculated by using *real* process uid.
"""
if _POSIX:
if pwd is None:
# might happen if python was installed from sources
raise ImportError(
"requires pwd module shipped with standard python")
real_uid = self.uids().real
try:
return pwd.getpwuid(real_uid).pw_name
except KeyError:
# the uid can't be resolved by the system
return str(real_uid)
else:
return self._proc.username()
def create_time(self):
"""The process creation time as a floating point number
expressed in seconds since the epoch, in UTC.
The return value is cached after first call.
"""
if self._create_time is None:
self._create_time = self._proc.create_time()
return self._create_time
def cwd(self):
"""Process current working directory as an absolute path."""
return self._proc.cwd()
def nice(self, value=None):
"""Get or set process niceness (priority)."""
if value is None:
return self._proc.nice_get()
else:
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
self._proc.nice_set(value)
if _POSIX:
def uids(self):
"""Return process UIDs as a (real, effective, saved)
namedtuple.
"""
return self._proc.uids()
def gids(self):
"""Return process GIDs as a (real, effective, saved)
namedtuple.
"""
return self._proc.gids()
def terminal(self):
"""The terminal associated with this process, if any,
else None.
"""
return self._proc.terminal()
def num_fds(self):
"""Return the number of file descriptors opened by this
process (POSIX only).
"""
return self._proc.num_fds()
# Linux, BSD and Windows only
if hasattr(_psplatform.Process, "io_counters"):
def io_counters(self):
"""Return process I/O statistics as a
(read_count, write_count, read_bytes, write_bytes)
namedtuple.
Those are the number of read/write calls performed and the
amount of bytes read and written by the process.
"""
return self._proc.io_counters()
# Linux and Windows >= Vista only
if hasattr(_psplatform.Process, "ionice_get"):
def ionice(self, ioclass=None, value=None):
"""Get or set process I/O niceness (priority).
On Linux 'ioclass' is one of the IOPRIO_CLASS_* constants.
'value' is a number which goes from 0 to 7. The higher the
value, the lower the I/O priority of the process.
On Windows only 'ioclass' is used and it can be set to 2
(normal), 1 (low) or 0 (very low).
Available on Linux and Windows > Vista only.
"""
if ioclass is None:
if value is not None:
raise ValueError("'ioclass' must be specified")
return self._proc.ionice_get()
else:
return self._proc.ionice_set(ioclass, value)
# Linux only
if hasattr(_psplatform.Process, "rlimit"):
def rlimit(self, resource, limits=None):
"""Get or set process resource limits as a (soft, hard)
tuple.
'resource' is one of the RLIMIT_* constants.
'limits' is supposed to be a (soft, hard) tuple.
See "man prlimit" for further info.
Available on Linux only.
"""
if limits is None:
return self._proc.rlimit(resource)
else:
return self._proc.rlimit(resource, limits)
# Windows, Linux and BSD only
if hasattr(_psplatform.Process, "cpu_affinity_get"):
def cpu_affinity(self, cpus=None):
"""Get or set process CPU affinity.
If specified 'cpus' must be a list of CPUs for which you
want to set the affinity (e.g. [0, 1]).
(Windows, Linux and BSD only).
"""
# Automatically remove duplicates both on get and
# set (for get it's not really necessary, it's
# just for extra safety).
if cpus is None:
return list(set(self._proc.cpu_affinity_get()))
else:
self._proc.cpu_affinity_set(list(set(cpus)))
if _WINDOWS:
def num_handles(self):
"""Return the number of handles opened by this process
(Windows only).
"""
return self._proc.num_handles()
def num_ctx_switches(self):
"""Return the number of voluntary and involuntary context
switches performed by this process.
"""
return self._proc.num_ctx_switches()
def num_threads(self):
"""Return the number of threads used by this process."""
return self._proc.num_threads()
def threads(self):
"""Return threads opened by process as a list of
(id, user_time, system_time) namedtuples representing
thread id and thread CPU times (user/system).
"""
return self._proc.threads()
@_assert_pid_not_reused
def children(self, recursive=False):
"""Return the children of this process as a list of Process
instances, pre-emptively checking whether PID has been reused.
If recursive is True return all of this process' descendants.
Example (A == this process):
A ─┐
│
├─ B (child) ─┐
│ └─ X (grandchild) ─┐
│ └─ Y (great grandchild)
├─ C (child)
└─ D (child)
>>> import psutil
>>> p = psutil.Process()
>>> p.children()
B, C, D
>>> p.children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be listed as the reference to process A
is lost.
"""
if hasattr(_psplatform, 'ppid_map'):
# Windows only: obtain a {pid:ppid, ...} dict for all running
# processes in one shot (faster).
ppid_map = _psplatform.ppid_map()
else:
ppid_map = None
ret = []
if not recursive:
if ppid_map is None:
# 'slow' version, common to all platforms except Windows
for p in process_iter():
try:
if p.ppid() == self.pid:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= p.create_time():
ret.append(p)
except (NoSuchProcess, ZombieProcess):
pass
else:
# Windows only (faster)
for pid, ppid in ppid_map.items():
if ppid == self.pid:
try:
child = Process(pid)
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= child.create_time():
ret.append(child)
except (NoSuchProcess, ZombieProcess):
pass
else:
# construct a dict where 'values' are all the processes
# having 'key' as their parent
table = collections.defaultdict(list)
if ppid_map is None:
for p in process_iter():
try:
table[p.ppid()].append(p)
except (NoSuchProcess, ZombieProcess):
pass
else:
for pid, ppid in ppid_map.items():
try:
p = Process(pid)
table[ppid].append(p)
except (NoSuchProcess, ZombieProcess):
pass
# At this point we have a mapping table where table[self.pid]
# are the current process' children.
# Below, we look for all descendants recursively, similarly
# to a recursive function call.
checkpids = [self.pid]
for pid in checkpids:
for child in table[pid]:
try:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
intime = self.create_time() <= child.create_time()
except (NoSuchProcess, ZombieProcess):
pass
else:
if intime:
ret.append(child)
if child.pid not in checkpids:
checkpids.append(child.pid)
return ret
def cpu_percent(self, interval=None):
"""Return a float representing the current process CPU
utilization as a percentage.
When interval is 0.0 or None (default) compares process times
to system CPU times elapsed since last call, returning
immediately (non-blocking). That means that the first time
this is called it will return a meaningless 0.0 value which
you should ignore.
When interval is > 0.0 compares process times to system CPU
times elapsed before and after the interval (blocking).
In this case it is recommended for accuracy that this function
be called with at least 0.1 seconds between calls.
Examples:
>>> import psutil
>>> p = psutil.Process(os.getpid())
>>> # blocking
>>> p.cpu_percent(interval=1)
2.0
>>> # non-blocking (percentage since last call)
>>> p.cpu_percent(interval=None)
2.9
>>>
"""
blocking = interval is not None and interval > 0.0
num_cpus = cpu_count()
if _POSIX:
def timer():
return _timer() * num_cpus
else:
def timer():
return sum(cpu_times())
if blocking:
st1 = timer()
pt1 = self._proc.cpu_times()
time.sleep(interval)
st2 = timer()
pt2 = self._proc.cpu_times()
else:
st1 = self._last_sys_cpu_times
pt1 = self._last_proc_cpu_times
st2 = timer()
pt2 = self._proc.cpu_times()
if st1 is None or pt1 is None:
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
return 0.0
delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
delta_time = st2 - st1
# reset values for next call in case of interval == None
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
try:
# The utilization split between all CPUs.
# Note: a percentage > 100 is legitimate as it can result
# from a process with multiple threads running on different
# CPU cores, see:
# http://stackoverflow.com/questions/1032357
# https://github.com/giampaolo/psutil/issues/474
overall_percent = ((delta_proc / delta_time) * 100) * num_cpus
except ZeroDivisionError:
# interval was too low
return 0.0
else:
return round(overall_percent, 1)
def cpu_times(self):
"""Return a (user, system) namedtuple representing the
accumulated process time, in seconds.
This is the same as os.times() but per-process.
"""
return self._proc.cpu_times()
def memory_info(self):
"""Return a tuple representing RSS (Resident Set Size) and VMS
(Virtual Memory Size) in bytes.
On UNIX RSS and VMS are the same values shown by 'ps'.
On Windows RSS and VMS refer to "Mem Usage" and "VM Size"
columns of taskmgr.exe.
"""
return self._proc.memory_info()
def memory_info_ex(self):
"""Return a namedtuple with variable fields depending on the
platform representing extended memory information about
this process. All numbers are expressed in bytes.
"""
return self._proc.memory_info_ex()
def memory_percent(self):
"""Compare physical system memory to process resident memory
(RSS) and calculate process memory utilization as a percentage.
"""
rss = self._proc.memory_info()[0]
# use cached value if available
total_phymem = _TOTAL_PHYMEM or virtual_memory().total
try:
return (rss / float(total_phymem)) * 100
except ZeroDivisionError:
return 0.0
def memory_maps(self, grouped=True):
"""Return process' mapped memory regions as a list of namedtuples
whose fields are variable depending on the platform.
If 'grouped' is True the mapped regions with the same 'path'
are grouped together and the different memory fields are summed.
If 'grouped' is False every mapped region is shown as a single
entity and the namedtuple will also include the mapped region's
address space ('addr') and permission set ('perms').
"""
it = self._proc.memory_maps()
if grouped:
d = {}
for tupl in it:
path = tupl[2]
nums = tupl[3:]
try:
d[path] = map(lambda x, y: x + y, d[path], nums)
except KeyError:
d[path] = nums
nt = _psplatform.pmmap_grouped
return [nt(path, *d[path]) for path in d] # NOQA
else:
nt = _psplatform.pmmap_ext
return [nt(*x) for x in it]
def open_files(self):
"""Return files opened by process as a list of
(path, fd) namedtuples including the absolute file name
and file descriptor number.
"""
return self._proc.open_files()
def connections(self, kind='inet'):
"""Return connections opened by process as a list of
(fd, family, type, laddr, raddr, status) namedtuples.
The 'kind' parameter filters for connections that match the
following criteria:
Kind Value Connections using
inet IPv4 and IPv6
inet4 IPv4
inet6 IPv6
tcp TCP
tcp4 TCP over IPv4
tcp6 TCP over IPv6
udp UDP
udp4 UDP over IPv4
udp6 UDP over IPv6
unix UNIX socket (both UDP and TCP protocols)
all the sum of all the possible families and protocols
"""
return self._proc.connections(kind)
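# Illustrative usage sketch (added for clarity, not part of upstream psutil;
# the filter value shown is just one of the kinds listed above):
# >>> p = Process()
# >>> p.connections(kind='tcp')
# [...]  # list of (fd, family, type, laddr, raddr, status) namedtuples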
if _POSIX:
def _send_signal(self, sig):
# XXX: according to "man 2 kill" PID 0 has a special
# meaning as it refers to <<every process in the process
# group of the calling process>>, so should we prevent
# it here?
try:
os.kill(self.pid, sig)
except OSError as err:
if err.errno == errno.ESRCH:
self._gone = True
raise NoSuchProcess(self.pid, self._name)
if err.errno == errno.EPERM:
raise AccessDenied(self.pid, self._name)
raise
@_assert_pid_not_reused
def send_signal(self, sig):
"""Send a signal to process pre-emptively checking whether
PID has been reused (see signal module constants).
On Windows only SIGTERM is valid and is treated as an alias
for kill().
"""
if _POSIX:
self._send_signal(sig)
else:
if sig == signal.SIGTERM:
self._proc.kill()
else:
raise ValueError("only SIGTERM is supported on Windows")
@_assert_pid_not_reused
def suspend(self):
"""Suspend process execution with SIGSTOP pre-emptively checking
whether PID has been reused.
On Windows this has the effect of suspending all process threads.
"""
if _POSIX:
self._send_signal(signal.SIGSTOP)
else:
self._proc.suspend()
@_assert_pid_not_reused
def resume(self):
"""Resume process execution with SIGCONT pre-emptively checking
whether PID has been reused.
On Windows this has the effect of resuming all process threads.
"""
if _POSIX:
self._send_signal(signal.SIGCONT)
else:
self._proc.resume()
@_assert_pid_not_reused
def terminate(self):
"""Terminate the process with SIGTERM pre-emptively checking
whether PID has been reused.
On Windows this is an alias for kill().
"""
if _POSIX:
self._send_signal(signal.SIGTERM)
else:
self._proc.kill()
@_assert_pid_not_reused
def kill(self):
"""Kill the current process with SIGKILL pre-emptively checking
whether PID has been reused.
"""
if _POSIX:
self._send_signal(signal.SIGKILL)
else:
self._proc.kill()
def wait(self, timeout=None):
"""Wait for process to terminate and, if process is a children
of os.getpid(), also return its exit code, else None.
If the process is already terminated immediately return None
instead of raising NoSuchProcess.
If timeout (in seconds) is specified and process is still alive
raise TimeoutExpired.
To wait for multiple Process(es) use psutil.wait_procs().
"""
if timeout is not None and not timeout >= 0:
raise ValueError("timeout must be a positive integer")
return self._proc.wait(timeout)
# =====================================================================
# --- Popen class
# =====================================================================
class Popen(Process):
"""A more convenient interface to stdlib subprocess module.
It starts a sub process and deals with it exactly as when using
subprocess.Popen class but in addition also provides all the
properties and methods of psutil.Process class as a unified
interface:
>>> import psutil
>>> from subprocess import PIPE
>>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
>>> p.name()
'python'
>>> p.uids()
user(real=1000, effective=1000, saved=1000)
>>> p.username()
'giampaolo'
>>> p.communicate()
('hi\n', None)
>>> p.terminate()
>>> p.wait(timeout=2)
0
>>>
For method names common to both classes such as kill(), terminate()
and wait(), psutil.Process implementation takes precedence.
Unlike subprocess.Popen this class pre-emptively checks whether PID
has been reused on send_signal(), terminate() and kill() so that
you don't accidentally terminate another process, fixing
http://bugs.python.org/issue6973.
For a complete documentation refer to:
http://docs.python.org/library/subprocess.html
"""
def __init__(self, *args, **kwargs):
# Explicitly avoid to raise NoSuchProcess in case the process
# spawned by subprocess.Popen terminates too quickly, see:
# https://github.com/giampaolo/psutil/issues/193
self.__subproc = subprocess.Popen(*args, **kwargs)
self._init(self.__subproc.pid, _ignore_nsp=True)
def __dir__(self):
return sorted(set(dir(Popen) + dir(subprocess.Popen)))
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
try:
return object.__getattribute__(self.__subproc, name)
except AttributeError:
raise AttributeError("%s instance has no attribute '%s'"
% (self.__class__.__name__, name))
def wait(self, timeout=None):
if self.__subproc.returncode is not None:
return self.__subproc.returncode
ret = super(Popen, self).wait(timeout)
self.__subproc.returncode = ret
return ret
# =====================================================================
# --- system processes related functions
# =====================================================================
def pids():
"""Return a list of current running PIDs."""
return _psplatform.pids()
def pid_exists(pid):
"""Return True if given PID exists in the current process list.
This is faster than doing "pid in psutil.pids()" and
should be preferred.
"""
if pid < 0:
return False
elif pid == 0 and _POSIX:
# On POSIX we use os.kill() to determine PID existence.
# According to "man 2 kill" PID 0 has a special meaning
# though: it refers to <<every process in the process
group of the calling process>> and that is not what we want
# to do here.
return pid in pids()
else:
return _psplatform.pid_exists(pid)
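# Illustrative usage sketch (added for clarity, not part of upstream psutil;
# the PIDs are hypothetical):
# >>> pid_exists(os.getpid())
# True
# >>> pid_exists(99999999)
# False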
_pmap = {}
def process_iter():
"""Return a generator yielding a Process instance for all
running processes.
Every new Process instance is only created once and then cached
into an internal table which is updated every time this is used.
Cached Process instances are checked for identity so that you're
safe in case a PID has been reused by another process, in which
case the cached instance is updated.
The sorting order in which processes are yielded is based on
their PIDs.
"""
def add(pid):
proc = Process(pid)
_pmap[proc.pid] = proc
return proc
def remove(pid):
_pmap.pop(pid, None)
a = set(pids())
b = set(_pmap.keys())
new_pids = a - b
gone_pids = b - a
for pid in gone_pids:
remove(pid)
for pid, proc in sorted(list(_pmap.items()) +
list(dict.fromkeys(new_pids).items())):
try:
if proc is None: # new process
yield add(pid)
else:
# use is_running() to check whether PID has been reused by
# another process in which case yield a new Process instance
if proc.is_running():
yield proc
else:
yield add(pid)
except NoSuchProcess:
remove(pid)
except AccessDenied:
# Process creation time can't be determined hence there's
# no way to tell whether the pid of the cached process
# has been reused. Just return the cached version.
yield proc
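# Illustrative usage sketch (added for clarity, not part of upstream psutil):
# >>> for proc in process_iter():
# ...     try:
# ...         print(proc.pid, proc.name())
# ...     except (NoSuchProcess, ZombieProcess):
# ...         pass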
def wait_procs(procs, timeout=None, callback=None):
"""Convenience function which waits for a list of processes to
terminate.
Return a (gone, alive) tuple indicating which processes
are gone and which ones are still alive.
The gone ones will have a new 'returncode' attribute indicating
process exit status (may be None).
'callback' is a function which gets called every time a process
terminates (a Process instance is passed as callback argument).
Function will return as soon as all processes terminate or when
timeout occurs.
Typical use case is:
- send SIGTERM to a list of processes
- give them some time to terminate
- send SIGKILL to those ones which are still alive
Example:
>>> def on_terminate(proc):
... print("process {} terminated".format(proc))
...
>>> for p in procs:
... p.terminate()
...
>>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
>>> for p in alive:
... p.kill()
"""
def check_gone(proc, timeout):
try:
returncode = proc.wait(timeout=timeout)
except TimeoutExpired:
pass
else:
if returncode is not None or not proc.is_running():
proc.returncode = returncode
gone.add(proc)
if callback is not None:
callback(proc)
if timeout is not None and not timeout >= 0:
msg = "timeout must be a positive integer, got %s" % timeout
raise ValueError(msg)
gone = set()
alive = set(procs)
if callback is not None and not callable(callback):
raise TypeError("callback %r is not a callable" % callable)
if timeout is not None:
deadline = _timer() + timeout
while alive:
if timeout is not None and timeout <= 0:
break
for proc in alive:
# Make sure that every complete iteration (all processes)
# will last max 1 sec.
# We do this because we don't want to wait too long on a
# single process: in case it terminates too late other
# processes may disappear in the meantime and their PID
# reused.
max_timeout = 1.0 / len(alive)
if timeout is not None:
timeout = min((deadline - _timer()), max_timeout)
if timeout <= 0:
break
check_gone(proc, timeout)
else:
check_gone(proc, max_timeout)
alive = alive - gone
if alive:
# Last attempt over processes survived so far.
# timeout == 0 won't make this function wait any further.
for proc in alive:
check_gone(proc, 0)
alive = alive - gone
return (list(gone), list(alive))
# =====================================================================
# --- CPU related functions
# =====================================================================
@memoize
def cpu_count(logical=True):
"""Return the number of logical CPUs in the system (same as
os.cpu_count() in Python 3.4).
If logical is False return the number of physical cores only
(e.g. hyper thread CPUs are excluded).
Return None if undetermined.
The return value is cached after first call.
If desired cache can be cleared like this:
>>> psutil.cpu_count.cache_clear()
"""
if logical:
return _psplatform.cpu_count_logical()
else:
return _psplatform.cpu_count_physical()
def cpu_times(percpu=False):
"""Return system-wide CPU times as a namedtuple.
Every CPU time represents the seconds the CPU has spent in the given mode.
The namedtuple's fields availability varies depending on the platform:
- user
- system
- idle
- nice (UNIX)
- iowait (Linux)
- irq (Linux, FreeBSD)
- softirq (Linux)
- steal (Linux >= 2.6.11)
- guest (Linux >= 2.6.24)
- guest_nice (Linux >= 3.2.0)
When percpu is True return a list of namedtuples for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
"""
if not percpu:
return _psplatform.cpu_times()
else:
return _psplatform.per_cpu_times()
_last_cpu_times = cpu_times()
_last_per_cpu_times = cpu_times(percpu=True)
def cpu_percent(interval=None, percpu=False):
"""Return a float representing the current system-wide CPU
utilization as a percentage.
When interval is > 0.0 compares system CPU times elapsed before
and after the interval (blocking).
When interval is 0.0 or None compares system CPU times elapsed
since last call or module import, returning immediately (non
blocking). That means the first time this is called it will
return a meaningless 0.0 value which you should ignore.
In this case it is recommended for accuracy that this function be
called with at least 0.1 seconds between calls.
When percpu is True returns a list of floats representing the
utilization as a percentage for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
Examples:
>>> # blocking, system-wide
>>> psutil.cpu_percent(interval=1)
2.0
>>>
>>> # blocking, per-cpu
>>> psutil.cpu_percent(interval=1, percpu=True)
[2.0, 1.0]
>>>
>>> # non-blocking (percentage since last call)
>>> psutil.cpu_percent(interval=None)
2.9
>>>
"""
global _last_cpu_times
global _last_per_cpu_times
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
t1_all = sum(t1)
t1_busy = t1_all - t1.idle
t2_all = sum(t2)
t2_busy = t2_all - t2.idle
# this usually indicates a float precision issue
if t2_busy <= t1_busy:
return 0.0
busy_delta = t2_busy - t1_busy
all_delta = t2_all - t1_all
busy_perc = (busy_delta / all_delta) * 100
return round(busy_perc, 1)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times
_last_cpu_times = cpu_times()
return calculate(t1, _last_cpu_times)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times
_last_per_cpu_times = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times):
ret.append(calculate(t1, t2))
return ret
# Use separate global vars for cpu_times_percent() so that it's
# independent from cpu_percent() and they can both be used within
# the same program.
_last_cpu_times_2 = _last_cpu_times
_last_per_cpu_times_2 = _last_per_cpu_times
def cpu_times_percent(interval=None, percpu=False):
"""Same as cpu_percent() but provides utilization percentages
for each specific CPU time as is returned by cpu_times().
For instance, on Linux we'll get:
>>> cpu_times_percent()
cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
>>>
interval and percpu arguments have the same meaning as in
cpu_percent().
"""
global _last_cpu_times_2
global _last_per_cpu_times_2
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
nums = []
all_delta = sum(t2) - sum(t1)
for field in t1._fields:
field_delta = getattr(t2, field) - getattr(t1, field)
try:
field_perc = (100 * field_delta) / all_delta
except ZeroDivisionError:
field_perc = 0.0
field_perc = round(field_perc, 1)
if _WINDOWS:
# XXX
# Work around:
# https://github.com/giampaolo/psutil/issues/392
# CPU times are always supposed to increase over time
# or at least remain the same and that's because time
# cannot go backwards.
# Surprisingly sometimes this might not be the case on
# Windows where 'system' CPU time can be smaller
# compared to the previous call, resulting in corrupted
# percentages (< 0 or > 100).
# I really don't know what to do about that except
# forcing the value to 0 or 100.
if field_perc > 100.0:
field_perc = 100.0
elif field_perc < 0.0:
field_perc = 0.0
nums.append(field_perc)
return _psplatform.scputimes(*nums)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times_2
_last_cpu_times_2 = cpu_times()
return calculate(t1, _last_cpu_times_2)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times_2
_last_per_cpu_times_2 = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times_2):
ret.append(calculate(t1, t2))
return ret
# =====================================================================
# --- system memory related functions
# =====================================================================
def virtual_memory():
"""Return statistics about system memory usage as a namedtuple
including the following fields, expressed in bytes:
- total:
total physical memory available.
- available:
the actual amount of available memory that can be given
instantly to processes that request more memory in bytes; this
is calculated by summing different memory values depending on
the platform (e.g. free + buffers + cached on Linux) and it is
supposed to be used to monitor actual memory usage in a cross
platform fashion.
- percent:
the percentage usage calculated as (total - available) / total * 100
- used:
memory used, calculated differently depending on the platform and
designed for informational purposes only:
OSX: active + inactive + wired
BSD: active + wired + cached
LINUX: total - free
- free:
memory not being used at all (zeroed) that is readily available;
note that this doesn't reflect the actual memory available
(use 'available' instead)
Platform-specific fields:
- active (UNIX):
memory currently in use or very recently used, and so it is in RAM.
- inactive (UNIX):
memory that is marked as not used.
- buffers (BSD, Linux):
cache for things like file system metadata.
- cached (BSD, OSX):
cache for various things.
- wired (OSX, BSD):
memory that is marked to always stay in RAM. It is never moved to disk.
- shared (BSD):
memory that may be simultaneously accessed by multiple processes.
The sum of 'used' and 'available' does not necessarily equal total.
On Windows 'available' and 'free' are the same.
"""
global _TOTAL_PHYMEM
ret = _psplatform.virtual_memory()
# cached for later use in Process.memory_percent()
_TOTAL_PHYMEM = ret.total
return ret
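# Illustrative usage sketch (added for clarity, not part of upstream psutil;
# the numbers are hypothetical):
# >>> mem = virtual_memory()
# >>> mem.total, mem.available, mem.percent
# (8589934592, 2147483648, 75.0)
# >>> if mem.available < 100 * 1024 * 1024:
# ...     print("low memory")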
def swap_memory():
"""Return system swap memory statistics as a namedtuple including
the following fields:
- total: total swap memory in bytes
- used: used swap memory in bytes
- free: free swap memory in bytes
- percent: the percentage usage
- sin: no. of bytes the system has swapped in from disk (cumulative)
- sout: no. of bytes the system has swapped out from disk (cumulative)
'sin' and 'sout' on Windows are meaningless and always set to 0.
"""
return _psplatform.swap_memory()
# =====================================================================
# --- disks/partitions related functions
# =====================================================================
def disk_usage(path):
"""Return disk usage statistics about the given path as a namedtuple
including total, used and free space expressed in bytes plus the
percentage usage.
"""
return _psplatform.disk_usage(path)
def disk_partitions(all=False):
"""Return mounted partitions as a list of
(device, mountpoint, fstype, opts) namedtuple.
'opts' field is a raw string separated by commas indicating mount
options which may vary depending on the platform.
If "all" parameter is False return physical devices only and ignore
all others.
"""
return _psplatform.disk_partitions(all)
def disk_io_counters(perdisk=False):
"""Return system disk I/O statistics as a namedtuple including
the following fields:
- read_count: number of reads
- write_count: number of writes
- read_bytes: number of bytes read
- write_bytes: number of bytes written
- read_time: time spent reading from disk (in milliseconds)
- write_time: time spent writing to disk (in milliseconds)
If perdisk is True return the same information for every
physical disk installed on the system as a dictionary
with partition names as the keys and the namedtuple
described above as the values.
On recent Windows versions the 'diskperf -y' command may need to be
executed first, otherwise this function won't find any disk.
"""
rawdict = _psplatform.disk_io_counters()
if not rawdict:
raise RuntimeError("couldn't find any physical disk")
if perdisk:
for disk, fields in rawdict.items():
rawdict[disk] = _common.sdiskio(*fields)
return rawdict
else:
return _common.sdiskio(*[sum(x) for x in zip(*rawdict.values())])
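# Illustrative usage sketch (added for clarity, not part of upstream psutil;
# the disk name and counter value are hypothetical):
# >>> counters = disk_io_counters(perdisk=True)
# >>> counters['sda1'].read_bytes
# 102400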
# =====================================================================
# --- network related functions
# =====================================================================
def net_io_counters(pernic=False):
"""Return network I/O statistics as a namedtuple including
the following fields:
- bytes_sent: number of bytes sent
- bytes_recv: number of bytes received
- packets_sent: number of packets sent
- packets_recv: number of packets received
- errin: total number of errors while receiving
- errout: total number of errors while sending
- dropin: total number of incoming packets which were dropped
- dropout: total number of outgoing packets which were dropped
(always 0 on OSX and BSD)
If pernic is True return the same information for every
network interface installed on the system as a dictionary
with network interface names as the keys and the namedtuple
described above as the values.
"""
rawdict = _psplatform.net_io_counters()
if not rawdict:
raise RuntimeError("couldn't find any network interface")
if pernic:
for nic, fields in rawdict.items():
rawdict[nic] = _common.snetio(*fields)
return rawdict
else:
return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
def net_connections(kind='inet'):
"""Return system-wide connections as a list of
(fd, family, type, laddr, raddr, status, pid) namedtuples.
In case of limited privileges 'fd' and 'pid' may be set to -1
and None respectively.
The 'kind' parameter filters for connections that fit the
following criteria:
Kind Value Connections using
inet IPv4 and IPv6
inet4 IPv4
inet6 IPv6
tcp TCP
tcp4 TCP over IPv4
tcp6 TCP over IPv6
udp UDP
udp4 UDP over IPv4
udp6 UDP over IPv6
unix UNIX socket (both UDP and TCP protocols)
all the sum of all the possible families and protocols
On OSX this function requires root privileges.
"""
return _psplatform.net_connections(kind)
def net_if_addrs():
"""Return the addresses associated to each NIC (network interface
card) installed on the system as a dictionary whose keys are the
NIC names and value is a list of namedtuples for each address
assigned to the NIC. Each namedtuple includes 4 fields:
- family
- address
- netmask
- broadcast
'family' can be either socket.AF_INET, socket.AF_INET6 or
psutil.AF_LINK, which refers to a MAC address.
'address' is the primary address, 'netmask' and 'broadcast'
may be None.
Note: you can have more than one address of the same family
associated with each interface.
"""
has_enums = sys.version_info >= (3, 4)
if has_enums:
import socket
rawlist = _psplatform.net_if_addrs()
rawlist.sort(key=lambda x: x[1]) # sort by family
ret = collections.defaultdict(list)
for name, fam, addr, mask, broadcast in rawlist:
if has_enums:
try:
fam = socket.AddressFamily(fam)
except ValueError:
if os.name == 'nt' and fam == -1:
fam = _psplatform.AF_LINK
elif (hasattr(_psplatform, "AF_LINK") and
_psplatform.AF_LINK == fam):
# Linux defines AF_LINK as an alias for AF_PACKET.
# We re-set the family here so that repr(family)
# will show AF_LINK rather than AF_PACKET
fam = _psplatform.AF_LINK
ret[name].append(_common.snic(fam, addr, mask, broadcast))
return dict(ret)
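# Illustrative usage sketch (added for clarity, not part of upstream psutil;
# interface names and addresses are hypothetical):
# >>> for nic, addrs in net_if_addrs().items():
# ...     for addr in addrs:
# ...         print(nic, addr.family, addr.address)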
def net_if_stats():
"""Return information about each NIC (network interface card)
installed on the system as a dictionary whose keys are the
NIC names and value is a namedtuple with the following fields:
- isup: whether the interface is up (bool)
- duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
NIC_DUPLEX_UNKNOWN
- speed: the NIC speed expressed in megabits per second; if it can't
be determined (e.g. 'localhost') it will be set to 0.
- mtu: the maximum transmission unit expressed in bytes.
"""
return _psplatform.net_if_stats()
# =====================================================================
# --- other system related functions
# =====================================================================
def boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
# Note: we are not caching this because it is subject to
# system clock updates.
return _psplatform.boot_time()
def users():
"""Return users currently connected on the system as a list of
namedtuples including the following fields.
- user: the name of the user
- terminal: the tty or pseudo-tty associated with the user, if any.
- host: the host name associated with the entry, if any.
- started: the creation time as a floating point number expressed in
seconds since the epoch.
"""
return _psplatform.users()
def test():
"""List info of all currently running processes emulating ps aux
output.
"""
import datetime
today_day = datetime.date.today()
templ = "%-10s %5s %4s %4s %7s %7s %-13s %5s %7s %s"
attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',
'create_time', 'memory_info']
if _POSIX:
attrs.append('uids')
attrs.append('terminal')
print(templ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY",
"START", "TIME", "COMMAND"))
for p in process_iter():
try:
pinfo = p.as_dict(attrs, ad_value='')
except NoSuchProcess:
pass
else:
if pinfo['create_time']:
ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])
if ctime.date() == today_day:
ctime = ctime.strftime("%H:%M")
else:
ctime = ctime.strftime("%b%d")
else:
ctime = ''
cputime = time.strftime("%M:%S",
time.localtime(sum(pinfo['cpu_times'])))
try:
user = p.username()
except KeyError:
if _POSIX:
if pinfo['uids']:
user = str(pinfo['uids'].real)
else:
user = ''
else:
raise
except Error:
user = ''
if _WINDOWS and '\\' in user:
user = user.split('\\')[1]
vms = pinfo['memory_info'] and \
int(pinfo['memory_info'].vms / 1024) or '?'
rss = pinfo['memory_info'] and \
int(pinfo['memory_info'].rss / 1024) or '?'
memp = pinfo['memory_percent'] and \
round(pinfo['memory_percent'], 1) or '?'
print(templ % (
user[:10],
pinfo['pid'],
pinfo['cpu_percent'],
memp,
vms,
rss,
pinfo.get('terminal', '') or '?',
ctime,
cputime,
pinfo['name'].strip() or '?'))
del memoize, division
if sys.version_info < (3, 0):
del num
if __name__ == "__main__":
test()
|
artistic-2.0
| -1,882,063,374,360,191,700
| 34.048626
| 79
| 0.550685
| false
| 4.37703
| false
| false
| false
|
VirusTotal/msticpy
|
tools/toollib/url_checker_async.py
|
1
|
7616
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Python file import analyzer."""
from collections import defaultdict, namedtuple
from pathlib import Path
from typing import Dict, Set, Optional, Iterable
from urllib import parse
import asyncio
import markdown
from bs4 import BeautifulSoup
from aiohttp import ClientSession, ClientResponseError, ClientConnectionError
# pylint: disable=relative-beyond-top-level
# from . import VERSION
# __version__ = VERSION
__author__ = "Ian Hellen"
UrlResult = namedtuple("UrlResult", "status, history, url, message")
def check_docs(
doc_path: str, recurse: bool = True, max_threads: int = 10, delay: float = 0
) -> Dict[str, Dict[str, UrlResult]]:
"""
Check multiple HTML files in `doc_path`.
Parameters
----------
    doc_path : str
        Path to the folder containing the HTML/Markdown files to check
recurse: bool
If True, recurse subfolders, default is True
max_threads: int, optional
The maximum number of async threads to run
delay: float, optional
Seconds delay between requests
Returns
-------
Dict[str, Dict[str, UrlResult]]
Dictionary of pages checked. Results for each page
is a dictionary of checked links for the page.
"""
page_results: Dict[str, Dict[str, UrlResult]] = defaultdict(dict)
link_results: Dict[str, UrlResult] = {}
links_to_check = _get_links_from_files(doc_path, recurse)
print(f"Checking links {len(links_to_check)}...")
checked_links = check_uris(links_to_check, max_threads, delay)
print("\ndone")
for result in checked_links:
link_results[result.url] = result
src_pages = links_to_check[result.url]
for src_page in src_pages:
page_results[src_page][result.url] = result
_print_url_results(page_results)
return page_results
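# A minimal, hypothetical invocation of check_docs(); the "docs" folder name and
# the option values are placeholders, not something this module defines:
#
#     results = check_docs("docs", recurse=True, max_threads=20, delay=0.1)
#     broken = {url for page in results.values()
#               for url, res in page.items() if res.status != 200}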
# pylint: disable=broad-except
def _get_links_from_files(doc_path: str, recurse: bool = True) -> Dict[str, Set[str]]:
links_to_check: Dict[str, Set[str]] = defaultdict(set)
html_glob_pattern = "**/*.html" if recurse else "*.html"
all_files = list(Path(doc_path).glob(html_glob_pattern))
md_glob_pattern = "**/*.md" if recurse else "*.md"
md_files = list(Path(doc_path).glob(md_glob_pattern))
all_files.extend(md_files)
print(f"reading {len(all_files)} files...")
for file_name in all_files:
pg_links = _get_doc_links(file_name)
page = str(file_name.relative_to(Path(doc_path)))
for link in pg_links:
links_to_check[link].add(page)
return links_to_check
def _get_doc_links(doc_path: Path) -> Set[str]:
"""
Check links in an HTML or Markdown document.
Parameters
----------
    doc_path : Path
Path to the document
Returns
-------
Set[str]
Set of links
"""
html_content = None
try:
html_content = doc_path.read_text(encoding="utf-8")
except UnicodeDecodeError:
html_content = doc_path.read_text(encoding="mbcs")
if doc_path.suffix.casefold() == ".md":
html_content = markdown.markdown(html_content)
soup = BeautifulSoup(html_content, "html.parser")
links = soup.find_all("a")
links = {link.get("href") for link in links}
    links = {link for link in links if link and link.casefold().startswith("http")}
return links
def _resolve_rel_link(
url_link: str, all_links: bool, page_url: str, top_root: str
) -> Optional[str]:
if url_link[0:4] == "http":
if all_links or (top_root.lower() not in url_link.lower()):
return url_link
else:
if url_link.startswith("#"):
# don't follow fragments
return None
url_link = parse.urljoin(page_url, url_link)
if all_links:
return url_link
return None
def check_uris(
uris_to_check: Iterable[str], max_threads: int = 10, delay: float = 0
) -> Iterable[UrlResult]:
"""
Check URIs.
Parameters
----------
uris_to_check : Iterable[str]
Iterable of URI strings
max_threads: int, optional
The maximum number of async threads to run
delay: float, optional
Seconds delay between requests
Returns
-------
Iterable[UrlResult]
Iterable of UrlResults
"""
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(_check_uris_async(uris_to_check, max_threads, delay))
return loop.run_until_complete(future)
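# Illustrative call only -- the URLs below are placeholders:
#
#     for res in check_uris(["https://example.com", "https://example.org/missing"]):
#         print(res.status, res.url, res.message)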
async def _check_url_async(url: str, session: ClientSession) -> UrlResult:
"""
Connect to URL and return response status.
Parameters
----------
url : str
URL to check
session : ClientSession
aiohttp client session
Returns
-------
UrlResult
Tuple of status code, redirect history, requested url,
status/error message.
"""
try:
async with session.get(url) as resp:
try:
await resp.read()
if resp.history:
result = UrlResult(
resp.status,
resp.history,
url,
"No error. Redirect to " + str(resp.url),
)
elif resp.status == 200:
result = UrlResult(
resp.status, resp.history, url, "No error. No redirect."
)
else:
result = UrlResult(resp.status, resp.history, url, "Error?")
except ClientResponseError as client_err:
return UrlResult(client_err.status, [], url, client_err)
except ClientConnectionError as err:
result = UrlResult(404, [], url, err)
return result
async def _check_uri_with_sem_async(sem, url, session) -> Iterable[UrlResult]:
# Getter function with semaphore.
async with sem:
return await _check_url_async(url, session)
async def _check_uris_async(
links_to_check: Iterable[str], max_threads: int = 10, delay: float = 0
) -> Iterable[UrlResult]:
tasks = []
# create instance of Semaphore
sem = asyncio.Semaphore(max_threads)
    # Create a client session that will ensure we don't open a new
    # connection for each request.
async with ClientSession() as session:
for uri in links_to_check:
if delay:
                await asyncio.sleep(delay)
# pass Semaphore and session to every GET request
task = asyncio.ensure_future(_check_uri_with_sem_async(sem, uri, session))
tasks.append(task)
results = await asyncio.gather(*tasks)
return results
def _print_url_results(results: Dict[str, Dict[str, UrlResult]]):
"""
Print results of any URLs that did not return 200 status.
Parameters
----------
results : Dict[str, Dict[str, UrlResult]]
List of URLs checks to print.
"""
print("\n\nResults")
# non-200s
print("\n==========\nERRORS")
for page, result_dict in results.items():
page_errors = []
for result in result_dict.values():
if result.status != 200:
page_errors.append(f"{result.status} - {result.url}")
if page_errors:
print(f"Document {page}")
for err in page_errors:
print(err)
# if __name__ == "__main__":
# t_results = check_docs("..//..")
|
mit
| -6,428,462,102,758,438,000
| 28.75
| 88
| 0.588629
| false
| 3.979101
| false
| false
| false
|
vinaykrdahiya/justuploads
|
main/__init__.py
|
1
|
2292
|
import falcon
# from main.settings import DB as db
# from main.helpers import QueryParser
import json
import urlparse
from werkzeug.http import parse_options_header
from werkzeug.formparser import parse_form_data
from cStringIO import StringIO
from werkzeug.wsgi import LimitedStream
from werkzeug import secure_filename
class CreateTemplateExclusiveImage:
"""End point for creating dealtype"""
def on_get(self, req, resp, stream, form={}, files={}):
"""return status 405. asks to use post api.
"""
resp.content_type = "application/json"
resp_dict = {"status": "error",
"summary": "use post request for logout"}
resp.body = (json.dumps(resp_dict))
def on_post(self, req, resp, stream, form={}, files={}):
"""
"""
file = files.get('file', [''])[0]
if file:
filename = secure_filename(file.filename)
file.save(filename)
resp.status = falcon.HTTP_200
resp.content_type = "application/json"
resp_dict = {"status": "success",
"summary": "File uploaded"}
resp.body = (json.dumps(resp_dict))
def generate_formdata(req, resp, params):
"""sets params['form'], params['files'], params['stream']
to pass to every endpoint.
"""
if req.method != 'GET':
mimetype, options = parse_options_header(req.get_header('content-type'))
data = req.stream.read()
environ = {'wsgi.input': StringIO(data),
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': '%s; boundary=%s' %
(mimetype, options['boundary']),
'REQUEST_METHOD': 'POST'}
stream, form, files = parse_form_data(environ)
params['stream'], params['form'], params['files'] = stream, dict(form),\
dict(files)
return True
else:
di = urlparse.parse_qsl(req.query_string)
params['form'] = dict(di)
params['stream'] = LimitedStream()
params['files'] = dict()
return True
# hooks to be executed on every request before reaching to the endpoint
app = falcon.API(before=[generate_formdata])
# importing all the endpoints
cr = CreateTemplateExclusiveImage()
app.add_route('/upload', cr)
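# A hypothetical way to serve and exercise this WSGI app locally (the server
# choice and file name are assumptions, not part of this module):
#
#     gunicorn main:app --bind 0.0.0.0:8000
#     curl -F "file=@photo.jpg" http://localhost:8000/upload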
|
unlicense
| -2,264,388,575,298,547,000
| 33.727273
| 80
| 0.603403
| false
| 4.042328
| false
| false
| false
|
aphelps/platformio
|
platformio/builder/tools/platformio.py
|
1
|
10717
|
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
import atexit
import platform
import re
from os import getenv, listdir, remove, sep, walk
from os.path import basename, dirname, isdir, isfile, join, normpath
from time import sleep
from SCons.Script import Exit, SConscript, SConscriptChdir
from serial import Serial
from platformio.util import get_serialports
def ProcessGeneral(env):
corelibs = []
# specific linker script
if "ldscript" in env.get("BOARD_OPTIONS", {}).get("build", {}):
env.Append(
LINKFLAGS=["-T", join(
"$PIOHOME_DIR", "packages", "ldscripts",
"${BOARD_OPTIONS['build']['ldscript']}")]
)
if "extra_flags" in env.get("BOARD_OPTIONS", {}).get("build", {}):
env.MergeFlags(env.subst("${BOARD_OPTIONS['build']['extra_flags']}"))
if "BUILD_FLAGS" in env:
env.MergeFlags(env['BUILD_FLAGS'])
if "FRAMEWORK" in env:
if env['FRAMEWORK'] in ("arduino", "energia"):
env.ConvertInoToCpp()
for f in env['FRAMEWORK'].split(","):
SConscriptChdir(0)
env, libs = SConscript(
env.subst(join("$PIOBUILDER_DIR", "scripts",
"frameworks", "%s.py" % f.strip().lower())),
exports="env")
corelibs += libs
return corelibs
def BuildFirmware(env, corelibs):
src = env.Clone()
vdirs = src.VariantDirRecursive(
join("$BUILD_DIR", "src"), join("$PROJECT_DIR", "src"))
# build dependent libs
deplibs = src.BuildDependentLibraries(join("$PROJECT_DIR", "src"))
src.MergeFlags(getenv("PIOSRCBUILD_FLAGS", "$SRCBUILD_FLAGS"))
return src.Program(
join("$BUILD_DIR", "firmware"),
[src.GlobCXXFiles(vdir) for vdir in vdirs],
LIBS=deplibs + corelibs,
LIBPATH="$BUILD_DIR",
PROGSUFFIX=".elf")
def GlobCXXFiles(env, path):
files = []
for suff in ["*.c", "*.cpp", "*.S"]:
_list = env.Glob(join(path, suff))
if _list:
files += _list
return files
def VariantDirRecursive(env, variant_dir, src_dir, duplicate=True,
ignore_pattern=None):
if not ignore_pattern:
ignore_pattern = (".git", ".svn")
variants = []
src_dir = env.subst(src_dir)
for root, _, _ in walk(src_dir):
_src_dir = root
_var_dir = variant_dir + root.replace(src_dir, "")
if any([s in _var_dir.lower() for s in ignore_pattern]):
continue
env.VariantDir(_var_dir, _src_dir, duplicate)
variants.append(_var_dir)
return variants
def BuildLibrary(env, variant_dir, library_dir, ignore_files=None):
lib = env.Clone()
vdirs = lib.VariantDirRecursive(
variant_dir, library_dir, ignore_pattern=(".git", ".svn", "examples"))
srcfiles = []
for vdir in vdirs:
for item in lib.GlobCXXFiles(vdir):
if not ignore_files or item.name not in ignore_files:
srcfiles.append(item)
return lib.Library(
lib.subst(variant_dir),
srcfiles
)
def BuildDependentLibraries(env, src_dir): # pylint: disable=R0914
INCLUDES_RE = re.compile(r"^\s*#include\s+(\<|\")([^\>\"\']+)(?:\>|\")",
re.M)
LIBSOURCE_DIRS = [env.subst(d) for d in env.get("LIBSOURCE_DIRS", [])]
# start internal prototypes
class IncludeFinder(object):
def __init__(self, base_dir, name, is_system=False):
self.base_dir = base_dir
self.name = name
self.is_system = is_system
self._inc_path = None
self._lib_dir = None
self._lib_name = None
def getIncPath(self):
return self._inc_path
def getLibDir(self):
return self._lib_dir
def getLibName(self):
return self._lib_name
def run(self):
if not self.is_system and self._find_in_local():
return True
return self._find_in_system()
def _find_in_local(self):
if isfile(join(self.base_dir, self.name)):
self._inc_path = join(self.base_dir, self.name)
return True
else:
return False
def _find_in_system(self):
for lsd_dir in LIBSOURCE_DIRS:
if not isdir(lsd_dir):
continue
for ld in listdir(lsd_dir):
inc_path = normpath(join(lsd_dir, ld, self.name))
lib_dir = inc_path[:inc_path.index(sep, len(lsd_dir) + 1)]
lib_name = basename(lib_dir)
# ignore user's specified libs
if "IGNORE_LIBS" in env and lib_name in env['IGNORE_LIBS']:
continue
if not isfile(inc_path):
# if source code is in "src" dir
lib_dir = join(lsd_dir, lib_name, "src")
inc_path = join(lib_dir, self.name)
if isfile(inc_path):
self._lib_dir = lib_dir
self._lib_name = lib_name
self._inc_path = inc_path
return True
return False
def _get_dep_libs(src_dir):
state = {
"paths": set(),
"libs": set(),
"ordered": set()
}
state = _process_src_dir(state, env.subst(src_dir))
result = []
for item in sorted(state['ordered'], key=lambda s: s[0]):
result.append((item[1], item[2]))
return result
def _process_src_dir(state, src_dir):
for root, _, _ in walk(src_dir):
for node in (env.GlobCXXFiles(root) +
env.Glob(join(root, "*.h"))):
state = _parse_includes(state, node)
return state
def _parse_includes(state, node):
if node.path in state['paths']:
return state
else:
state['paths'].add(node.path)
skip_includes = ("arduino.h", "energia.h")
matches = INCLUDES_RE.findall(node.get_text_contents())
for (inc_type, inc_name) in matches:
base_dir = dirname(node.path)
if inc_name.lower() in skip_includes:
continue
if join(base_dir, inc_name) in state['paths']:
continue
else:
state['paths'].add(join(base_dir, inc_name))
finder = IncludeFinder(base_dir, inc_name, inc_type == "<")
if finder.run():
_lib_dir = finder.getLibDir()
if _lib_dir and _lib_dir not in state['libs']:
state['ordered'].add((
len(state['ordered']) + 1, finder.getLibName(),
_lib_dir))
_parse_includes(state, env.File(finder.getIncPath()))
if _lib_dir and _lib_dir not in state['libs']:
state['libs'].add(_lib_dir)
state = _process_src_dir(state, _lib_dir)
return state
# end internal prototypes
deplibs = _get_dep_libs(src_dir)
env.Append(CPPPATH=[join("$BUILD_DIR", l) for (l, _) in deplibs])
# add automatically "utility" dir from the lib (Arduino issue)
env.Append(CPPPATH=[join("$BUILD_DIR", l, "utility") for (l, ld) in deplibs
if isdir(join(ld, "utility"))])
libs = []
for (libname, inc_dir) in reversed(deplibs):
lib = env.BuildLibrary(
join("$BUILD_DIR", libname), inc_dir)
env.Clean(libname, lib)
libs.append(lib)
return libs
def ConvertInoToCpp(env):
def delete_tmpcpp(files):
for f in files:
remove(f)
tmpcpp = []
items = (env.Glob(join("$PROJECT_DIR", "src", "*.ino")) +
env.Glob(join("$PROJECT_DIR", "src", "*.pde")))
for item in items:
cppfile = item.get_path()[:-3] + "cpp"
if isfile(cppfile):
continue
ino_contents = item.get_text_contents()
re_includes = re.compile(r"^(#include\s+(?:\<|\")[^\r\n]+)",
re.M | re.I)
includes = re_includes.findall(ino_contents)
prototypes = re.findall(
r"""^(
(?:\s*[a-z_\d]+){1,2} # return type
\s+[a-z_\d]+\s* # name of prototype
\([a-z_,\.\*\&\[\]\s\d]*\) # args
)\s*\{ # must end with {
""",
ino_contents,
re.X | re.M | re.I
)
# print includes, prototypes
# disable previous includes
ino_contents = re_includes.sub(r"//\1", ino_contents)
# create new temporary C++ valid file
with open(cppfile, "w") as f:
f.write("#include <Arduino.h>\n")
if includes:
f.write("%s\n" % "\n".join(includes))
if prototypes:
f.write("%s;\n" % ";\n".join(prototypes))
f.write("#line 1 \"%s\"\n" % basename(item.path))
f.write(ino_contents)
tmpcpp.append(cppfile)
if tmpcpp:
atexit.register(delete_tmpcpp, tmpcpp)
def FlushSerialBuffer(env, port):
s = Serial(env.subst(port))
s.flushInput()
s.setDTR(False)
s.setRTS(False)
sleep(0.1)
s.setDTR(True)
s.setRTS(True)
s.close()
def TouchSerialPort(env, port, baudrate):
s = Serial(port=env.subst(port), baudrate=baudrate)
s.close()
if platform.system() != "Darwin":
sleep(0.3)
def WaitForNewSerialPort(_, before):
new_port = None
elapsed = 0
while elapsed < 10:
now = [i['port'] for i in get_serialports()]
diff = list(set(now) - set(before))
if diff:
new_port = diff[0]
break
before = now
sleep(0.25)
elapsed += 0.25
if not new_port:
Exit("Error: Couldn't find a board on the selected port. "
"Check that you have the correct port selected. "
"If it is correct, try pressing the board's reset "
"button after initiating the upload.")
return new_port
def exists(_):
return True
def generate(env):
env.AddMethod(ProcessGeneral)
env.AddMethod(BuildFirmware)
env.AddMethod(GlobCXXFiles)
env.AddMethod(VariantDirRecursive)
env.AddMethod(BuildLibrary)
env.AddMethod(BuildDependentLibraries)
env.AddMethod(ConvertInoToCpp)
env.AddMethod(FlushSerialBuffer)
env.AddMethod(TouchSerialPort)
env.AddMethod(WaitForNewSerialPort)
return env
|
mit
| -2,971,078,524,403,639,300
| 30.15407
| 79
| 0.528599
| false
| 3.698068
| false
| false
| false
|
dlarochelle/extractor_train
|
tests/test_forms.py
|
1
|
2252
|
# -*- coding: utf-8 -*-
import pytest
from extractor_train.public.forms import LoginForm
from extractor_train.user.forms import RegisterForm
from .factories import UserFactory
class TestRegisterForm:
def test_validate_user_already_registered(self, user):
# Enters username that is already registered
form = RegisterForm(username=user.username, email='foo@bar.com',
password='example', confirm='example')
assert form.validate() is False
assert 'Username already registered' in form.username.errors
def test_validate_email_already_registered(self, user):
# enters email that is already registered
form = RegisterForm(username='unique', email=user.email,
password='example', confirm='example')
assert form.validate() is False
assert 'Email already registered' in form.email.errors
def test_validate_success(self, db):
form = RegisterForm(username='newusername', email='new@test.test',
password='example', confirm='example')
assert form.validate() is True
class TestLoginForm:
def test_validate_success(self, user):
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='example')
assert form.validate() is True
assert form.user == user
def test_validate_unknown_username(self, db):
form = LoginForm(username='unknown', password='example')
assert form.validate() is False
assert 'Unknown username' in form.username.errors
assert form.user is None
def test_validate_invalid_password(self, user):
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='wrongpassword')
assert form.validate() is False
assert 'Invalid password' in form.password.errors
def test_validate_inactive_user(self, user):
user.active = False
user.set_password('example')
user.save()
# Correct username and password, but user is not activated
form = LoginForm(username=user.username, password='example')
assert form.validate() is False
assert 'User not activated' in form.username.errors
|
bsd-3-clause
| -4,317,048,631,488,596,000
| 35.934426
| 74
| 0.672735
| false
| 4.415686
| true
| false
| false
|
aniversarioperu/django-manolo
|
scrapers/manolo_scraper/spiders/minem.py
|
2
|
3163
|
# -*- coding: utf-8 -*-
import math
import scrapy
from .spiders import ManoloBaseSpider
from ..items import ManoloItem
from ..item_loaders import ManoloItemLoader
from ..utils import make_hash, get_dni
# url: http://intranet.minem.gob.pe/GESTION/visitas_pcm
class MinemSpider(ManoloBaseSpider):
name = 'minem'
    allowed_domains = ['intranet.minem.gob.pe']
NUMBER_OF_PAGES_PER_PAGE = 20
def initial_request(self, date):
date_str = date.strftime("%d/%m/%Y")
request = self.make_form_request(date_str, self.parse_pages, 1)
return request
def make_form_request(self, date_str, callback, page_number):
page_url = 'http://intranet.minem.gob.pe/GESTION/visitas_pcm/Busqueda/DMET_html_SelectMaestraBuscador'
start_from_record = self.NUMBER_OF_PAGES_PER_PAGE * (page_number - 1) + 1
params = {
'TXT_FechaVisita_Inicio': date_str,
'Ls_Pagina': str(start_from_record),
'Li_ResultadoPorPagina': '20',
'FlgBuscador': '1',
'Ls_ParametrosBuscador': 'TXT_FechaVisita_Inicio=10/08/2015|Ls_Pagina={}'.format(start_from_record),
}
request = scrapy.FormRequest(url=page_url, formdata=params,
meta={'date': date_str},
dont_filter=True,
callback=callback)
return request
def parse_pages(self, response):
total_of_records = response.css('#HID_CantidadRegistros').xpath('./@value').extract_first(default=1)
total_of_records = int(total_of_records)
number_of_pages = self.get_number_of_pages(total_of_records)
for page in range(1, number_of_pages + 1):
request = self.make_form_request(response.meta['date'], self.parse, page)
yield request
def get_number_of_pages(self, total_of_records):
return int(math.ceil(total_of_records / float(self.NUMBER_OF_PAGES_PER_PAGE)))
def parse(self, response):
date = self.get_date_item(response.meta['date'], '%d/%m/%Y')
rows = response.xpath("//tr")
for row in rows:
l = ManoloItemLoader(item=ManoloItem(), selector=row)
l.add_value('institution', 'minem')
l.add_value('date', date)
l.add_xpath('full_name', './td[3]/center/text()')
l.add_xpath('entity', './td[5]/center/text()')
l.add_xpath('reason', './td[6]/center/text()')
l.add_xpath('host_name', './td[7]/center/text()')
l.add_xpath('office', './td[8]/center/text()')
l.add_xpath('meeting_place', './td[9]/center/text()')
l.add_xpath('time_start', './td[10]/center/text()')
l.add_xpath('time_end', './td[11]/center/text()')
document_identity = row.xpath('td[4]/center/text()').extract_first(default='')
id_document, id_number = get_dni(document_identity)
l.add_value('id_document', id_document)
l.add_value('id_number', id_number)
item = l.load_item()
item = make_hash(item)
yield item
|
bsd-3-clause
| 4,511,216,025,104,690,000
| 35.356322
| 112
| 0.579829
| false
| 3.322479
| false
| false
| false
|
dderichs/piradio
|
piradio/observer.py
|
1
|
5042
|
# -*- coding: utf-8 -*-
"""
* Copyright (C) 2009, Michael "Svedrin" Ziegler <diese-addy@funzt-halt.net>
*
* Omikron is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This package is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
"""
from copy import deepcopy
class OperationCanceled( Exception ):
""" Can be fired by a listener function to cancel the signal. """
pass
class Listener( object ):
""" Prepares args for and calls the observer function. """
def __init__( self, func, args, kwargs ):
""" Creates a listener associated with func, and stores base args to be
passed to func when the event is fired.
"""
self.func = func
self.args = args
self.kwargs = kwargs
def __call__( self, *args, **kwargs ):
""" Call the associated listener function and merge our args to the base args. """
origkw = deepcopy( self.kwargs )
origkw.update( kwargs )
return self.func( *( self.args + args ), **origkw )
class Signal( object ):
""" Handles calling the Listener functions and canceling of events. """
def __init__( self, cancel = True ):
self.cancel = cancel
self.exception = None
self.listeners = []
def __call__( self, *args, **kwargs ):
""" Call observers. If this signal can be canceled and one of the listeners
returns False, cancel execution and return False, otherwise return True.
"""
self.exception = None
for lst in self.listeners:
try:
ret = lst( *args, **kwargs )
except OperationCanceled, instance:
self.exception = instance
return False
else:
if self.cancel and ret == False:
return False
return True
def addListener( self, func, *args, **kwargs ):
""" Add func as a listener to this signal. """
assert callable( func ), "Listeners must be callable!"
self.listeners.append( Listener( func, args, kwargs ) )
def removeListener( self, func ):
""" Remove the first listener that is associated to func. """
entry = None
for lst in self.listeners:
if lst.func == func:
entry = lst
break
if entry:
self.listeners.remove( entry )
return entry
class Dispatcher( object ):
""" Keeps track of existing events and handles firing. """
def __init__( self ):
self.signals = {}
def addEvent( self, event, cancel = True ):
""" Add a Signal handler for an event.
This does NOT check if another handler already exists. If so, the old one will be overwritten.
"""
self.signals[event] = Signal( cancel )
def removeEvent( self, event ):
""" Remove the Signal handler for the given event. """
sig = self.signals[event]
del self.signals[event]
return sig
def fireEvent( self, event, *args, **kwargs ):
""" Fire an event. """
sig = self.signals[event]
return sig( *args, **kwargs )
def hasEvent( self, event ):
""" Return True if an event of the given name is known. """
return event in self.signals
def __getitem__( self, event ):
""" Get an event handler. """
return self.signals[event]
def __setitem__( self, event, cancel ):
""" Shortcut for addEvent. """
self.addEvent( event, cancel )
def __contains__( self, event ):
""" Shortcut for hasEvent. """
return self.hasEvent( event )
def __delitem__( self, event ):
""" Shortcut for removeEvent. """
return self.removeEvent( event )
def __call__( self, event, *args, **kwargs ):
""" Shortcut for fireEvent. """
return self.fireEvent( event, *args, **kwargs )
if __name__ == '__main__':
dis = Dispatcher()
dis.addEvent( 'ohai' )
def myfunc( *args, **kwargs ):
print 'myfunc haz been called!'
print args
print kwargs
return kwargs['drei'] == 3
def check( *args, **kwargs ):
print 'check haz been called!'
print args
print kwargs
print "drei has been 3!"
dis['ohai'].addListener( myfunc, 1, 2, 3, eins=1, zwei=2, drei=3 )
dis['ohai'].addListener( check )
if dis('ohai', 3, 4, 5, eins=1, zwei=2, drei=5, vier=6 ):
print "success!"
else:
print "fail"
if dis('ohai', 3, 4, 5 ):
print "success!"
else:
print "fail"
dis.addEvent( "kthxbai" )
dis( "kthxbai" )
|
gpl-3.0
| -8,911,325,307,187,469,000
| 29.932515
| 106
| 0.576359
| false
| 4.082591
| false
| false
| false
|
saskartt/P4UL
|
pyNetCDF/syncMaskWithNetCdf.py
|
1
|
5983
|
#!/usr/bin/env python
from netcdfTools import *
from mapTools import *
from utilities import writeLog
import sys
import argparse
import numpy as np
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#============= functions ==================================#
def domainBoundsAndResolution( xf, yf ):
xb = np.array([ np.min(xf), np.max(xf) ])
yb = np.array([ np.min(yf), np.max(yf) ])
dx = (xb[1]-xb[0])/float(len(xf)-1)
dy = (yb[1]-yb[0])/float(len(yf)-1)
return xb, yb, dx, dy
#============ Main ====================================#
parser = argparse.ArgumentParser(prog='syncMaskWithNetCDF.py')
parser.add_argument("-fn", "--fileNetCDF",type=str, help="Name of input NETCDF file.")
parser.add_argument("-fm", "--fileMask",type=str, help="Name of input 2D Mask file.")
parser.add_argument("-d", "--decomp", action="store_true", default=False, \
help="Decomposed into mean (V_m) and fluctuating (V^prime) components.")
parser.add_argument("-dd", "--decompOnly", help="Output V_m and V^prime components only.",\
action="store_true", default=False)
parser.add_argument("-c", "--coarse", help="Coarsening level for the NETCDF data. Int > 1.",\
type=int, default=1)
args = parser.parse_args()
writeLog( parser, args )
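# Example invocation (the file names below are placeholders, not part of the script):
#   ./syncMaskWithNetCdf.py -fn PALM_UVW.nc -fm topographyMask.npz -c 2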
#==========================================================#
# Initial renaming operations and variable declarations
fnc = args.fileNetCDF
fmsk = args.fileMask
fout = fnc.strip('.nc')+'-Msk.nc'
cl = abs(int(args.coarse))
# Boolean switch for the decomposition option.
decompOn = args.decomp or args.decompOnly
'''
Establish two boolean variables which indicate whether the created variable is an
independent or dependent variable in function createNetcdfVariable().
'''
parameter = True; variable = False
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
'''
Create a NETCDF input dataset (ds), and its associated lists of dependent (varList)
and independent (dimList) variables.
'''
ds, varList, paramList = netcdfDataset(fnc)
# Create a NETCDF output dataset (dso) for writing out the data.
dso = netcdfOutputDataset( fout )
'''
Read cell center coordinates and time.
Create the output independent variables right away and empty memory.
'''
time, time_dims = read1DVariableFromDataset('time', ds, paramList, 0, 0, 1 ) # All values.
tv = createNetcdfVariable( dso, time,'time', len(time),'s','f4',('time',), parameter )
time = None
x, x_dims = read1DVariableFromDataset( 'x',ds, paramList, 0, 0, cl ) # All values.
print(' x_dims = {} '.format(x_dims))
x[np.isnan(x)] = 0. # Special treatment.
xv = createNetcdfVariable( dso, x , 'x' , len(x) , 'm', 'f4', ('x',) , parameter )
y, y_dims = read1DVariableFromDataset( 'y',ds, paramList, 0, 0, cl )
print(' y_dims = {} '.format(y_dims))
y[np.isnan(y)] = 0. # Special treatment.
yv = createNetcdfVariable( dso, y , 'y' , len(y) , 'm', 'f4', ('y',) , parameter )
# Determine the NETCDF domain bounds and resolution.
xb, yb, dx, dy = domainBoundsAndResolution( x, y )
x = None; y = None # Clear memory ASAP.
z, z_dims = read1DVariableFromDataset( 'z',ds, paramList, 0, 0, cl )
print(' z_dims = {} '.format(z_dims))
zv = createNetcdfVariable( dso, z , 'z' , len(z) , 'm', 'f4', ('z',) , parameter )
z = None
# - - - - First, read u-component - - - - - - - - - -
u, u_dims = read3DVariableFromDataset( 'u', ds, varList, 0, 0, cl ) # All values.
print(' u_dims = {} '.format(u_dims))
yx_dims = np.array(u_dims[2:])
z_dim = u_dims[1]; t_dim = u_dims[0]
'''
At this point the mask raster data can be treated because
it needs one scalar NETCDF variable to determine the required
index bounds and coarsening level.
'''
# Read the mask raster info.
Rdict = readNumpyZTile(fmsk)
R = Rdict['R']
R_dims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
dPx = Rdict['dPx']
Rdict = None
dr = entry2Int( dPx ) # Resolution as a single number
clr = int( dx/dr ) # Raster to NETCDF coarsening factor
print(' Orig mask dims = {} '.format(R_dims))
# We need y_max value for the Raster data to determine the reversed j-indices.
ybr_max = R_dims[0]*dr
print(' ybr_max = {}'.format(ybr_max))
# Determine the index range for the raster data to match the NETCDF (sub)domain.
# NOTE: dy is subtracted to make first index 0-based.
irx = np.array([ int(xb[0]-dy) , int(xb[1]) ])/ dr # xb[0]:=min, xb[1]:=max
jry = np.array([ int(ybr_max-yb[1]-dy), int(ybr_max-yb[0]) ])/ dr
print(' irx = {}, iry = {}'.format(irx, jry))
# Create sub-region of the raster domain. This should match the NETCDF yx-domain.
Rsub = R[jry[0]:jry[1]:clr, irx[0]:irx[1]:clr]
Rsub_dims = np.shape( Rsub )
if( not (yx_dims==(Rsub_dims)).all() ):
print(' xy-dimensions do not match: nc={} vs. r={}. Exiting ...'.format(yx_dims, Rsub_dims))
sys.exit(1)
# Create mask array m(z,y,x)
m = np.zeros( u_dims[1:], 'uint8') # u_dims[1:] := (z_dim, y_dim, x_dim)
# Copy raster data onto each z-plane. NOTE: y-direction is reversed.
for i in xrange(z_dim):
m[i,:,:] = Rsub[::-1,:]
# The mask data R, by default, may contain values 0 and >0. It has to be converted into
# a proper mask data [0,1]:
m[m>0] = 1
mv = createNetcdfVariable( dso, m, 'mask', 1, ' ', 'i4',('z','y','x',) , variable )
m = None
# To finalize, the NETCDF variables need to be copied to the new file.
uv = createNetcdfVariable( dso, u, 'u', t_dim, 'm/s', 'f4',('time','z','y','x',) , variable )
u = None
v, v_dims = read3DVariableFromDataset( 'v', ds, varList, 0, 0, cl ) # All values.
vv = createNetcdfVariable( dso, v, 'v', t_dim, 'm/s', 'f4',('time','z','y','x',) , variable )
v = None
w, w_dims = read3DVariableFromDataset( 'w', ds, varList, 0, 0, cl ) # All values.
wv = createNetcdfVariable( dso, w, 'w', t_dim, 'm/s', 'f4',('time','z','y','x',) , variable )
w = None
# - - - - Done , finalize the output - - - - - - - - - -
netcdfWriteAndClose( dso )
|
mit
| -1,528,385,973,083,507,700
| 34.402367
| 94
| 0.623767
| false
| 2.820839
| false
| false
| false
|
geodynamics/pylith
|
tests/fullscale/linearelasticity/nofaults-2d/sheartraction_gendb.py
|
1
|
2474
|
#!/usr/bin/env nemesis
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/linearelasticity/nofaults-2d/sheartraction_gendb.py
#
# @brief Python script to generate spatial database with displacement
# boundary conditions for the shear test. The traction boundary
# conditions use UniformDB in the .cfg file.
import numpy
class GenerateDB(object):
"""Python object to generate spatial database with displacement
boundary conditions for the shear test.
"""
def __init__(self):
"""Constructor.
"""
return
def run(self):
"""Generate the database.
"""
# Domain
x = numpy.arange(-4000.0, 4000.1, 1000.0)
y = numpy.arange(-4000.0, 4000.1, 1000.0)
npts = x.shape[0]
xx = x * numpy.ones((npts, 1), dtype=numpy.float64)
yy = y * numpy.ones((npts, 1), dtype=numpy.float64)
xy = numpy.zeros((npts**2, 2), dtype=numpy.float64)
xy[:, 0] = numpy.ravel(xx)
xy[:, 1] = numpy.ravel(numpy.transpose(yy))
from sheartraction_soln import AnalyticalSoln
soln = AnalyticalSoln()
disp = soln.displacement(xy)
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 2
cs._configure()
data = {'points': xy,
'coordsys': cs,
'data_dim': 2,
'values': [{'name': "initial_amplitude_x",
'units': "m",
'data': disp[0, :, 0].ravel()},
{'name': "initial_amplitude_y",
'units': "m",
'data': disp[0, :, 1].ravel()}]}
from spatialdata.spatialdb.SimpleIOAscii import createWriter
io = createWriter("sheartraction_disp.spatialdb")
io.write(data)
return
# ======================================================================
if __name__ == "__main__":
GenerateDB().run()
# End of file
|
mit
| 6,571,733,550,315,437,000
| 29.925
| 75
| 0.525869
| false
| 3.871674
| false
| false
| false
|
gsnbng/erpnext
|
erpnext/hr/utils.py
|
1
|
17205
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.utils import formatdate, format_datetime, getdate, get_datetime, nowdate, flt, cstr, add_days, today
from frappe.model.document import Document
from frappe.desk.form import assign_to
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
class DuplicateDeclarationError(frappe.ValidationError): pass
class EmployeeBoardingController(Document):
'''
Create the project and the task for the boarding process
Assign to the concerned person and roles as per the onboarding/separation template
'''
def validate(self):
# remove the task if linked before submitting the form
if self.amended_from:
for activity in self.activities:
activity.task = ''
def on_submit(self):
# create the project for the given employee onboarding
project_name = _(self.doctype) + " : "
if self.doctype == "Employee Onboarding":
project_name += self.job_applicant
else:
project_name += self.employee
project = frappe.get_doc({
"doctype": "Project",
"project_name": project_name,
"expected_start_date": self.date_of_joining if self.doctype == "Employee Onboarding" else self.resignation_letter_date,
"department": self.department,
"company": self.company
}).insert(ignore_permissions=True)
self.db_set("project", project.name)
self.db_set("boarding_status", "Pending")
self.reload()
self.create_task_and_notify_user()
def create_task_and_notify_user(self):
# create the task for the given project and assign to the concerned person
for activity in self.activities:
if activity.task:
continue
task = frappe.get_doc({
"doctype": "Task",
"project": self.project,
"subject": activity.activity_name + " : " + self.employee_name,
"description": activity.description,
"department": self.department,
"company": self.company,
"task_weight": activity.task_weight
}).insert(ignore_permissions=True)
activity.db_set("task", task.name)
users = [activity.user] if activity.user else []
if activity.role:
user_list = frappe.db.sql_list('''select distinct(parent) from `tabHas Role`
where parenttype='User' and role=%s''', activity.role)
users = users + user_list
if "Administrator" in users:
users.remove("Administrator")
# assign the task the users
if users:
self.assign_task_to_users(task, set(users))
def assign_task_to_users(self, task, users):
for user in users:
args = {
'assign_to' : user,
'doctype' : task.doctype,
'name' : task.name,
'description' : task.description or task.subject,
'notify': self.notify_users_by_email
}
assign_to.add(args)
def on_cancel(self):
# delete task project
for task in frappe.get_all("Task", filters={"project": self.project}):
frappe.delete_doc("Task", task.name, force=1)
frappe.delete_doc("Project", self.project, force=1)
self.db_set('project', '')
for activity in self.activities:
activity.db_set("task", "")
@frappe.whitelist()
def get_onboarding_details(parent, parenttype):
return frappe.get_all("Employee Boarding Activity",
fields=["activity_name", "role", "user", "required_for_employee_creation", "description", "task_weight"],
filters={"parent": parent, "parenttype": parenttype},
order_by= "idx")
@frappe.whitelist()
def get_boarding_status(project):
status = 'Pending'
if project:
doc = frappe.get_doc('Project', project)
if flt(doc.percent_complete) > 0.0 and flt(doc.percent_complete) < 100.0:
status = 'In Process'
elif flt(doc.percent_complete) == 100.0:
status = 'Completed'
return status
def set_employee_name(doc):
if doc.employee and not doc.employee_name:
doc.employee_name = frappe.db.get_value("Employee", doc.employee, "employee_name")
def update_employee(employee, details, date=None, cancel=False):
internal_work_history = {}
for item in details:
fieldtype = frappe.get_meta("Employee").get_field(item.fieldname).fieldtype
new_data = item.new if not cancel else item.current
if fieldtype == "Date" and new_data:
new_data = getdate(new_data)
elif fieldtype =="Datetime" and new_data:
new_data = get_datetime(new_data)
setattr(employee, item.fieldname, new_data)
if item.fieldname in ["department", "designation", "branch"]:
internal_work_history[item.fieldname] = item.new
if internal_work_history and not cancel:
internal_work_history["from_date"] = date
employee.append("internal_work_history", internal_work_history)
return employee
@frappe.whitelist()
def get_employee_fields_label():
fields = []
for df in frappe.get_meta("Employee").get("fields"):
if df.fieldname in ["salutation", "user_id", "employee_number", "employment_type",
"holiday_list", "branch", "department", "designation", "grade",
"notice_number_of_days", "reports_to", "leave_policy", "company_email"]:
fields.append({"value": df.fieldname, "label": df.label})
return fields
@frappe.whitelist()
def get_employee_field_property(employee, fieldname):
if employee and fieldname:
field = frappe.get_meta("Employee").get_field(fieldname)
value = frappe.db.get_value("Employee", employee, fieldname)
options = field.options
if field.fieldtype == "Date":
value = formatdate(value)
elif field.fieldtype == "Datetime":
value = format_datetime(value)
return {
"value" : value,
"datatype" : field.fieldtype,
"label" : field.label,
"options" : options
}
else:
return False
def validate_dates(doc, from_date, to_date):
date_of_joining, relieving_date = frappe.db.get_value("Employee", doc.employee, ["date_of_joining", "relieving_date"])
if getdate(from_date) > getdate(to_date):
frappe.throw(_("To date can not be less than from date"))
elif getdate(from_date) > getdate(nowdate()):
frappe.throw(_("Future dates not allowed"))
elif date_of_joining and getdate(from_date) < getdate(date_of_joining):
frappe.throw(_("From date can not be less than employee's joining date"))
elif relieving_date and getdate(to_date) > getdate(relieving_date):
frappe.throw(_("To date can not greater than employee's relieving date"))
def validate_overlap(doc, from_date, to_date, company = None):
query = """
select name
from `tab{0}`
where name != %(name)s
"""
query += get_doc_condition(doc.doctype)
if not doc.name:
# hack! if name is null, it could cause problems with !=
doc.name = "New "+doc.doctype
overlap_doc = frappe.db.sql(query.format(doc.doctype),{
"employee": doc.get("employee"),
"from_date": from_date,
"to_date": to_date,
"name": doc.name,
"company": company
}, as_dict = 1)
if overlap_doc:
if doc.get("employee"):
exists_for = doc.employee
if company:
exists_for = company
throw_overlap_error(doc, exists_for, overlap_doc[0].name, from_date, to_date)
def get_doc_condition(doctype):
if doctype == "Compensatory Leave Request":
return "and employee = %(employee)s and docstatus < 2 \
and (work_from_date between %(from_date)s and %(to_date)s \
or work_end_date between %(from_date)s and %(to_date)s \
or (work_from_date < %(from_date)s and work_end_date > %(to_date)s))"
elif doctype == "Leave Period":
return "and company = %(company)s and (from_date between %(from_date)s and %(to_date)s \
or to_date between %(from_date)s and %(to_date)s \
or (from_date < %(from_date)s and to_date > %(to_date)s))"
def throw_overlap_error(doc, exists_for, overlap_doc, from_date, to_date):
msg = _("A {0} exists between {1} and {2} (").format(doc.doctype,
formatdate(from_date), formatdate(to_date)) \
+ """ <b><a href="#Form/{0}/{1}">{1}</a></b>""".format(doc.doctype, overlap_doc) \
+ _(") for {0}").format(exists_for)
frappe.throw(msg)
def get_employee_leave_policy(employee):
leave_policy = frappe.db.get_value("Employee", employee, "leave_policy")
if not leave_policy:
employee_grade = frappe.db.get_value("Employee", employee, "grade")
if employee_grade:
leave_policy = frappe.db.get_value("Employee Grade", employee_grade, "default_leave_policy")
if not leave_policy:
frappe.throw(_("Employee {0} of grade {1} have no default leave policy").format(employee, employee_grade))
if leave_policy:
return frappe.get_doc("Leave Policy", leave_policy)
else:
frappe.throw(_("Please set leave policy for employee {0} in Employee / Grade record").format(employee))
def validate_duplicate_exemption_for_payroll_period(doctype, docname, payroll_period, employee):
existing_record = frappe.db.exists(doctype, {
"payroll_period": payroll_period,
"employee": employee,
'docstatus': ['<', 2],
'name': ['!=', docname]
})
if existing_record:
frappe.throw(_("{0} already exists for employee {1} and period {2}")
.format(doctype, employee, payroll_period), DuplicateDeclarationError)
def validate_tax_declaration(declarations):
subcategories = []
for d in declarations:
if d.exemption_sub_category in subcategories:
frappe.throw(_("More than one selection for {0} not allowed").format(d.exemption_sub_category))
subcategories.append(d.exemption_sub_category)
def get_total_exemption_amount(declarations):
exemptions = frappe._dict()
for d in declarations:
exemptions.setdefault(d.exemption_category, frappe._dict())
category_max_amount = exemptions.get(d.exemption_category).max_amount
if not category_max_amount:
category_max_amount = frappe.db.get_value("Employee Tax Exemption Category", d.exemption_category, "max_amount")
exemptions.get(d.exemption_category).max_amount = category_max_amount
sub_category_exemption_amount = d.max_amount \
if (d.max_amount and flt(d.amount) > flt(d.max_amount)) else d.amount
exemptions.get(d.exemption_category).setdefault("total_exemption_amount", 0.0)
exemptions.get(d.exemption_category).total_exemption_amount += flt(sub_category_exemption_amount)
if category_max_amount and exemptions.get(d.exemption_category).total_exemption_amount > category_max_amount:
exemptions.get(d.exemption_category).total_exemption_amount = category_max_amount
total_exemption_amount = sum([flt(d.total_exemption_amount) for d in exemptions.values()])
return total_exemption_amount
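# Worked example with hypothetical figures: two declarations under one category
# whose max_amount is 150000, with amounts 100000 and 80000 (and no sub-category
# caps), sum to 180000 and are then capped, so the function returns 150000.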
def get_leave_period(from_date, to_date, company):
leave_period = frappe.db.sql("""
select name, from_date, to_date
from `tabLeave Period`
where company=%(company)s and is_active=1
and (from_date between %(from_date)s and %(to_date)s
or to_date between %(from_date)s and %(to_date)s
or (from_date < %(from_date)s and to_date > %(to_date)s))
""", {
"from_date": from_date,
"to_date": to_date,
"company": company
}, as_dict=1)
if leave_period:
return leave_period
def generate_leave_encashment():
''' Generates a draft leave encashment on allocation expiry '''
from erpnext.hr.doctype.leave_encashment.leave_encashment import create_leave_encashment
if frappe.db.get_single_value('HR Settings', 'auto_leave_encashment'):
leave_type = frappe.get_all('Leave Type', filters={'allow_encashment': 1}, fields=['name'])
leave_type=[l['name'] for l in leave_type]
leave_allocation = frappe.get_all("Leave Allocation", filters={
'to_date': add_days(today(), -1),
'leave_type': ('in', leave_type)
}, fields=['employee', 'leave_period', 'leave_type', 'to_date', 'total_leaves_allocated', 'new_leaves_allocated'])
create_leave_encashment(leave_allocation=leave_allocation)
def allocate_earned_leaves():
'''Allocate earned leaves to Employees'''
e_leave_types = frappe.get_all("Leave Type",
fields=["name", "max_leaves_allowed", "earned_leave_frequency", "rounding"],
filters={'is_earned_leave' : 1})
today = getdate()
divide_by_frequency = {"Yearly": 1, "Half-Yearly": 6, "Quarterly": 4, "Monthly": 12}
for e_leave_type in e_leave_types:
leave_allocations = frappe.db.sql("""select name, employee, from_date, to_date from `tabLeave Allocation` where %s
between from_date and to_date and docstatus=1 and leave_type=%s""", (today, e_leave_type.name), as_dict=1)
for allocation in leave_allocations:
leave_policy = get_employee_leave_policy(allocation.employee)
if not leave_policy:
continue
if not e_leave_type.earned_leave_frequency == "Monthly":
if not check_frequency_hit(allocation.from_date, today, e_leave_type.earned_leave_frequency):
continue
annual_allocation = frappe.db.get_value("Leave Policy Detail", filters={
'parent': leave_policy.name,
'leave_type': e_leave_type.name
}, fieldname=['annual_allocation'])
if annual_allocation:
earned_leaves = flt(annual_allocation) / divide_by_frequency[e_leave_type.earned_leave_frequency]
if e_leave_type.rounding == "0.5":
earned_leaves = round(earned_leaves * 2) / 2
else:
earned_leaves = round(earned_leaves)
allocation = frappe.get_doc('Leave Allocation', allocation.name)
new_allocation = flt(allocation.total_leaves_allocated) + flt(earned_leaves)
if new_allocation > e_leave_type.max_leaves_allowed and e_leave_type.max_leaves_allowed > 0:
new_allocation = e_leave_type.max_leaves_allowed
if new_allocation == allocation.total_leaves_allocated:
continue
allocation.db_set("total_leaves_allocated", new_allocation, update_modified=False)
create_additional_leave_ledger_entry(allocation, earned_leaves, today)
def create_additional_leave_ledger_entry(allocation, leaves, date):
''' Create leave ledger entry for leave types '''
allocation.new_leaves_allocated = leaves
allocation.from_date = date
allocation.unused_leaves = 0
allocation.create_leave_ledger_entry()
def check_frequency_hit(from_date, to_date, frequency):
'''Return True if current date matches frequency'''
from_dt = get_datetime(from_date)
to_dt = get_datetime(to_date)
from dateutil import relativedelta
rd = relativedelta.relativedelta(to_dt, from_dt)
months = rd.months
if frequency == "Quarterly":
if not months % 3:
return True
elif frequency == "Half-Yearly":
if not months % 6:
return True
elif frequency == "Yearly":
if not months % 12:
return True
return False
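# Illustrative results for the helper above:
#   check_frequency_hit("2020-01-01", "2020-07-01", "Half-Yearly") -> True
#   check_frequency_hit("2020-01-01", "2020-05-01", "Quarterly")   -> False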
def get_salary_assignment(employee, date):
assignment = frappe.db.sql("""
select * from `tabSalary Structure Assignment`
where employee=%(employee)s
and docstatus = 1
and %(on_date)s >= from_date order by from_date desc limit 1""", {
'employee': employee,
'on_date': date,
}, as_dict=1)
return assignment[0] if assignment else None
def get_sal_slip_total_benefit_given(employee, payroll_period, component=False):
total_given_benefit_amount = 0
query = """
select sum(sd.amount) as 'total_amount'
from `tabSalary Slip` ss, `tabSalary Detail` sd
where ss.employee=%(employee)s
and ss.docstatus = 1 and ss.name = sd.parent
and sd.is_flexible_benefit = 1 and sd.parentfield = "earnings"
and sd.parenttype = "Salary Slip"
and (ss.start_date between %(start_date)s and %(end_date)s
or ss.end_date between %(start_date)s and %(end_date)s
or (ss.start_date < %(start_date)s and ss.end_date > %(end_date)s))
"""
if component:
query += "and sd.salary_component = %(component)s"
sum_of_given_benefit = frappe.db.sql(query, {
'employee': employee,
'start_date': payroll_period.start_date,
'end_date': payroll_period.end_date,
'component': component
}, as_dict=True)
if sum_of_given_benefit and flt(sum_of_given_benefit[0].total_amount) > 0:
total_given_benefit_amount = sum_of_given_benefit[0].total_amount
return total_given_benefit_amount
def get_holidays_for_employee(employee, start_date, end_date):
holiday_list = get_holiday_list_for_employee(employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
@erpnext.allow_regional
def calculate_annual_eligible_hra_exemption(doc):
# Don't delete this method, used for localization
# Indian HRA Exemption Calculation
return {}
@erpnext.allow_regional
def calculate_hra_exemption_for_period(doc):
# Don't delete this method, used for localization
# Indian HRA Exemption Calculation
return {}
def get_previous_claimed_amount(employee, payroll_period, non_pro_rata=False, component=False):
total_claimed_amount = 0
query = """
select sum(claimed_amount) as 'total_amount'
from `tabEmployee Benefit Claim`
where employee=%(employee)s
and docstatus = 1
and (claim_date between %(start_date)s and %(end_date)s)
"""
if non_pro_rata:
query += "and pay_against_benefit_claim = 1"
if component:
query += "and earning_component = %(component)s"
sum_of_claimed_amount = frappe.db.sql(query, {
'employee': employee,
'start_date': payroll_period.start_date,
'end_date': payroll_period.end_date,
'component': component
}, as_dict=True)
if sum_of_claimed_amount and flt(sum_of_claimed_amount[0].total_amount) > 0:
total_claimed_amount = sum_of_claimed_amount[0].total_amount
return total_claimed_amount
|
agpl-3.0
| -7,992,770,331,410,956,000
| 36.730263
| 123
| 0.706306
| false
| 3.052697
| false
| false
| false
|
lantianlz/qiexing
|
www/journey/views.py
|
1
|
5036
|
# -*- coding: utf-8 -*-
import json
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from common import utils, page
from www.journey import interface
from www.misc.decorators import member_required, staff_required, common_ajax_response
from www.admin.interface import CoverBase
jb = interface.JourneyBase()
lb = interface.LikeBase()
def home(request, template_name='journey/home.html'):
from www.activity.interface import ActivityBase
from www.admin.interface import FriendlyLinkBase
activitys = ActivityBase().get_all_valid_activitys()[:3]
journeys = jb.format_journeys(jb.get_all_journeys_for_home_page()[:4])
links = FriendlyLinkBase().get_friendly_link_by_link_type(link_type=3)
covers = CoverBase().get_home_cover()
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
def journey_list(request, template_name='journey/journey_list.html'):
journeys = jb.get_all_journeys_for_home_page()
    # Pagination
page_num = int(request.REQUEST.get('page', 1))
page_objs = page.Cpt(journeys, count=10, page=page_num).info
journeys = page_objs[0]
page_params = (page_objs[1], page_objs[4])
journeys = jb.format_journeys(journeys)
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
def journey_detail(request, journey_id, template_name='journey/journey_detail.html'):
journey = jb.get_journey_by_id(journey_id)
if not journey:
raise Http404
journey = jb.format_journeys([journey, ],)[0]
sort = request.REQUEST.get('sort', 'like_count')
    answers_list_params = "%s$%s" % (journey.id, "0")  # used by the front end to fetch the reply list
    # Fetch notification messages from the session
if request.session.has_key('error_msg'):
error_msg = request.session['error_msg']
del request.session['error_msg']
if request.session.has_key('success_msg'):
success_msg = request.session['success_msg']
del request.session['success_msg']
if request.session.has_key('answer_content'):
request.answer_content = request.session['answer_content']
del request.session['answer_content']
if request.session.has_key('guide'):
guide = request.session['guide']
del request.session['guide']
    # Asynchronously update the view count
from www.tasks import async_add_journey_view_count
async_add_journey_view_count(journey.id)
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
@member_required
def write_journey(request, template_name='journey/write_journey.html'):
if request.POST:
journey_title = request.POST.get('journey_title', '').strip()
journey_content = request.POST.get('journey_content', '').strip()
is_hide_user = request.POST.get('is_hide_user')
errcode, result = jb.create_journey(request.user.id, journey_title, journey_content,
ip=utils.get_clientip(request), is_hide_user=is_hide_user)
if errcode == 0:
request.session['guide'] = True
return HttpResponseRedirect(result.get_url())
else:
error_msg = result
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
@member_required
def modify_journey(request, journey_id):
if request.POST:
journey_title = request.POST.get('journey_title', '').strip()
journey_content = request.POST.get('journey_content', '').strip()
is_hide_user = request.POST.get('is_hide_user')
errcode, result = jb.modify_journey(journey_id, request.user, journey_title, journey_content,
ip=utils.get_clientip(request), is_hide_user=is_hide_user)
if errcode == 0:
request.session['success_msg'] = u'修改成功'
return HttpResponseRedirect(result.get_url())
else:
request.session['error_msg'] = result
return HttpResponseRedirect(jb.get_journey_by_id(journey_id).get_url())
# ===================================================ajax section=================================================================#
@member_required
@common_ajax_response
def like_journey(request):
journey_id = request.POST.get('journey_id', '').strip()
return lb.like_it(journey_id, request.user.id, ip=utils.get_clientip(request))
@member_required
@common_ajax_response
def remove_journey(request):
journey_id = request.POST.get('journey_id', '').strip()
return jb.remove_journey(journey_id, request.user)
@staff_required
@common_ajax_response
def set_top(request):
journey_id = request.POST.get('journey_id', '').strip()
return jb.set_top(journey_id)
@staff_required
@common_ajax_response
def cancel_top(request):
journey_id = request.POST.get('journey_id', '').strip()
return jb.cancel_top(journey_id)
|
gpl-2.0
| -7,290,553,088,338,072,000
| 34.485714
| 125
| 0.662037
| false
| 3.244938
| false
| false
| false
|
MGApcDev/LamasAndGroves
|
src/wordbranch.py
|
1
|
2333
|
class WordBranch(object):
"""WordBranch represents a single branch in the tree of all the valid word combinations.
Attributes:
letter_branch (LetterBranch) The reference to the LetterBranch that represents the word.
origin (WordBranch) The reference to the parent WordBranch.
remain_char (int) Number of characters remaining in the remain_dict.
valid_children ([WordBranch]) Array of WordBranches leading to valid anagrams.
"""
def __init__(self, letter_branch, origin, remain_char, valid_children):
self.letter_branch = letter_branch
self.origin = origin
self.remain_char = remain_char
self.valid_children = valid_children
def __str__(self):
'''Trace words from leaf branch to root.
Args
self (WordBranch) The leaf branch to trace for word.
Returns
(string) The full string of represented by the leaf.
'''
output_str = ''
words = []
pointer = self
while pointer.origin != None:
words.append(pointer)
pointer = pointer.origin
words.reverse() # Put words in the right order
for word in words:
output_str += str(word.letter_branch) + ' '
# Remove last char --> ' '
return output_str[:-1]
hash_to_branch = {}
def get_word_tree_root(phrase_len, phrase_dict, words):
'''Construct the root object of the WordBranch tree.
Args
phrase_len (int) Count of valid characters in phrase.
phrase_dict ({char => int}) The remaining letters of the phrase.
words ([LetterBranch]) Array of all the available words as LetterBranch.
Returns
(WordBranch) The root of WordBranch tree.
'''
global hash_to_branch
hash_to_branch = {} # Reset hash to branch on new tree root
root_children = []
root = WordBranch(None, None, phrase_len, None)
for word in words:
root_children.append(WordBranch(word, root, phrase_len - len(str(word)), None))
return root, root_children
def get_hash_to_branch():
global hash_to_branch
return hash_to_branch
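# Hedged usage sketch (illustrative only, not part of the original module): `leaves`
# stands for WordBranch objects reached by walking valid_children from the root
# returned by get_word_tree_root(); str(leaf) rebuilds the phrase by following the
# origin links back to the root, as implemented in WordBranch.__str__ above.
def example_collect_phrases(leaves):
    """Return the phrase string for every leaf WordBranch (illustrative helper)."""
    return [str(leaf) for leaf in leaves]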
|
mit
| 8,808,247,715,610,096,000
| 37.542373
| 98
| 0.583369
| false
| 4.486538
| false
| false
| false
|
alehander42/bach
|
bach/bach_macro.py
|
1
|
4898
|
import types
import bach_ast
import compiler
import bach_stl
from errors import MacroMatchError
def register_macro(mapping, label, macro, count=None):
if label not in mapping:
mapping[label] = []
if isinstance(macro, types.FunctionType):
mapping[label].append((count, macro))
else:
mapping[label].append((macro.args_count(), macro))
class BachMacro(object):
def __init__(self, label, args, body):
self.label, self.args, self.body = label, args, body
def render(self, sexps):
# mapping = {}
# if len(self.args) > 0 and isinstance(self.args[-1], bach_ast.Many):
# if len(self.args) >= len(sexps) - 1:
# for arg, sexp in zip(self.args[:-1], self.sexps[:len(self.args) - 1]):
# mapping[arg.label] = sexp
# mapping[self.args[-1].label] = sexps[len(self.args) - 1:]
# else:
# raise MacroMatchError("No enough args for %s" % self.label)
# else:
# if len(self.args) == len(sexps):
# for arg, sexp in zip(self.args, sexps):
# mapping[arg.label] = sexp
# else:
# raise MacroMatchError("Expected %d args got %d for %s" % (len(self.args), len(sexps), self.label))
# value =
if not self.args:
args = []
elif isinstance(self.args[-1], bach_ast.Many):
args = self.args[:-1] + [bach_ast.Label(self.args[-1].label)]
else:
args = self.args
sexps = [bach_ast.Quote(sexp) for sexp in sexps]
sexp = bach_ast.Program([[bach_ast.Lambda(args, self.body)] + sexps])
result = compiler.Compiler().compile_and_eval(sexp, stl=bach_stl.load_stl(), return_value=True)
return self.normal_sexp(result)
def normal_sexp(self, sexp):
'''
we compile macros to a bach lambda and then run them, so some of the resulting values
can't have the compile-time node types(only native python types and bach runtime types)
however they are just several of those cases and they're pretty similar
we convert the results back to normal bach sexp, so we can easily apply other macros
'''
PYTHON_BACH_EQUIVALENTS = {int: bach_ast.Integer, float: bach_ast.Float, str: bach_ast.String, bool: bach_ast.Boolean}
if isinstance(sexp, list):
return map(self.normal_sexp, sexp)
elif type(sexp) in PYTHON_BACH_EQUIVALENTS:
return PYTHON_BACH_EQUIVALENTS[type(sexp)](sexp)
elif type(sexp).__name__ == 'BachSymbol':
return bach_ast.Label(sexp.value)
elif isinstance(sexp, dict):
return bach_ast.Dict(map(self.normal_sexp, sexp.keys()), map(self.normal_sexp, sexp.values()))
elif isinstance(sexp, set):
return bach_ast.Set(map(self.normal_sexp, sexp))
else:
return sexp
def generic_render(self, node, mapping):
method_name = 'render_' + type(node).__name__.lower()
if hasattr(self, method_name):
return getattr(self, method_name)(node, mapping)
else:
return node
def render_list(self, node, mapping):
if mapping[QUA]:
result = []
for child in node:
if isinstance(child, bach_ast.UnquoteList):
result += self.generic_render(child, mapping)
else:
result.append(self.generic_render(child, mapping))
return result
else:
return [self.generic_render(child, mapping) for child in node]
def render_quasiquote(self, node, mapping):
quasiquote_mapping = mapping.copy()
quasiquote_mapping[QUA] = True
return self.generic_render(node.expr, quasiquote_mapping)
def render_quote(self, node, mapping):
return self.generic_render(node.expr, mapping)
def render_unquote(self, node, mapping):
if mapping[QUA]:
return mapping[node.expr.label]
else:
return node
def render_unquotelist(self, node, mapping):
if mapping[QUA]:
return mapping[node.expr.label]
else:
return node
def register(self, sexps):
mapping = {QUA: False} # a flag for activated quasi
for arg, sexp in zip(self.args, sexps):
if isinstance(arg, bach_ast.Label):
mapping[arg.label] = sexp
if len(sexps) > len(self.args) and isinstance(self.args[-1], bach_ast.Many):
mapping[self.args[-1].label] = sexps[len(self.args)-1:]
return mapping
def args_count(self):
if len(self.args) > 0 and isinstance(self.args[-1], bach_ast.Label) or len(self.args) == 0:
return len(self.args)
else:
return (len(self.args),)
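# Assumption: QUA is used above as the "quasiquote active" flag key of the render
# mapping but is not defined in this module as extracted; a module-level sentinel like
# the one below is assumed here so the render_* helpers can be exercised.
QUA = 'quasiquote-active'
# Hedged usage sketch: registering a two-argument macro under a label. The bach_ast
# constructors follow their usage in render() above; the macro body here is left empty
# purely for illustration.
def example_register(mapping):
    macro = BachMacro('swap', [bach_ast.Label('a'), bach_ast.Label('b')], [])
    register_macro(mapping, 'swap', macro)
    return mapping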
|
mit
| 1,044,999,443,034,420,000
| 39.147541
| 126
| 0.576766
| false
| 3.572575
| false
| false
| false
|
juliusdedekind/FindDuplicateFiles
|
FindDuplicates.py
|
1
|
2897
|
"""Find duplicate files inside a directory tree."""
from os import walk, remove, stat
from os.path import join as joinpath
from hashlib import md5
import threading
import Queue
import time
import sys
class Scanner(threading.Thread):
def __init__(self, path, queue, finished_scan):
threading.Thread.__init__(self)
self._path = path
self._queue = queue
self._finished_scan = finished_scan
def run(self):
"""Find duplicate files in directory tree and return array with lists of duplicateted files."""
filesizes = {}
# Build up dict with key as filesize and value is list of filenames.
for path, dirs, files in walk(self._path ):
for filename in files:
filepath = joinpath( path, filename )
filesize = stat(filepath).st_size
filesizes.setdefault( filesize, [] ).append(filepath)
#Compare content hash of all files which have the same size
#if two or more files have same hash and size they are added to the queue
for files in [ flist for flist in filesizes.values() if len(flist) > 1 ]:
#run over all files in dir with the same size if there is more than one
duplicates = {}
for filepath in files:
with open( filepath ) as openfile:
filehash = md5(openfile.read()).hexdigest()
duplicates.setdefault(filehash, []).append(filepath)
for duplicate in [duplicate for duplicate in duplicates.values() if len(duplicate) > 1 ]:
self._queue.put(duplicate)
self._finished_scan[0] = 1
class Updater(threading.Thread):
def __init__(self, queue, duplicates, updateFunction, finished_scan, time):
threading.Thread.__init__(self)
self._queue = queue
self._updateFunc = updateFunction
self._duplicates = duplicates
self._finished_scan = finished_scan
self._time_duration = time
def run(self):
while True:
try:
item = self._queue.get(True, 0.03) # seems to be a good time value
self._duplicates.append(item)
self._queue.task_done()
self._updateFunc()
except Queue.Empty:
# if queue is empty and scan is finished then stop this thread
if self._finished_scan[0] == 1:
self._time_duration = time.time() - self._time_duration
print 'Finished in ' + repr(self._time_duration) + ' seconds!'
self._updateFunc()
break
else:
continue
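# Hedged wiring sketch showing how Scanner and Updater are meant to cooperate through
# the shared queue and the finished_scan flag. The no-op update callback below is a
# stand-in for whatever UI callback the real caller supplies.
def example_scan(path):
    queue = Queue.Queue()
    finished_scan = [0]   # Scanner sets finished_scan[0] = 1 when the walk is done
    duplicates = []
    scanner = Scanner(path, queue, finished_scan)
    updater = Updater(queue, duplicates, lambda: None, finished_scan, time.time())
    scanner.start()
    updater.start()
    scanner.join()
    updater.join()
    return duplicates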
|
gpl-3.0
| -9,085,385,887,322,903,000
| 38.802817
| 103
| 0.563687
| false
| 4.741408
| false
| false
| false
|
armanpazouki/chrono
|
src/demos/python/chrono-tensorflow/PPO/train.py
|
1
|
12022
|
"""
PPO: Proximal Policy Optimization
serial version
"""
import sys
sys.path.append('../envs')
import chtrain as gym
import numpy as np
from policy import Policy
from value_function import NNValueFunction
import scipy.signal
from utils import Logger, Scaler
from datetime import datetime
import argparse
import signal
class GracefulKiller:
""" Gracefully exit program on CTRL-C """
def __init__(self):
self.kill_now = False
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
def init_gym(env_name, render):
"""
Initialize gym environment, return dimension of observation
and action spaces.
Args:
render: True to toggle on visualization
Returns: 3-tuple
environment (object)
number of observation dimensions (int)
number of action dimensions (int)
"""
env = gym.Init(env_name, render)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
return env, obs_dim, act_dim
def run_episode(env, policy, scaler, animate=True):
""" Run single episode
Args:
env: environment (object)
policy: policy object with sample() method
scaler: scaler object, scales/offsets each observation
Returns: 4-tuple of NumPy arrays
observes: shape = (episode len, obs_dim)
actions: shape = (episode len, act_dim)
rewards: shape = (episode len,)
unscaled_obs: dataset for training scaler, shape = (episode len, obs_dim)
"""
obs = env.reset() #resets whenever an episode begins
observes, actions, rewards, unscaled_obs = [], [], [], []
done = False
step = 0.0
scale, offset = scaler.get()
scale[-1] = 1.0 # don't scale time step feature
offset[-1] = 0.0 # don't offset time step feature
while not done:
obs = obs.astype(np.float64).reshape((1, -1))
obs = np.append(obs, [[step]], axis=1) # add time step feature TODO: check if this extra state is useful
unscaled_obs.append(obs)
obs = (obs - offset) * scale # center and scale observations TODO: check if scaler is useful (it should be according to literature)
observes.append(obs)
action = policy.sample(obs).reshape((1, -1)).astype(np.float64)
actions.append(action)
obs, reward, done, _ = env.step(action) #state, reward, done, info = env.step(action)
if not isinstance(reward, float):
reward = np.asscalar(reward)
rewards.append(reward)
step += 1e-3 # increment time step feature
return (np.concatenate(observes), np.concatenate(actions),
np.array(rewards, dtype=np.float64), np.concatenate(unscaled_obs))
def run_policy(env, policy, scaler, logger, episodes):
""" Run policy and collect data
Args:
env: environment (object)
policy: policy object with sample() method
scaler: scaler object, scales/offsets each observation
logger: logger object, used to save stats from episodes
episodes: total episodes to run
Returns: list of trajectory dictionaries, list length = number of episodes
'observes' : NumPy array of states from episode
'actions' : NumPy array of actions from episode
'rewards' : NumPy array of (un-discounted) rewards from episode
'unscaled_obs' : NumPy array of (un-scaled) states from episode
"""
total_steps = 0
trajectories = []
for e in range(episodes):
observes, actions, rewards, unscaled_obs = run_episode(env, policy, scaler)
total_steps += observes.shape[0]
trajectory = {'observes': observes,
'actions': actions,
'rewards': rewards,
'unscaled_obs': unscaled_obs}
trajectories.append(trajectory)
unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])
scaler.update(unscaled) # update running statistics for scaling observations
logger.log({'_MeanReward': np.mean([t['rewards'].sum() for t in trajectories]),
'Steps': total_steps})
return trajectories
def discount(x, gamma):
""" Calculate discounted forward sum of a sequence at each point """
return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]
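# Worked example of discount() (illustrative): each entry is the value at that step plus
# the discounted sum of everything after it, e.g. with gamma = 0.9:
#   discount(np.array([1.0, 1.0, 1.0]), 0.9) -> array([2.71, 1.9, 1.0])
#   (2.71 = 1 + 0.9*1 + 0.81*1, 1.9 = 1 + 0.9*1, 1.0 = last reward)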
def add_disc_sum_rew(trajectories, gamma):
""" Adds discounted sum of rewards to all time steps of all trajectories
Args:
trajectories: as returned by run_policy()
gamma: discount
Returns:
None (mutates trajectories dictionary to add 'disc_sum_rew')
"""
for trajectory in trajectories:
if gamma < 0.999: # don't scale for gamma ~= 1
rewards = trajectory['rewards'] * (1 - gamma)
else:
rewards = trajectory['rewards']
disc_sum_rew = discount(rewards, gamma)
trajectory['disc_sum_rew'] = disc_sum_rew
def add_value(trajectories, val_func):
""" Adds estimated value to all time steps of all trajectories
Args:
trajectories: as returned by run_policy()
val_func: object with predict() method, takes observations
and returns predicted state value
Returns:
None (mutates trajectories dictionary to add 'values')
"""
for trajectory in trajectories:
observes = trajectory['observes']
values = val_func.predict(observes)
trajectory['values'] = values
def add_gae(trajectories, gamma, lam):
""" Add generalized advantage estimator.
https://arxiv.org/pdf/1506.02438.pdf
Args:
trajectories: as returned by run_policy(), must include 'values'
key from add_value().
gamma: reward discount
lam: lambda (see paper).
lam=0 : use TD residuals
lam=1 : A = Sum Discounted Rewards - V_hat(s)
Returns:
None (mutates trajectories dictionary to add 'advantages')
"""
for trajectory in trajectories:
if gamma < 0.999: # don't scale for gamma ~= 1
rewards = trajectory['rewards'] * (1 - gamma)
else:
rewards = trajectory['rewards']
values = trajectory['values']
# temporal differences
# values[1:] deletes the first element (Vo) and attachs a 0 at the end (the future state value function at the end of the trajectory is 0)
# r - Vs + gamma*Vst+1
tds = rewards - values + np.append(values[1:] * gamma, 0)
advantages = discount(tds, gamma * lam)
trajectory['advantages'] = advantages
def build_train_set(trajectories):
"""
Args:
trajectories: trajectories after processing by add_disc_sum_rew(),
add_value(), and add_gae()
Returns: 4-tuple of NumPy arrays
observes: shape = (N, obs_dim)
actions: shape = (N, act_dim)
advantages: shape = (N,)
disc_sum_rew: shape = (N,)
"""
observes = np.concatenate([t['observes'] for t in trajectories])
actions = np.concatenate([t['actions'] for t in trajectories])
disc_sum_rew = np.concatenate([t['disc_sum_rew'] for t in trajectories])
advantages = np.concatenate([t['advantages'] for t in trajectories])
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6)
return observes, actions, advantages, disc_sum_rew
def log_batch_stats(observes, actions, advantages, disc_sum_rew, logger, episode):
""" Log various batch statistics """
logger.log({'_mean_obs': np.mean(observes),
'_min_obs': np.min(observes),
'_max_obs': np.max(observes),
'_std_obs': np.mean(np.var(observes, axis=0)),
'_mean_act': np.mean(actions),
'_min_act': np.min(actions),
'_max_act': np.max(actions),
'_std_act': np.mean(np.var(actions, axis=0)),
'_mean_adv': np.mean(advantages),
'_min_adv': np.min(advantages),
'_max_adv': np.max(advantages),
'_std_adv': np.var(advantages),
'_mean_discrew': np.mean(disc_sum_rew),
'_min_discrew': np.min(disc_sum_rew),
'_max_discrew': np.max(disc_sum_rew),
'_std_discrew': np.var(disc_sum_rew),
'_Episode': episode
})
def main(env_name, num_episodes, render, gamma, lam, kl_targ, batch_size):
""" Main training loop
Args:
env_name: OpenAI Gym environment name, e.g. 'Hopper-v1'
num_episodes: maximum number of episodes to run
gamma: reward discount factor (float)
lam: lambda from Generalized Advantage Estimate
kl_targ: D_KL target for policy update, D_KL(pi_old || pi_new)
batch_size: number of episodes per policy training batch
"""
killer = GracefulKiller()
env, obs_dim, act_dim = init_gym(env_name, render)
obs_dim += 1 # add 1 to obs dimension for time step feature (see run_episode())
now = datetime.utcnow().strftime("%b-%d_%H-%M-%S") # create unique directories
logger = Logger(logname=env_name, now=now)
scaler = Scaler(obs_dim, env_name)
val_func = NNValueFunction(obs_dim, env_name)
policy = Policy(obs_dim, act_dim, kl_targ, env_name)
# run a few episodes of untrained policy to initialize scaler:
run_policy(env, policy, scaler, logger, episodes=5)
episode = 0
#capture = False
while episode < num_episodes:
trajectories = run_policy(env, policy, scaler, logger, episodes=batch_size)
episode += len(trajectories)
"""if episode > 600 and not capture:
env.ScreenCapture(5)
capture = True"""
add_value(trajectories, val_func) # add estimated values to episodes
add_disc_sum_rew(trajectories, gamma) # calculated discounted sum of Rs
add_gae(trajectories, gamma, lam) # calculate advantage
# concatenate all episodes into single NumPy arrays
observes, actions, advantages, disc_sum_rew = build_train_set(trajectories)
# add various stats to training log:
log_batch_stats(observes, actions, advantages, disc_sum_rew, logger, episode)
policy.update(observes, actions, advantages, logger) # update policy
val_func.fit(observes, disc_sum_rew, logger) # update value function
logger.write(display=True) # write logger results to file and stdout
scaler.save()
if killer.kill_now:
if input('Terminate training (y/[n])? ') == 'y':
break
killer.kill_now = False
logger.close()
policy.close_sess()
val_func.close_sess()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=('Train policy on OpenAI Gym environment '
'using Proximal Policy Optimizer'))
parser.add_argument('env_name', type=str, help='OpenAI Gym environment name')
parser.add_argument('-n', '--num_episodes', type=int, help='Number of episodes to run',
default=1000)
parser.add_argument('--renderON',action='store_true', default=False, dest='render', help='Toggle ON video')
parser.add_argument('--renderOFF',action='store_false', default=False, dest='render', help='Toggle OFF video')
parser.add_argument('-g', '--gamma', type=float, help='Discount factor', default=0.995)
parser.add_argument('-l', '--lam', type=float, help='Lambda for Generalized Advantage Estimation',
default=0.98)
parser.add_argument('-k', '--kl_targ', type=float, help='D_KL target value',
default=0.003)
parser.add_argument('-b', '--batch_size', type=int,
help='Number of episodes per training batch',
default=20)
args = parser.parse_args()
main(**vars(args))
|
bsd-3-clause
| -2,523,526,546,991,726,000
| 37.780645
| 147
| 0.6182
| false
| 3.786457
| false
| false
| false
|
RuudBurger/CouchPotatoServer
|
couchpotato/core/downloaders/deluge.py
|
1
|
16194
|
from base64 import b64encode, b16encode, b32decode
from datetime import timedelta
from hashlib import sha1
import os.path
import re
import traceback
from bencode import bencode as benc, bdecode
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryFloat, cleanHost
from couchpotato.core.logger import CPLog
from deluge_client.client import DelugeRPCClient
log = CPLog(__name__)
autoload = 'Deluge'
class Deluge(DownloaderBase):
protocol = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
drpc = None
def connect(self, reconnect = False):
""" Connect to the delugeRPC, re-use connection when already available
:param reconnect: force reconnect
:return: DelugeRPC instance
"""
# Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':')
# Force host assignment
if len(host) == 1:
host.append(80)
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if not self.drpc or reconnect:
self.drpc = DelugeRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return self.drpc
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and sent to this function.
This is done so failure checking happens before the downloader is used, meaning the downloader
doesn't need to worry about that.
:return: boolean
A failure returns false; the downloader should log its own errors.
"""
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol')))
if not self.connect():
return False
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Set parameters for Deluge
options = {
'add_paused': self.conf('paused', default = 0),
'label': self.conf('label')
}
if self.conf('directory'):
#if os.path.isdir(self.conf('directory')):
options['download_location'] = self.conf('directory')
#else:
# log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))
if self.conf('completed_directory'):
#if os.path.isdir(self.conf('completed_directory')):
options['move_completed'] = 1
options['move_completed_path'] = self.conf('completed_directory')
#else:
# log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))
if data.get('seed_ratio'):
options['stop_at_ratio'] = 1
options['stop_ratio'] = tryFloat(data.get('seed_ratio'))
# Deluge only has seed time as a global option. Might be added in
# a future API release.
# if data.get('seed_time'):
# Send request to Deluge
if data.get('protocol') == 'torrent_magnet':
remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
else:
filename = self.createFileName(data, filedata, media)
remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)
if not remote_torrent:
log.error('Failed sending torrent to Deluge')
return False
log.info('Torrent sent to Deluge successfully.')
return self.downloadReturnId(remote_torrent)
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True) and self.drpc.test():
return True
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Deluge download status.')
if not self.connect():
return []
release_downloads = ReleaseDownloadList(self)
queue = self.drpc.get_alltorrents(ids)
if not queue:
log.debug('Nothing in queue or error')
return []
for torrent_id in queue:
torrent = queue[torrent_id]
if not 'hash' in torrent:
# When given a list of ids, deluge will return an empty item for a non-existent torrent.
continue
log.debug('name=%s / id=%s / save_path=%s / move_on_completed=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_on_completed'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
# Deluge has no easy way to work out if a torrent is stalled or failing.
#status = 'failed'
status = 'busy'
# If a user opts to seed a torrent forever (usually associated with private tracker usage), stop_ratio will be 0 or -1 (depending on Deluge version).
# In this scenario the status of the torrent would never change from BUSY to SEEDING.
# The last check takes care of this case.
if torrent['is_seed'] and ((tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio'])) or (tryFloat(torrent['stop_ratio']) < 0)):
# We have torrent['seeding_time'] to work out what the seeding time is, but we do not
# have access to the downloader seed_time, as with deluge we have no way to pass it
# when the torrent is added. So Deluge will only look at the ratio.
# See above comment in download().
status = 'seeding'
elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused':
status = 'completed'
download_dir = sp(torrent['save_path'])
if torrent['move_on_completed']:
download_dir = torrent['move_completed_path']
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))
release_downloads.append({
'id': torrent['hash'],
'name': torrent['name'],
'status': status,
'original_status': torrent['state'],
'seed_ratio': torrent['ratio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])),
'files': torrent_files,
})
return release_downloads
def pause(self, release_download, pause = True):
if pause:
return self.drpc.pause_torrent([release_download['id']])
else:
return self.drpc.resume_torrent([release_download['id']])
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
return self.drpc.remove_torrent(release_download['id'], True)
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files)
class DelugeRPC(object):
host = 'localhost'
port = 58846
username = None
password = None
client = None
def __init__(self, host = 'localhost', port = 58846, username = None, password = None):
super(DelugeRPC, self).__init__()
self.host = host
self.port = port
self.username = username
self.password = password
def connect(self):
#self.client = DelugeClient()
#self.client.connect(self.host, int(self.port), self.username, self.password)
self.client = DelugeRPCClient(self.host, int(self.port), self.username, self.password)
self.client.connect()
def test(self):
try:
self.connect()
except:
return False
return True
def add_torrent_magnet(self, torrent, options):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_magnet(torrent, options)
if not torrent_id:
torrent_id = self._check_torrent(True, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label'])
except Exception as err:
log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return torrent_id
def add_torrent_file(self, filename, torrent, options):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options)
if not torrent_id:
torrent_id = self._check_torrent(False, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label'])
except Exception as err:
log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return torrent_id
def get_alltorrents(self, ids):
ret = False
try:
self.connect()
ret = self.client.core.get_torrents_status({'id': ids}, ('name', 'hash', 'save_path', 'move_completed_path', 'progress', 'state', 'eta', 'ratio', 'stop_ratio', 'is_seed', 'is_finished', 'paused', 'move_on_completed', 'files'))
except Exception as err:
log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return ret
def pause_torrent(self, torrent_ids):
try:
self.connect()
self.client.core.pause_torrent(torrent_ids)
except Exception as err:
log.error('Failed to pause torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
def resume_torrent(self, torrent_ids):
try:
self.connect()
self.client.core.resume_torrent(torrent_ids)
except Exception as err:
log.error('Failed to resume torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
def remove_torrent(self, torrent_id, remove_local_data):
ret = False
try:
self.connect()
ret = self.client.core.remove_torrent(torrent_id, remove_local_data)
except Exception as err:
log.error('Failed to remove torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return ret
def disconnect(self):
self.client.disconnect()
def _check_torrent(self, magnet, torrent):
# Torrent not added, check if it already existed.
if magnet:
torrent_hash = re.findall('urn:btih:([\w]{32,40})', torrent)[0]
else:
info = bdecode(torrent)["info"]
torrent_hash = sha1(benc(info)).hexdigest()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
torrent_hash = torrent_hash.lower()
torrent_check = self.client.core.get_torrent_status(torrent_hash, {})
if torrent_check['hash']:
return torrent_hash
return False
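# Hedged illustration of the hash handling in _check_torrent above: a magnet URI carries
# the info-hash directly (sometimes base32-encoded, hence the b32decode/b16encode step),
# while for a .torrent file the hash is the SHA-1 of the bencoded "info" dictionary.
#   urn:btih:ABCDEFGHIJKLMNOPQRSTUVWXYZ234567          -> 32-char base32, converted to hex
#   urn:btih:0123456789abcdef0123456789abcdef01234567  -> 40-char hex, lower-cased as-is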
config = [{
'name': 'deluge',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'deluge',
'label': 'Deluge',
'description': 'Use <a href="http://www.deluge-torrent.org/" target="_blank">Deluge</a> to download torrents.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent',
},
{
'name': 'host',
'default': 'localhost:58846',
'description': 'Hostname with port. Usually <strong>localhost:58846</strong>',
},
{
'name': 'username',
},
{
'name': 'password',
'type': 'password',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default Deluge download directory.',
},
{
'name': 'completed_directory',
'type': 'directory',
'description': 'Move completed torrent to this directory. Keep empty for default Deluge options.',
'advanced': True,
},
{
'name': 'label',
'description': 'Label to add to torrents in the Deluge UI.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
'type': 'bool',
'default': True,
'advanced': True,
'description': 'Remove the torrent from Deluge after it has finished seeding.',
},
{
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],
}]
|
gpl-3.0
| 8,216,095,494,107,268,000
| 37.374408
| 512
| 0.545943
| false
| 4.40893
| false
| false
| false
|
smira/fmspy
|
fmspy/application/room.py
|
1
|
2208
|
# FMSPy - Copyright (c) 2009 Andrey Smirnov.
#
# See COPYRIGHT for details.
"""
Application rooms.
"""
class Room(object):
"""
Room (scope, context) is a location inside the application where clients meet.
Room holds server objects: streams, shared objects, etc. It can be
used to iterate over clients in room.
@ivar clients: set of clients inside room
@type clients: C{set}
@ivar name: room name
@type name: C{str}
@ivar application: application owning this room
@type application: L{Application}
"""
def __init__(self, application, name='_'):
"""
Construct new room.
@param application: application owning this room
@type application: L{Application}
@param name: room name
@type name: C{str}
"""
self.name = name
self.application = application
self.clients = set()
def dismiss(self):
"""
Close room.
"""
self.clients = set()
self.application = None
def __eq__(self, other):
if not isinstance(other, Room):
return NotImplemented
return self.application == other.application and self.name == other.name
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<Room %r @ %r (%d)>" % (self.name, self.application, len(self.clients))
def enter(self, client):
"""
Client enters room.
@param client: room client
@type client: L{RTMPServerProtocol}
"""
assert client not in self.clients
self.clients.add(client)
def leave(self, client):
"""
Client leaves room.
@param client: room client
@type client: L{RTMPServerProtocol}
"""
assert client in self.clients
self.clients.remove(client)
if not self.clients:
self.application.room_empty(self)
def __iter__(self):
"""
Iterate over clients.
"""
return self.clients.__iter__()
def empty(self):
"""
Is this room empty?
@rtype: C{bool}
"""
return False if self.clients else True
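# Hedged usage sketch: a typical client lifecycle against a Room. `application` and
# `client` stand in for the real Application and RTMPServerProtocol objects.
def example_room_usage(application, client):
    room = Room(application, name='lobby')
    room.enter(client)
    for c in room:            # iterate over clients currently in the room
        pass
    room.leave(client)        # leaving as the last client triggers application.room_empty(room)
    return room.empty()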
|
mit
| -1,946,959,390,960,262,100
| 22.242105
| 87
| 0.567482
| false
| 4.238004
| false
| false
| false
|
chadgates/locmaster
|
unlocode/csvimport.py
|
1
|
10411
|
from unlocode.models import Country, SubDivision, Locode, LocCountry, LocFunction, LocStatus, LocSubdivision, LocVersion
from unlocode.models import LocChangeIndicator
import os
import csv
import logging
from django.db import IntegrityError, transaction
def saveatomic(object, logger):
result = False
try:
with transaction.atomic():
object.save()
except IntegrityError as ex:
if logger:
logger.exception(ex)
return result
def cleanoutVersion(version):
logger = logging.getLogger(__name__)
msg = str(Locode.objects.filter(version=version).delete()[0]) + " LocCodes deleted"
msg += "\n"
msg += str(LocCountry.objects.filter(version=version).delete()[0]) + " LocCountries deleted"
msg += "\n"
msg += str(LocFunction.objects.filter(version=version).delete()[0]) + " LocCodes deleted"
msg += "\n"
msg += str(LocStatus.objects.filter(version=version).delete()[0]) + " LocStatus deleted"
msg += "\n"
msg += str(LocSubdivision.objects.filter(version=version).delete()[0]) + " LocSubdivisions deleted"
logger.info(msg)
return msg
def importUNLOCODE(version):
logger = logging.getLogger(__name__)
path = os.getcwd() + "/unlocode/data/versions/" + version + "/"
logger.info("Start import for " + path)
if not (False in dict(check_version_dir(version)).values()):
objversion = LocVersion.objects.get(version=version)
msg = cleanoutVersion(version)
msg += "\n"
msg += importFunctionClassifiers(objversion, version, path)
msg += "\n"
msg += importStatusIndicators(objversion, version, path)
msg += "\n"
msg += importCountryCodes(objversion, version, path)
msg += "\n"
msg += importLocSubdivision(objversion, version, path)
msg += "\n"
msg += importCodeList(objversion, version, path)
else:
msg = "Nothing imported, files incomplete. "
logger.info(msg)
return msg
def importCountryCodes(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "CountryCodes.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
savecounter = 0
skipcounter = 0
rowcounter = 0
for row in dataReader:
locountry = LocCountry()
locountry.alpha2code = row[0]
locountry.name = row[1]
locountry.version = objversion
#locountry.save()
if saveatomic(locountry, logger):
savecounter += 1
else:
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Country codes (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importFunctionClassifiers(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "FunctionClassifiers.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
rowcounter = 0
skipcounter = 0
savecounter = 0
for row in dataReader:
locfunction = LocFunction()
locfunction.functioncode = row[0]
locfunction.description = row[1]
locfunction.version = objversion
try:
with transaction.atomic():
locfunction.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Function classifiers (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importStatusIndicators(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "StatusIndicators.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
rowcounter = 0
skipcounter = 0
savecounter = 0
for row in dataReader:
locstatus = LocStatus()
locstatus.statuscode = row[0]
locstatus.description = row[1]
locstatus.version = objversion
try:
with transaction.atomic():
locstatus.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Status Indicators (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importLocSubdivision(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "SubdivisionCodes.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
rowcounter = 0
skipcounter = 0
savecounter = 0
for row in dataReader:
locsubdivision = LocSubdivision()
locsubdivision.alpha2code = LocCountry.objects.filter(alpha2code=row[0], version=version).first()
locsubdivision.shortcode = row[1]
locsubdivision.name = row[2]
locsubdivision.version = objversion
try:
with transaction.atomic():
locsubdivision.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Subdivisions (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importCodeList(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "CodeList.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
savecounter = 0
skipcounter = 0
rowcounter = 0
for row in dataReader:
if row[2] != '':
locode = Locode()
locode.locchangeindicator = LocChangeIndicator.objects.filter(changecode=row[0]).first()
locode.locodecountry = LocCountry.objects.filter(alpha2code=row[1], version=objversion).first()
locode.locodeplace = row[2]
locode.locname = row[3]
locode.locnamewodia = row[4]
locode.locsubdivision = LocSubdivision.objects.filter(shortcode=row[5], version=objversion,
alpha2code=locode.locodecountry_id).first()
locode.locfunction = row[7]
locode.locstatus = LocStatus.objects.filter(statuscode=row[6], version=objversion).first()
locode.locdate = row[8]
locode.lociata = row[9]
locode.locoordinates = row[10]
locode.locremarks = row[11]
# locode.locode = row[1]+row[2]
locode.version = objversion
try:
with transaction.atomic():
locode.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
else:
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " UN/LOCODES (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importsubdivisons():
logger = logging.getLogger(__name__)
csv_filepathname = os.getcwd() + "/unlocode/data/subdivisions.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), dialect='excel-tab')
# dataReader = csv.reader(open(csv_filepathname), delimter=',', quotechar='"')
savecounter = 0
skipcounter = 0
rowcounter = 0
for row in dataReader:
if not rowcounter == 0:
subdivision = SubDivision()
subdivision.level1 = row[0]
subdivision.level2 = row[1]
subdivision.name = row[2]
subdivision.alpha2code = (subdivision.level1 + subdivision.level2).split("-", 1)[0]
subdivision.shortcode = (subdivision.level1 + subdivision.level2).split("-", 1)[1]
try:
with transaction.atomic():
subdivision.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Subdivisions processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importcountries():
csv_filepathname = os.getcwd() + "/unlocode/data/Country_List_ISO_3166_Codes_Latitude_Longitude.csv"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
rowcounter = 0
for row in dataReader:
if not rowcounter == 0:
country = Country()
country.name = row[0]
country.alpha2code = row[1]
country.alpha3code = row[2]
country.numericcode = row[3]
country.latitudeavg = row[4]
country.longitudeavg = row[5]
country.save()
rowcounter += 1
return str(rowcounter) + " countries imported"
def check_for_complete_set(filelist):
loc_set = {'CodeList.txt': False,
'CountryCodes.txt': False,
'FunctionClassifiers.txt': False,
'StatusIndicators.txt': False,
'SubdivisionCodes.txt': False}
for items in filelist:
if items in loc_set:
loc_set.update({items: True})
return list(loc_set.items())
def get_file_names(directory):
"""Returns list of file names within directory"""
contents = os.listdir(directory)
files = list()
for item in contents:
if os.path.isfile(os.path.join(directory, item)):
files.append(item)
return files
def check_version_dir(version):
dirpath = os.getcwd() + "/unlocode/data/versions/" + version
if os.path.exists(dirpath):
files = get_file_names(dirpath)
else:
files = ""
filestatus = check_for_complete_set(files)
return filestatus
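# Hedged usage sketch: importUNLOCODE() only proceeds when every expected file is present,
# i.e. when no (filename, found) pair returned by check_version_dir() carries False.
def example_is_version_complete(version):
    status = dict(check_version_dir(version))
    return all(status.values())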
|
bsd-3-clause
| 5,067,940,848,887,395,000
| 31.232198
| 120
| 0.600038
| false
| 3.97974
| false
| false
| false
|
Birion/python-ffdl
|
pyffdl/core/app.py
|
1
|
4385
|
import shutil
from typing import List, Tuple, Optional
import attr
import click
from furl import furl # type: ignore
from pyffdl.__version__ import __version__
from pyffdl.sites import (
AdultFanFictionStory,
ArchiveOfOurOwnStory,
FanFictionNetStory,
HTMLStory,
TwistingTheHellmouthStory,
TGStorytimeStory,
)
from pyffdl.utilities import get_url_from_file, list2text
AVAILABLE_SITES = {
"fanfiction.net": FanFictionNetStory,
"fictionpress.com": FanFictionNetStory,
"adult-fanfiction.org": AdultFanFictionStory,
"archiveofourown.org": ArchiveOfOurOwnStory,
"tthfanfic.org": TwistingTheHellmouthStory,
"tgstorytime.com": TGStorytimeStory,
}
@attr.s()
class URL:
url: furl = attr.ib()
file: Optional[str] = attr.ib(default=None)
def download(urls: List[URL], verbose: bool = False, force: bool = False) -> None:
for url in urls:
if not url.url:
continue
try:
host = ".".join(url.url.host.split(".")[-2:])
site = AVAILABLE_SITES.get(host)
if not site:
click.echo(
f"{__file__} is currently only able to download from {list2text(list(AVAILABLE_SITES.keys()))}."
)
return
story = site.parse(url.url, verbose, force)
if url.file:
story.filename = url.file
story.run()
except AttributeError as e:
raise e
# print(e)
# error = "There were problems with parsing the URL."
# with open("pyffdl.log", "a") as fp:
# click.echo(error, file=fp)
# click.echo(error, err=True)
@click.group()
@click.version_option(version=__version__)
def cli() -> None:
pass
@cli.command( # noqa: unused-function
"download", help="Download a new fanfiction story."
)
@click.option(
"-f",
"--from",
"from_file",
type=click.File(),
help="Load a list of URLs from a plaintext file.",
)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("url_list", nargs=-1)
def cli_download(
from_file: click.File, url_list: Tuple[str, ...], verbose: bool = False
) -> None:
urls = [URL(furl(x)) for x in url_list]
if from_file:
urls += [
URL(furl(x.strip("\n"))) for x in from_file.readlines() if not x.startswith("#")
]
download(urls, verbose)
@cli.command( # noqa: unused-function
"html", help="Download a single story, using a list of chapter URLs."
)
@click.option(
"-f",
"--from",
"from_file",
type=click.File(),
help="Load a list of URLs from a plaintext file.",
)
@click.option("-a", "--author", help="Name of the author", type=str, required=True)
@click.option("-t", "--title", help="Title of the story", type=str, required=True)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("url_list", nargs=-1)
def cli_html(
from_file: click.File,
author: str,
title: str,
url_list: Tuple[str, ...],
verbose: bool = False,
):
urls = [URL(furl(x)) for x in url_list]
if from_file:
urls += [
URL(furl(x.strip("\n"))) for x in from_file.readlines() if not x.startswith("#")
]
if not urls:
click.echo("You must provide at least one URL to download.")
return
story = HTMLStory(
chapters=[x.url.tostr() for x in urls],
author=author,
title=title,
url=furl("http://httpbin.org/status/200"),
)
story.verbose = verbose
story.run()
@cli.command( # noqa: unused-function
"update", help="Update an existing .epub fanfiction file."
)
@click.option(
"-f",
"--force",
is_flag=True,
default=False,
help="Completely refresh the ebook file.",
)
@click.option(
"-b", "--backup", is_flag=True, default=False, help="Backup the original file."
)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("filenames", type=click.Path(dir_okay=False, exists=True), nargs=-1)
def cli_update(
force: bool, backup: bool, filenames: List[click.Path], verbose: bool = False
) -> None:
if backup:
for filename in filenames:
shutil.copy(f"{filename}", f"{filename}.bck")
stories = [
URL(get_url_from_file(x), str(x) if not force else None) for x in filenames
]
download(stories, verbose, force)
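# Hedged usage sketch: the download() entry point can also be driven programmatically,
# without the click CLI. The story URL below is purely illustrative.
def example_programmatic_download():
    urls = [URL(furl("https://www.fanfiction.net/s/1234567/1/example"))]
    download(urls, verbose=True)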
|
mit
| 876,202,144,005,720,600
| 27.848684
| 116
| 0.601596
| false
| 3.316944
| false
| false
| false
|
libvirt/libvirt-python
|
examples/domstart.py
|
1
|
1239
|
#!/usr/bin/env python3
"""
Check that the domain described by DOMAIN.XML is running.
If the domain is not running, create it.
"""
import libvirt
import libxml2
from argparse import ArgumentParser
from typing import Tuple
# Parse the XML description of domU from FNAME
# and return a tuple (name, xmldesc) where NAME
# is the name of the domain, and xmldesc is the content of FNAME
def read_domain(fname: str) -> Tuple[str, str]:
fp = open(fname, "r")
xmldesc = fp.read()
fp.close()
doc = libxml2.parseDoc(xmldesc)
name = doc.xpathNewContext().xpathEval("/domain/name")[0].content
return (name, xmldesc)
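# Illustrative DOMAIN.XML fragment (hedged example; only the <name> element is read here):
#   <domain type='kvm'>
#     <name>example-guest</name>
#     ...
#   </domain>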
parser = ArgumentParser(description=__doc__)
parser.add_argument("file", metavar="DOMAIN.XML", help="XML configuration of the domain in libvirt's XML format")
args = parser.parse_args()
(name, xmldesc) = read_domain(args.file)
try:
conn = libvirt.open(None)
except libvirt.libvirtError:
print('Failed to open connection to the hypervisor')
exit(1)
try:
dom = conn.lookupByName(name)
except libvirt.libvirtError:
print("Starting domain %s ... " % name)
dom = conn.createLinux(xmldesc, 0)
if dom is None:
print("failed")
exit(1)
else:
print("done")
|
lgpl-2.1
| -1,861,244,689,678,241,000
| 25.361702
| 113
| 0.688458
| false
| 3.529915
| false
| false
| false
|
liuwill-projects/flask-server-scaffold
|
main.py
|
1
|
1505
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
from flask import Flask, jsonify # , request, current_app
from flask_cors import CORS, cross_origin
from flask_socketio import SocketIO, emit, send
from chat.utils.jsonp import jsonp
from chat.controllers.mock import Mock
import logging
from logging.config import fileConfig
fileConfig('logging_config.ini')
logger = logging.getLogger()
#logger = logging.getLogger('api')
app = Flask(__name__)
cors = CORS(app, resources={r"/api/users/*": {"origins": "*"}})
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
mockController = Mock(app)
@app.route("/")
def hello():
print os.environ.get('PYTHONSTARTUP')
return "Hello World!"
@app.route("/api/jsonp/getCookie.js")
@jsonp
def getCookie():
return mockController.getCookie()
@app.route("/api/users/me")
@cross_origin()
def me():
return mockController.me()
@socketio.on('message', namespace='/chat')
def handle_message(message):
send(message)
@socketio.on('json', namespace='/chat')
def handle_json(json):
send(json, json=True)
@socketio.on('my event', namespace='/chat')
def test_message(message):
emit('my response', {'data': 'got it!'})
@socketio.on('connect', namespace='/chat')
def test_connect():
emit('my response', {'data': 'Connected'})
@socketio.on('disconnect', namespace='/chat')
def test_disconnect():
print('Client disconnected')
if __name__ == "__main__":
#app.run('0.0.0.0', 5000)
socketio.run(app, host='0.0.0.0', port=5000)
|
mit
| 7,558,739,674,957,527,000
| 24.508475
| 63
| 0.682392
| false
| 3.128898
| false
| false
| false
|
theY4Kman/infusionsoft-client
|
setup.py
|
1
|
1956
|
import os
from setuptools import setup, find_packages
def build_install_requires(path):
"""Support pip-type requirements files"""
basedir = os.path.dirname(path)
with open(path) as f:
reqs = []
for line in f:
line = line.strip()
if not line:
continue
if line[0] == '#':
continue
elif line.startswith('-r '):
nested_req = line[3:].strip()
nested_path = os.path.join(basedir, nested_req)
reqs += build_install_requires(nested_path)
elif line[0] == '-':
continue
else:
reqs.append(line)
return reqs
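# Illustrative requirements.txt consumed by build_install_requires() (hedged example):
#     # comment lines and blank lines are skipped
#     requests>=2.0
#     -r base-requirements.txt
#     --editable .
# Lines starting with "-r " pull in the nested file recursively; any other line starting
# with "-" is ignored; everything else is kept as a requirement string.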
pkg = 'infusionsoft'
root = os.path.dirname(__file__)
from_root = lambda *p: os.path.join(root, *p)
pkg_root = lambda *p: from_root(pkg, *p)
with open(from_root('README.rst')) as fp:
long_description = fp.read()
with open(pkg_root('version.py')) as fp:
context = {}
exec(fp.read(), None, context)
version = context['__version__']
setup(
name='infusionsoft-client',
version=version,
url='https://github.com/theY4Kman/infusionsoft-client',
author='Zach "theY4Kman" Kanzler',
author_email='they4kman@gmail.com',
description='Sexy Infusionsoft XML-RPC API client',
long_description=long_description,
packages=find_packages(include=(pkg, pkg + '.*')),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
],
install_requires=build_install_requires(from_root('requirements.txt')),
extras_require={'django': build_install_requires(from_root('django-requirements.txt'))},  # extra name 'django' assumed from the requirements file name
include_package_data=True,
)
|
apache-2.0
| 1,362,176,675,255,124,500
| 30.047619
| 82
| 0.593047
| false
| 3.842829
| false
| false
| false
|
brigittebigi/proceed
|
proceed/src/term/textprogress.py
|
1
|
5853
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ __ __ ___
# / | \ | \ | \ / Automatic
# \__ |__/ |__/ |___| \__ Annotation
# \ | | | | \ of
# ___/ | | | | ___/ Speech
# =============================
#
# http://sldr.org/sldr000800/preview/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2011-2015 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# File: textprogress.py
# ----------------------------------------------------------------------------
__docformat__ = """epytext"""
__authors__ = """Brigitte Bigi (brigitte.bigi@gmail.com)"""
__copyright__ = """Copyright (C) 2011-2015 Brigitte Bigi"""
# ----------------------------------------------------------------------------
import sys
import re
import math
from terminalcontroller import TerminalController
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
WIDTH = 74
BAR = '%3d%% ${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}\n'
HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
# ----------------------------------------------------------------------------
class TextProgress:
"""
@authors: Brigitte Bigi
@contact: brigitte.bigi((AATT))lpl-aix.fr
@license: GPL
@summary: A 3-line progress bar.
It looks like::
Header
20% [===========----------------------------------]
progress message
The progress bar is colored, if the terminal supports color
output; and adjusts to the width of the terminal.
"""
def __init__(self):
"""
Constructor.
"""
try:
self.term = TerminalController()
except:
self.term = None
if self.term and not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
self.term = None
self.bar = BAR
if self.term:
self.bar = self.term.render(BAR)
self.cleared = 1 #: true if we haven't drawn the bar yet.
self.percent = 0
self.text = ""
# End __init__
# ------------------------------------------------------------------
def update(self, percent, message):
"""
Update the progress.
@param percent: progress bar value, a fraction between 0 and 1
@param message: progress bar text
"""
n = int((WIDTH-10)*percent)
if self.term:
sys.stdout.write(
self.term.BOL + self.term.UP + self.term.CLEAR_EOL +
(self.bar % (100*percent, '='*n, '-'*(WIDTH-10-n))) +
self.term.CLEAR_EOL + message.center(WIDTH))
else:
sys.stdout.write( ' => ' + message + " \n")
self.percent = percent
self.text = message
# End update
# ------------------------------------------------------------------
def clear(self):
"""
Clear.
"""
if not self.cleared:
if self.term:
sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL)
else:
sys.stdout.write('\n'*50)
self.cleared = 1
# End clear
# ------------------------------------------------------------------
def set_fraction(self, percent):
"""
Set a new progress value.
@param percent: new progress value
"""
self.update(percent,self.text)
# End set_fraction
# ------------------------------------------------------------------
def set_text(self,text):
"""
Set a new progress text.
@param text: new progress text
"""
self.update(self.percent,text)
# End set_text
# ------------------------------------------------------------------
def set_header(self,header):
"""
Set a new progress label.
@param header: new progress header
"""
if self.term:
self.header = self.term.render(HEADER % header.center(WIDTH))
else:
self.header = " " + header
sys.stdout.write(self.header)
# End set_header
# ------------------------------------------------------------------
def set_new(self):
"""
Initialize a new progress line.
"""
sys.stdout.write('\n')
self.clear()
self.text = ""
self.percent = 0
# End set_new
# ------------------------------------------------------------------
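# Hedged usage sketch (illustrative only): driving the three-line progress bar
# from a terminal script.
def _example_progress():
    p = TextProgress()
    p.set_header("Processing files")
    p.set_new()
    p.update(0.50, "halfway there")
    p.update(1.00, "done")
    p.clear()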
|
gpl-3.0
| -6,311,342,072,019,640,000
| 27.832512
| 78
| 0.411584
| false
| 4.495392
| false
| false
| false
|
chenzhengchen200821109/github-python
|
socketerror.py
|
1
|
1117
|
#!/bin/python
# Error handing
import socket, sys
host = sys.argv[1]
textport = sys.argv[2]
filename = sys.argv[3]
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, e:
print "Strange error creating socket: %s" % e
sys.exit(1)
try:
port = int(textport)
except ValueError:
# That didn't work, so it's probably a protocol name
# Look it up instead.
try:
port = socket.getservbyname(textport, 'tcp')
except socket.error, e:
print "Couldn't find your port: %s" % e
sys.exit(1)
try:
s.connect((host, port))
except socket.gaierror, e:
print "Address-related error connecting to server: %s" % e
sys.exit(1)
except socket.error, e:
print "Connection error: %s" % e
sys.exit(1)
try:
s.sendall("GET %s HTTP/1.0\r\n\r\n" % filename)
except socket.error, e:
print "Error sending data: %s" % e
sys.exit(1)
while 1:
try:
buf = s.recv(2048)
except socket.error, e:
print "Error receiving data: %s" % e
sys.exit(1)
if not len(buf):
break
sys.stdout.write(buf)
|
mit
| 3,929,081,061,703,721,000
| 20.901961
| 62
| 0.615936
| false
| 3.002688
| false
| false
| false
|
hasibi/TAGME-Reproducibility
|
scripts/evaluator_annot.py
|
1
|
6793
|
"""
This script computes Topic metrics for the end-to-end performance.
Precision and recall are macro-averaged.
Matching condition: entities should match and mentions should be equal or contained in each other.
@author: Faegheh Hasibi (faegheh.hasibi@idi.ntnu.no)
"""
from __future__ import division
import sys
from collections import defaultdict
class EvaluatorAnnot(object):
def __init__(self, qrels, results, score_th, null_qrels=None):
self.qrels_dict = self.__group_by_queries(qrels)
self.results_dict = self.__group_by_queries(results, res=True, score_th=score_th)
self.null_qrels = self.__group_by_queries(null_qrels) if null_qrels else None
@staticmethod
def __group_by_queries(file_lines, res=False, score_th=None):
"""
Groups the lines by query id.
:param file_lines: list of lines [[qid, score, en_id, mention, page_id], ...]
:return: {qid: {(men0, en0), (men1, en1), ..}, ..}
"""
grouped_inters = defaultdict(set)
for cols in file_lines:
if len(cols) > 2:
if res and (float(cols[1]) < score_th):
continue
grouped_inters[cols[0]].add((cols[3].lower(), cols[2].lower()))
return grouped_inters
def rm_nulls_from_res(self):
"""
Removes mentions that are not linked to an entity in the qrel.
There are some entities in the qrel with "*NONE*" as id. We remove the related mentions from the result file.
Null entities are generated due to the inconsistency between TAGME Wikipedia dump (2009) and our dump (2010).
"""
print "Removing mentions with null entities ..."
new_results_dict = defaultdict(set)
for qid in self.results_dict:
# easy case: the query does not have any null entity.
if qid not in set(self.null_qrels.keys()):
new_results_dict[qid] = self.results_dict[qid]
continue
qrel_null_mentions = [item[0] for item in self.null_qrels[qid]]
# check null mentions with results mentions
for men, en in self.results_dict[qid]:
is_null = False
for qrel_null_men in qrel_null_mentions:
# the result mention matches a null qrel mention, so it is dropped
if mention_match(qrel_null_men, men):
is_null = True
break
if not is_null:
new_results_dict[qid].add((men, en))
self.results_dict = new_results_dict
def eval(self, eval_query_func):
"""
Evaluates all queries and calculates total precision, recall and F1 (macro averaging).
:param eval_query_func: A function that takes qrel and results for a query and returns evaluation metrics
:return Total precision, recall, and F1 for all queries
"""
self.rm_nulls_from_res()
queries_eval = {}
total_prec, total_rec, total_f = 0, 0, 0
for qid in sorted(self.qrels_dict):
queries_eval[qid] = eval_query_func(self.qrels_dict[qid], self.results_dict.get(qid, {}))
total_prec += queries_eval[qid]['prec']
total_rec += queries_eval[qid]['rec']
n = len(self.qrels_dict) # number of queries
total_prec /= n
total_rec /= n
total_f = 2 * total_prec * total_rec / (total_prec + total_rec)
log = "\n----------------" + "\nEvaluation results:\n" + \
"Prec: " + str(round(total_prec, 4)) + "\n" +\
"Rec: " + str(round(total_rec, 4)) + "\n" + \
"F1: " + str(round(total_f, 4)) + "\n" + \
"all: " + str(round(total_prec, 4)) + ", " + str(round(total_rec, 4)) + ", " + str(round(total_f, 4))
print log
metrics = {'prec': total_prec, 'rec': total_rec, 'f': total_f}
return metrics
def erd_eval_query(query_qrels, query_results):
"""
Evaluates a single query.
:param query_qrels: Query interpretations from Qrel [{en1, en2, ..}, ..]
:param query_results: Query interpretations from result file [{en1, en2, ..}, ..]
:return: precision, recall, and F1 for a query
"""
tp = 0 # correct
fn = 0 # missed
fp = 0 # incorrectly returned
# ----- Query has at least an interpretation set. -----
# Iterate over qrels to calculate TP and FN
for qrel_item in query_qrels:
if find_item(qrel_item, query_results):
tp += 1
else:
fn += 1
# Iterate over results to calculate FP
for res_item in query_results:
if not find_item(res_item, query_qrels): # Finds the result in the qrels
fp += 1
prec = tp / (tp+fp) if tp+fp != 0 else 0
rec = tp / (tp+fn) if tp+fn != 0 else 0
f = (2 * prec * rec) / (prec + rec) if prec + rec != 0 else 0
metrics = {'prec': prec, 'rec': rec, 'f': f}
return metrics
def find_item(item_to_find, items_list):
"""
Returns True if an item is found in the item list.
:param item_to_find: item to be found
:param items_list: list of items to search in
:return boolean
"""
is_found = False
for item in items_list:
if (item[1] == item_to_find[1]) and mention_match(item[0], item_to_find[0]):
is_found = True
return is_found
def mention_match(mention1, mention2):
"""
    Checks if two mentions match each other.
Matching condition: One of the mentions is sub-string of the other one.
"""
match = ((mention1 in mention2) or (mention2 in mention1))
return match
def parse_file(file_name, res=False):
"""
Parses file and returns the positive instances for each query.
:param file_name: Name of file to be parsed
:return lists of lines [[qid, label, en_id, ...], ...], lines with null entities are separated
"""
null_lines = []
file_lines = []
efile = open(file_name, "r")
for line in efile.readlines():
if line.strip() == "":
continue
cols = line.strip().split("\t")
if (not res) and (cols[2].strip() == "*NONE*"):
null_lines.append(cols)
else:
file_lines.append(cols)
return file_lines, null_lines
def main(args):
    if len(args) < 3:
        print "\tUsage: <qrel_file> <result_file> <score_threshold>"
exit(0)
print "parsing qrel ..."
qrels, null_qrels = parse_file(args[0]) # here qrel does not contain null entities
print "parsing results ..."
results = parse_file(args[1], res=True)[0]
print "evaluating ..."
evaluator = EvaluatorAnnot(qrels, results, float(args[2]), null_qrels=null_qrels)
evaluator.eval(erd_eval_query)
if __name__ == '__main__':
main(sys.argv[1:])
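# Illustrative sketch (added for clarity; not part of the original script).
# It shows how erd_eval_query applies the matching condition described in the
# module docstring. The mentions and entity ids below are made-up examples.
def _toy_example():
    qrel = set([("new york", "new_york_city"), ("obama", "barack_obama")])
    res = set([("york", "new_york_city")])
    # "york" is contained in "new york" and the entities match -> tp=1, fn=1, fp=0
    return erd_eval_query(qrel, res)  # {'prec': 1.0, 'rec': 0.5, 'f': 2/3}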
|
mit
| 5,499,083,128,206,597,000
| 35.718919
| 117
| 0.580892
| false
| 3.50697
| false
| false
| false
|
conda/kapsel
|
conda_kapsel/internal/toposort.py
|
1
|
2834
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import collections
class CycleError(Exception):
def __init__(self, involving):
message = "Cycle in graph involving {involving}".format(involving=involving)
super(CycleError, self).__init__(message)
self.involving = involving
def toposort(nodes, get_next_nodes):
"""Sort list of graph nodes.
Returns a new list, does not modify input list.
Args:
nodes (iterable): iterable of some kind of node
get_next_nodes (function): takes a node and returns iterable of next nodes
Returns:
new sorted list of nodes
"""
traversing = set()
traversed = set()
result = collections.deque()
def traverse(node):
if node in traversing:
raise CycleError(node)
if node in traversed:
return # not a cycle but we already saw this
traversing.add(node)
for next in get_next_nodes(node):
traverse(next)
traversed.add(node)
traversing.remove(node)
result.appendleft(node)
for node in nodes:
traverse(node)
return list(result)
def toposort_from_dependency_info(nodes, get_node_key, get_dependency_keys, can_ignore_dependency=None):
"""Sort list of nodes that depend on other nodes in dependency-first order.
All dependencies must be in the list of nodes.
Returns a new list, does not modify input list.
Args:
nodes (iterable): iterable of some kind of node
get_node_key (function): get identifier for a node
get_dependency_keys (function): get iterable of node identifiers a node depends on
Returns:
new sorted list of nodes
"""
nodes_by_key = dict()
node_depended_on_by = dict()
for node in nodes:
key = get_node_key(node)
if key in nodes_by_key:
raise ValueError("two nodes with the same key %r" % key)
nodes_by_key[key] = node
node_depended_on_by[key] = set()
for node in nodes:
dep_keys = get_dependency_keys(node)
for dep_key in dep_keys:
if dep_key not in nodes_by_key:
if can_ignore_dependency is None or not can_ignore_dependency(dep_key):
raise ValueError("Dependency %r was not in the list of nodes %r" % (dep_key, nodes))
else:
node_depended_on_by[dep_key].add(node)
return toposort(nodes, lambda n: node_depended_on_by[get_node_key(n)])
|
bsd-3-clause
| 6,041,369,235,106,766,000
| 31.563218
| 104
| 0.5976
| false
| 4.041369
| false
| false
| false
|
demianw/tract_querier
|
tract_querier/tests/test_query_eval.py
|
1
|
10072
|
from .. import query_processor
from nose.tools import assert_true, assert_equal
from numpy import random
import ast
# Ten tracts traversing random labels
another_set = True
while (another_set):
tracts_labels = dict([(i, set(random.randint(100, size=2))) for i in range(100)])
labels_tracts = query_processor.labels_for_tracts(tracts_labels)
another_set = 0 not in labels_tracts.keys() or 1 not in labels_tracts.keys()
tracts_in_0 = set().union(*[labels_tracts[label] for label in labels_tracts if label == 0])
tracts_in_all_but_0 = set().union(*[labels_tracts[label] for label in labels_tracts if label != 0])
tract_in_label_0_uniquely = labels_tracts[0].difference(tracts_in_all_but_0)
class DummySpatialIndexing:
def __init__(
self,
crossing_tracts_labels, crossing_labels_tracts,
ending_tracts_labels, ending_labels_tracts,
label_bounding_boxes, tract_bounding_boxes
):
self.crossing_tracts_labels = crossing_tracts_labels
self.crossing_labels_tracts = crossing_labels_tracts
self.ending_tracts_labels = ending_tracts_labels
self.ending_labels_tracts = ending_labels_tracts
self.label_bounding_boxes = label_bounding_boxes
self.tract_bounding_boxes = tract_bounding_boxes
dummy_spatial_indexing = DummySpatialIndexing(tracts_labels, labels_tracts, ({}, {}), ({}, {}), {}, {})
empty_spatial_indexing = DummySpatialIndexing({}, {}, ({}, {}), ({}, {}), {}, {})
def test_assign():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0] and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_assign_attr():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("a.left=0"))
assert_true((
'a.left' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['a.left'].tracts == labels_tracts[0] and
query_evaluator.evaluated_queries_info['a.left'].labels == set((0,))
))
def test_assign_side():
query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
queries_labels = {
'a.left': set([3, 6]),
'a.right': set([4, 5]),
'b.left': set([3]),
'b.right': set([4]),
'c.left': set([5]),
'c.right': set([6])
}
queries_tracts = {
'a.left': set([]),
'a.right': set([]),
'b.left': set([]),
'b.right': set([]),
'c.left': set([]),
'c.right': set([])
}
query = r"""
b.left=3 ;
b.right = 4;
c.left = 5;
c.right = 6;
a.side = b.side or c.opposite
"""
query_evaluator.visit(ast.parse(query))
assert_equal({k: v.labels for k, v in query_evaluator.evaluated_queries_info.items()}, queries_labels)
assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_assign_str():
query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
queries_labels = {
'b.left': set([3]),
'b.right': set([4]),
'c.left': set([5]),
'c.right': set([6]),
'h': set([3, 5])
}
queries_tracts = {
'b.left': set([]),
'b.right': set([]),
'c.left': set([]),
'c.right': set([]),
'h': set([])
}
query = """
b.left=3
b.right = 4
c.left = 5
c.right = 6
h = '*left'
"""
query_evaluator.visit(ast.parse(query))
assert_equal({k: v.labels for k, v in query_evaluator.evaluated_queries_info.items()}, queries_labels)
assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_for_list():
query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
queries_tracts = {
'a.left': set([]),
'a.right': set([]),
'b.left': set([]),
'b.right': set([]),
'c.left': set([]),
'c.right': set([]),
'd.left': set([]),
'd.right': set([]),
'e.left': set([]),
'e.right': set([])
}
query = """
a.left= 0
b.left= 1
c.left= 2
d.left= 3
e.left= 4
for i in [a,b,c,d,e]: i.right = i.left
"""
query_evaluator.visit(ast.parse(query))
assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_for_str():
query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
queries_tracts = {
'a.left': set([]),
'a.left.right': set([]),
'b.left': set([]),
'b.left.right': set([]),
'c.left': set([]),
'c.left.right': set([]),
'd.left': set([]),
'd.left.right': set([]),
'e.left': set([]),
'e.left.right': set([])
}
query = """
a.left= 0
b.left= 1
c.left= 2
d.left= 3
e.left= 4
for i in '*left': i.right = i
"""
query_evaluator.visit(ast.parse(query))
assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_add():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0+1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].union(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0, 1))
))
def test_mult():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0 * 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].intersection(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0, 1))
))
def test_sub():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=(0 + 1) - 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].difference(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_or():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0 or 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].union(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0, 1))
))
def test_and():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0 and 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].intersection(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0, 1))
))
def test_not_in():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0 or 1 not in 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].difference(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_only_sign():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=~0"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == tract_in_label_0_uniquely and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_only():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=only(0)"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == tract_in_label_0_uniquely and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_unsaved_query():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A|=0"))
assert_true((
'A' not in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0] and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_symbolic_assignment():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0; B=A"))
assert_true((
'B' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['B'].tracts == labels_tracts[0] and
query_evaluator.evaluated_queries_info['B'].labels == set((0,))
))
def test_unarySub():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("B=0; A=-B"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == tracts_in_all_but_0 and
query_evaluator.evaluated_queries_info['A'].labels == set(labels_tracts.keys()).difference((0,))
))
def test_not():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A= not 0"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == tracts_in_all_but_0 and
query_evaluator.evaluated_queries_info['A'].labels == set(labels_tracts.keys()).difference((0,))
))
|
bsd-3-clause
| -3,546,840,993,955,329,000
| 32.131579
| 113
| 0.629468
| false
| 3.286134
| true
| false
| false
|
morganmeliment/Calculate-Pi
|
calculatepi.py
|
1
|
1034
|
"""
calculatepi.py
Author: Morgan Meliment
Credit: none
Assignment:
Write and submit a Python program that computes an approximate value of π by calculating the following sum:
(see: https://github.com/HHS-IntroProgramming/Calculate-Pi/blob/master/README.md)
This sum approaches the true value of π as n approaches ∞.
Your program must ask the user how many terms to use in the estimate of π, how many decimal places,
then print the estimate using that many decimal places. Exactly like this:
I will estimate pi. How many terms should I use? 100
How many decimal places should I use in the result? 7
The approximate value of pi is 3.1315929
Note: remember that the printed value of pi will be an estimate!
"""
import math
num = int(input("I will estimate pi. How many terms should I use? "))
dec = int(input("How many decimal places should I use in the result? "))
func = lambda n: (((-1) ** n)/((2 * n) + 1))
m = map(func, range(0,num))
pi = 4 * sum(m)
print("The approximate value of pi is {0:.{1}f}".format(pi, dec))
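# Reference note (added; the assignment text only links to the sum): the map/sum
# above evaluates the Leibniz series
#   pi ~= 4 * sum_{n=0}^{num-1} (-1)**n / (2*n + 1)
# For example, with num = 5 terms: 4*(1 - 1/3 + 1/5 - 1/7 + 1/9) ~= 3.3397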
|
mit
| -4,700,008,651,961,431,000
| 32.193548
| 107
| 0.729835
| false
| 3.384868
| false
| true
| false
|
gh4w/some
|
web/diego/pronostix/scripts/load_database.py
|
1
|
1473
|
# coding: utf8
#! /usr/bin/env python3
import json
import re
import iso8601 as iso
from pronostix.models import Club, Rencontre
def get_entity_id(entity):
return get_url_id(entity['_links']['self']['href'])
def get_url_id(url):
regex = re.compile('http://api.football-data.org/v1/[^/]+/(?P<id>\d+)$')
m = regex.match(url)
return m.group("id")
def charger_clubs(teams):
clubs = []
for t in teams:
c, created = Club.objects.get_or_create(nom = t['name'], json_id = get_entity_id(t))
if created: c.save()
clubs.append(c)
return clubs
def charger_rencontres(fixtures, clubs):
team2club = { c.json_id: c for c in clubs }
rencontres = []
for f in fixtures:
status = f['status']
id1 = get_url_id(f['_links']['homeTeam']['href'])
id2 = get_url_id(f['_links']['awayTeam']['href'])
d = iso.parse_date(f['date'])
r, created = Rencontre.objects.get_or_create(date = d, club1 = team2club[id1], club2 = team2club[id2], json_id = get_entity_id(f))
if created: r.save()
rencontres.append(r)
return rencontres
def charger():
teams_file = 'teams.json'
with open(teams_file, 'r') as fs: teams = json.load(fs)
clubs = charger_clubs(teams['teams'])
fixtures_file = 'fixtures.json'
with open(fixtures_file, 'r') as fs: fixtures = json.load(fs)
rencontres = charger_rencontres(fixtures['fixtures'], clubs)
if __name__ == '__main__': charger()
|
mit
| -5,689,127,188,113,624,000
| 30.340426
| 138
| 0.610998
| false
| 2.922619
| false
| false
| false
|
magnusmorton/nest
|
nest/main.py
|
1
|
1314
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Magnus Morton on 2012-03-14.
(c) Copyright 2012 Magnus Morton.
This file is part of Nest.
Nest is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Nest is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Nest. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import argparse
import nest.translate
import nest.transformer
import ast
from nest.loop import get_safe_loops
def main():
parser = argparse.ArgumentParser(description='implicitly parallelising Python')
parser.add_argument('file')
args = parser.parse_args()
source_file = args.file
translator = nest.translate.Translator(source_file, get_safe_loops, nest.transformer.ForTransformer)
with open(source_file, 'r') as the_file:
translator.translate(the_file.read())
if __name__ == '__main__':
main()
|
agpl-3.0
| 6,048,209,914,957,177,000
| 29.55814
| 104
| 0.750381
| false
| 3.765043
| false
| false
| false
|
bazz-erp/erpnext
|
erpnext/accounts/doctype/eventual_purchase_invoice/eventual_purchase_invoice.py
|
1
|
3890
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from erpnext.accounts.general_ledger import make_gl_entries
from frappe.utils import nowdate, flt
from frappe import _
class EventualPurchaseInvoice(Document):
def validate(self):
self.validate_dates()
self.check_mandatory()
self.validate_cuit()
self.set_total_amount()
# Removes 'Draft' transition, submit document directly
self._action = "submit"
self.docstatus = 1
self.set_status()
def validate_dates(self):
if not self.issue_date:
self.issue_date = nowdate()
if not self.iva_date:
self.iva_date = nowdate()
def validate_cuit(self):
if not self.cuit.isdigit():
frappe.throw(_("{0} field must contain only digits"))
if len(self.cuit) > 11:
frappe.throw (_("CUIT has 11 numbers as maximum"))
def set_status(self, update = False):
if self.is_new():
self.status = 'Draft'
# None value in outstanding amount indicates that document is new
elif self.docstatus == 1 and (self.outstanding_amount > 0 or self.outstanding_amount is None):
self.status = 'Unpaid'
elif self.docstatus == 1 and self.outstanding_amount <= 0:
self.status = 'Paid'
if update:
self.db_set("status", self.status)
def check_mandatory(self):
for field in ["supplier_name", "cuit", "iva_type", "taxed_amount_21", "taxed_amount_10",
"taxed_amount_27", "iva_21", "iva_10", "iva_27"]:
if self.get(field) == None:
frappe.throw(_("{0} in Eventual Purchase Invoice is mandatory").format(self.meta.get_label(field)))
def set_total_amount(self):
total_amount = 0
for field in ["taxed_amount_21", "taxed_amount_10",
"taxed_amount_27", "iva_21", "iva_10", "iva_27", "exempts", "others", "iva_perception", "ibb_perception"]:
if self.get(field):
total_amount += flt(self.get(field))
self.total_amount = total_amount
def on_submit(self):
self.make_gl_entries()
self.set_status(update = True)
def make_gl_entries(self):
gl_entries = []
self.make_supplier_gl_entry(gl_entries)
make_gl_entries(gl_entries)
def make_supplier_gl_entry(self, gl_entries):
default_payable_account = frappe.get_doc("Company", self.company).default_payable_account
stock_received_but_not_billed = frappe.get_doc("Company", self.company).stock_received_but_not_billed
gl_entries.append(
frappe._dict({
'company': self.company,
'posting_date': nowdate(),
"account": default_payable_account,
"party_type": "Supplier",
"credit": self.total_amount,
"credit_in_account_currency": self.total_amount,
"voucher_no": self.name,
"voucher_type": self.doctype,
"against_voucher": self.name,
"against_voucher_type": self.doctype,
"against": self.supplier_name
})
)
gl_entries.append(
frappe._dict({
"party_type": "Supplier",
"posting_date": nowdate(),
"account": stock_received_but_not_billed,
"debit": self.total_amount,
"debit_in_account_currency": self.total_amount,
"voucher_no": self.name,
"voucher_type": self.doctype,
"against": default_payable_account
})
)
|
gpl-3.0
| -2,652,611,756,018,956,000
| 31.416667
| 128
| 0.569409
| false
| 3.882236
| false
| false
| false
|
mtbc/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/webstart/views.py
|
1
|
4134
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2014 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import os
import sys
import traceback
from glob import glob
from django.conf import settings
from django.template import loader as template_loader
from django.template import RequestContext as Context
from django.core.urlresolvers import reverse
from django.views.decorators.cache import never_cache
from omeroweb.http import HttpJNLPResponse
from omero_version import omero_version
from omeroweb.webclient.decorators import render_response
@never_cache
@render_response()
def custom_index(request, conn=None, **kwargs):
context = {"version": omero_version}
if settings.INDEX_TEMPLATE is not None:
try:
template_loader.get_template(settings.INDEX_TEMPLATE)
context['template'] = settings.INDEX_TEMPLATE
except Exception, e:
context['template'] = 'webstart/start.html'
context["error"] = traceback.format_exception(*sys.exc_info())[-1]
else:
context['template'] = 'webstart/start.html'
insight_url = None
if settings.WEBSTART:
context['insight_url'] = request.build_absolute_uri(reverse("webstart_insight"))
return context
@never_cache
@render_response()
def index(request, conn=None, **kwargs):
context = {"version": omero_version}
if settings.WEBSTART_TEMPLATE is not None:
try:
template_loader.get_template(settings.WEBSTART_TEMPLATE)
context['template'] = settings.WEBSTART_TEMPLATE
except Exception, e:
context['template'] = 'webstart/index.html'
context["error"] = traceback.format_exception(*sys.exc_info())[-1]
else:
context['template'] = 'webstart/index.html'
insight_url = None
if settings.WEBSTART:
context['insight_url'] = request.build_absolute_uri(reverse("webstart_insight"))
return context
@never_cache
def insight(request):
t = template_loader.get_template('webstart/insight.xml')
codebase = request.build_absolute_uri(settings.STATIC_URL+'webstart/jars/')
href = request.build_absolute_uri(reverse("webstart_insight"))
pattern = os.path.abspath(os.path.join(settings.OMERO_HOME, "lib", "insight", "*.jar").replace('\\','/'))
jarlist = glob(pattern)
jarlist = [os.path.basename(x) for x in jarlist]
# ticket:9478 put insight jar at the start of the list if available
# This can be configured via omero.web.webstart_jar to point to a
# custom value.
idx = jarlist.index(settings.WEBSTART_JAR)
if idx > 0:
jarlist.pop(idx)
jarlist.insert(0, settings.WEBSTART_JAR)
idy = jarlist.index(settings.NANOXML_JAR)
if idy > 0:
jarlist.pop(idy)
jarlist.insert(len(jarlist)-1, settings.NANOXML_JAR)
context = {'codebase': codebase, 'href': href, 'jarlist': jarlist,
'icon': settings.WEBSTART_ICON,
'heap': settings.WEBSTART_HEAP,
'host': settings.WEBSTART_HOST,
'port': settings.WEBSTART_PORT,
'class': settings.WEBSTART_CLASS,
'title': settings.WEBSTART_TITLE,
'vendor': settings.WEBSTART_VENDOR,
'homepage': settings.WEBSTART_HOMEPAGE,
}
c = Context(request, context)
return HttpJNLPResponse(t.render(c))
|
gpl-2.0
| 4,398,770,162,860,684,000
| 33.739496
| 110
| 0.676343
| false
| 3.645503
| false
| false
| false
|
MrChoclate/optim
|
tsp.py
|
1
|
3352
|
import itertools
import math
import functools
import time
import random
import copy
def timer(func):
def with_time(*args, **kwargs):
t = time.time()
res = func(*args, **kwargs)
print("{} took {} sec".format(func.__name__, time.time() - t))
return res
return with_time
def read():
n = int(input())
return [tuple(float(x) for x in input().split()) for _ in range(n)]
@functools.lru_cache(maxsize=1024)
def distance(src, dest):
return math.sqrt(sum((x - y) ** 2 for x, y in zip(src, dest)))
def cost(sol, cities):
dst = sum(distance(cities[x], cities[y]) for x, y in zip(sol[:-1], sol[1:]))
    dst += distance(cities[sol[-1]], cities[sol[0]])  # close the tour: last visited city back to the start
return dst
def random_sol(cities):
sol = list(range(1, len(cities)))
random.shuffle(sol)
return [0] + sol
def neighboor(sol):
assert(sol[0] == 0)
i = random.randint(1, len(sol) - 1)
j = i
while j == i:
j = random.randint(1, len(sol) - 1)
res = copy.copy(sol)
res[i], res[j] = res[j], res[i]
return res
@timer
def random_search(cities):
res = float('inf')
best_sol = None
for _ in range(len(cities)):
sol = random_sol(cities)
current_cost = cost(sol, cities)
if res > current_cost:
best_sol = sol
res = current_cost
return res, best_sol
@timer
def stochastic_hill_climbing(cities, kmax=1000):
best_sol = random_sol(cities)
best_cost = cost(best_sol, cities)
k = 0
while k < kmax:
k += 1
current_sol = neighboor(best_sol)
current_cost = cost(current_sol, cities)
if current_cost < best_cost:
best_sol = current_sol
best_cost = current_cost
k = 0
return best_cost, best_sol
@timer
def simulated_annealing(cities):
current_sol = best_sol = random_sol(cities)
current_cost = best_cost = cost(best_sol, cities)
T = 1000 * best_cost / len(cities)
T_min = best_cost / len(cities) / 1000.
k = 0
while T > T_min:
k += 1
new_sol = neighboor(current_sol)
new_cost = cost(new_sol, cities)
if new_cost < best_cost:
best_sol = new_sol
best_cost = new_cost
k = 0
if new_cost < current_cost or random.random() <= math.exp((current_cost - new_cost) / T):
current_sol = new_sol
current_cost = new_cost
if k > 100:
T *= 0.99999
return best_cost, best_sol
@timer
def brute_solve(cities):
best_cost = float('inf')
best_sol = None
for sol in itertools.permutations(range(len(cities))):
current_cost = cost(sol, cities)
if current_cost < best_cost:
best_cost = current_cost
best_sol = sol
return best_cost, best_sol
@timer
def greedy_solve(cities, fn=min):
sol = [0]
i = 0
while i != len(cities) - 1:
remaining = set(range(len(cities))) - set(sol)
        _, pick = fn((distance(cities[sol[-1]], cities[x]), x) for x in remaining)  # measure from the last visited city
sol.append(pick)
i += 1
return cost(sol, cities), sol
if __name__ == '__main__':
cities = read()
print(greedy_solve(cities, fn=min))
print(greedy_solve(cities, fn=max))
print(random_search(cities))
print(stochastic_hill_climbing(cities))
print(simulated_annealing(cities))
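# Input format expected by read() on stdin (illustrative note, not part of the
# original script): first the number of cities, then one coordinate pair per line.
#   4
#   0.0 0.0
#   1.0 0.0
#   1.0 1.0
#   0.0 1.0
# Typical run (the file name is a placeholder):  python tsp.py < cities.txt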
|
gpl-3.0
| -7,064,214,220,862,310,000
| 25.1875
| 97
| 0.571599
| false
| 3.207656
| false
| false
| false
|
nkolban/Espruino
|
scripts/common.py
|
1
|
16586
|
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# Reads board information from boards/BOARDNAME.py - used by build_board_docs,
# build_pininfo, and build_platform_config
# ----------------------------------------------------------------------------------------
import subprocess;
import re;
import json;
import sys;
import os;
import importlib;
silent = os.getenv("SILENT");
if silent:
class Discarder(object):
def write(self, text):
pass # do nothing
# now discard everything coming out of stdout
sys.stdout = Discarder()
# http://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
if "check_output" not in dir( subprocess ):
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
# Scans files for comments of the form /*JSON......*/
#
# Comments look like:
#
#/*JSON{ "type":"staticmethod|staticproperty|constructor|method|property|function|variable|class|library|idle|init|kill",
# // class = built-in class that does not require instantiation
# // library = built-in class that needs require('classname')
# // idle = function to run on idle regardless
# // init = function to run on initialisation
# // kill = function to run on deinitialisation
# "class" : "Double", "name" : "doubleToIntBits",
# "needs_parentName":true, // optional - if for a method, this makes the first 2 args parent+parentName (not just parent)
# "generate_full|generate|wrap" : "*(JsVarInt*)&x",
# "description" : " Convert the floating point value given into an integer representing the bits contained in it",
# "params" : [ [ "x" , "float|int|int32|bool|pin|JsVar|JsVarName|JsVarArray", "A floating point number"] ],
# // float - parses into a JsVarFloat which is passed to the function
# // int - parses into a JsVarInt which is passed to the function
# // int32 - parses into a 32 bit int
# // bool - parses into a boolean
# // pin - parses into a pin
# // JsVar - passes a JsVar* to the function (after skipping names)
# // JsVarArray - parses this AND ANY SUBSEQUENT ARGUMENTS into a JsVar of type JSV_ARRAY. THIS IS ALWAYS DEFINED, EVEN IF ZERO LENGTH. Currently it must be the only parameter
# "return" : ["int|float|JsVar", "The integer representation of x"],
# "return_object" : "ObjectName", // optional - used for tern's code analysis - so for example we can do hints for openFile(...).yyy
# "no_create_links":1 // optional - if this is set then hyperlinks are not created when this name is mentioned (good example = bit() )
# "not_real_object" : "anything", // optional - for classes, this means we shouldn't treat this as a built-in object, as internally it isn't stored in a JSV_OBJECT
#  "prototype" : "Object", // optional - for classes, this is what their prototype is. It's particularly helpful if not_real_object, because there is no prototype var in that case
# "check" : "jsvIsFoo(var)", // for classes - this is code that returns true if 'var' is of the given type
# "ifndef" : "SAVE_ON_FLASH", // if the given preprocessor macro is defined, don't implement this
# "ifdef" : "USE_LCD_FOO", // if the given preprocessor macro isn't defined, don't implement this
# "#if" : "A>2", // add a #if statement in the generated C file (ONLY if type==object)
#}*/
#
# description can be an array of strings as well as a simple string (in which case each element is separated by a newline),
# and adding ```sometext``` in the description surrounds it with HTML code tags
#
def get_jsondata(is_for_document, parseArgs = True, board = False):
scriptdir = os.path.dirname (os.path.realpath(__file__))
print("Script location "+scriptdir)
os.chdir(scriptdir+"/..")
jswraps = []
defines = []
if board and ("build" in board.info) and ("defines" in board.info["build"]):
for i in board.info["build"]["defines"]:
print("Got define from board: " + i);
defines.append(i)
if parseArgs and len(sys.argv)>1:
print("Using files from command line")
for i in range(1,len(sys.argv)):
arg = sys.argv[i]
if arg[0]=="-":
if arg[1]=="D":
defines.append(arg[2:])
elif arg[1]=="B":
board = importlib.import_module(arg[2:])
if "usart" in board.chip: defines.append("USART_COUNT="+str(board.chip["usart"]));
if "spi" in board.chip: defines.append("SPI_COUNT="+str(board.chip["spi"]));
if "i2c" in board.chip: defines.append("I2C_COUNT="+str(board.chip["i2c"]));
if "USB" in board.devices: defines.append("defined(USB)=True");
else: defines.append("defined(USB)=False");
elif arg[1]=="F":
"" # -Fxxx.yy in args is filename xxx.yy, which is mandatory for build_jswrapper.py
else:
print("Unknown command-line option")
exit(1)
else:
jswraps.append(arg)
else:
print("Scanning for jswrap.c files")
jswraps = subprocess.check_output(["find", ".", "-name", "jswrap*.c"]).strip().split("\n")
if len(defines)>1:
print("Got #DEFINES:")
for d in defines: print(" "+d)
jsondatas = []
for jswrap in jswraps:
# ignore anything from archives
if jswrap.startswith("./archives/"): continue
# now scan
print("Scanning "+jswrap)
code = open(jswrap, "r").read()
if is_for_document and "DO_NOT_INCLUDE_IN_DOCS" in code:
print("FOUND 'DO_NOT_INCLUDE_IN_DOCS' IN FILE "+jswrap)
continue
for comment in re.findall(r"/\*JSON.*?\*/", code, re.VERBOSE | re.MULTILINE | re.DOTALL):
charnumber = code.find(comment)
linenumber = 1+code.count("\n", 0, charnumber)
# Strip off /*JSON .. */ bit
comment = comment[6:-2]
endOfJson = comment.find("\n}")+2;
jsonstring = comment[0:endOfJson];
description = comment[endOfJson:].strip();
# print("Parsing "+jsonstring)
try:
jsondata = json.loads(jsonstring)
if len(description): jsondata["description"] = description;
jsondata["filename"] = jswrap
jsondata["include"] = jswrap[:-2]+".h"
jsondata["githublink"] = "https://github.com/espruino/Espruino/blob/master/"+jswrap+"#L"+str(linenumber)
dropped_prefix = "Dropped "
if "name" in jsondata: dropped_prefix += jsondata["name"]+" "
elif "class" in jsondata: dropped_prefix += jsondata["class"]+" "
drop = False
if not is_for_document:
if ("ifndef" in jsondata) and (jsondata["ifndef"] in defines):
print(dropped_prefix+" because of #ifndef "+jsondata["ifndef"])
drop = True
if ("ifdef" in jsondata) and not (jsondata["ifdef"] in defines):
print(dropped_prefix+" because of #ifdef "+jsondata["ifdef"])
drop = True
if ("#if" in jsondata):
expr = jsondata["#if"]
for defn in defines:
if defn.find('=')!=-1:
dname = defn[:defn.find('=')]
dkey = defn[defn.find('=')+1:]
expr = expr.replace(dname, dkey);
try:
r = eval(expr)
except:
print("WARNING: error evaluating '"+expr+"' - from '"+jsondata["#if"]+"'")
r = True
if not r:
print(dropped_prefix+" because of #if "+jsondata["#if"]+ " -> "+expr)
drop = True
if not drop:
jsondatas.append(jsondata)
except ValueError as e:
sys.stderr.write( "JSON PARSE FAILED for " + jsonstring + " - "+ str(e) + "\n")
exit(1)
except:
sys.stderr.write( "JSON PARSE FAILED for " + jsonstring + " - "+str(sys.exc_info()[0]) + "\n" )
exit(1)
print("Scanning finished.")
return jsondatas
# Takes the data from get_jsondata and restructures it in preparation for output as JS
#
# Results look like:,
#{
# "Pin": {
# "desc": [
# "This is the built-in class for Pins, such as D0,D1,LED1, or BTN",
# "You can call the methods on Pin, or you can use Wiring-style functions such as digitalWrite"
# ],
# "methods": {
# "read": {
# "desc": "Returns the input state of the pin as a boolean",
# "params": [],
# "return": [
# "bool",
# "Whether pin is a logical 1 or 0"
# ]
# },
# "reset": {
# "desc": "Sets the output state of the pin to a 0",
# "params": [],
# "return": []
# },
# ...
# },
# "props": {},
# "staticmethods": {},
# "staticprops": {}
# },
# "print": {
# "desc": "Print the supplied string",
# "return": []
# },
# ...
#}
#
def get_struct_from_jsondata(jsondata):
context = {"modules": {}}
def checkClass(details):
cl = details["class"]
if not cl in context:
context[cl] = {"type": "class", "methods": {}, "props": {}, "staticmethods": {}, "staticprops": {}, "desc": details.get("description", "")}
return cl
def addConstructor(details):
cl = checkClass(details)
context[cl]["constructor"] = {"params": details.get("params", []), "return": details.get("return", []), "desc": details.get("description", "")}
def addMethod(details, type = ""):
cl = checkClass(details)
context[cl][type + "methods"][details["name"]] = {"params": details.get("params", []), "return": details.get("return", []), "desc": details.get("description", "")}
def addProp(details, type = ""):
cl = checkClass(details)
context[cl][type + "props"][details["name"]] = {"return": details.get("return", []), "desc": details.get("description", "")}
def addFunc(details):
context[details["name"]] = {"type": "function", "return": details.get("return", []), "desc": details.get("description", "")}
def addObj(details):
context[details["name"]] = {"type": "object", "instanceof": details.get("instanceof", ""), "desc": details.get("description", "")}
def addLib(details):
context["modules"][details["class"]] = {"desc": details.get("description", "")}
def addVar(details):
return
for data in jsondata:
type = data["type"]
if type=="class":
checkClass(data)
elif type=="constructor":
addConstructor(data)
elif type=="method":
addMethod(data)
elif type=="property":
addProp(data)
elif type=="staticmethod":
addMethod(data, "static")
elif type=="staticproperty":
addProp(data, "static")
elif type=="function":
addFunc(data)
elif type=="object":
addObj(data)
elif type=="library":
addLib(data)
elif type=="variable":
addVar(data)
else:
print(json.dumps(data, sort_keys=True, indent=2))
return context
def get_includes_from_jsondata(jsondatas):
includes = []
for jsondata in jsondatas:
include = jsondata["include"]
if not include in includes:
includes.append(include)
return includes
def is_property(jsondata):
return jsondata["type"]=="property" or jsondata["type"]=="staticproperty" or jsondata["type"]=="variable"
def is_function(jsondata):
return jsondata["type"]=="function" or jsondata["type"]=="method"
def get_prefix_name(jsondata):
if jsondata["type"]=="event": return "event"
if jsondata["type"]=="constructor": return "constructor"
if jsondata["type"]=="function": return "function"
if jsondata["type"]=="method": return "function"
if jsondata["type"]=="variable": return "variable"
if jsondata["type"]=="property": return "property"
return ""
def get_ifdef_description(d):
if d=="SAVE_ON_FLASH": return "devices with low flash memory"
if d=="STM32F1": return "STM32F1 devices (including Original Espruino Board)"
if d=="USE_LCD_SDL": return "Linux with SDL support compiled in"
if d=="USE_TLS": return "devices with TLS and SSL support (Espruino Pico and Espruino WiFi only)"
if d=="RELEASE": return "release builds"
if d=="LINUX": return "Linux-based builds"
if d=="USE_USB_HID": return "devices that support USB HID (Espruino Pico and Espruino WiFi)"
if d=="USE_AES": return "devices that support AES (Espruino Pico, Espruino WiFi or Linux)"
if d=="USE_CRYPTO": return "devices that support Crypto Functionality (Espruino Pico, Espruino WiFi, Linux or ESP8266)"
print("WARNING: Unknown ifdef '"+d+"' in common.get_ifdef_description")
return d
def get_script_dir():
return os.path.dirname(os.path.realpath(__file__))
def get_version():
# Warning: the same release label derivation is also in the Makefile
scriptdir = get_script_dir()
jsutils = scriptdir+"/../src/jsutils.h"
version = re.compile("^.*JS_VERSION.*\"(.*)\"");
alt_release = os.getenv("ALT_RELEASE")
if alt_release == None:
# Default release labeling based on commits since last release tag
latest_release = subprocess.check_output('git tag | grep RELEASE_ | sort | tail -1', shell=True).strip()
commits_since_release = subprocess.check_output('git log --oneline '+latest_release.decode("utf-8")+'..HEAD | wc -l', shell=True).decode("utf-8").strip()
else:
# Alternate release labeling with fork name (in ALT_RELEASE env var) plus branch
# name plus commit SHA
sha = subprocess.check_output('git rev-parse --short HEAD', shell=True).strip()
branch = subprocess.check_output('git name-rev --name-only HEAD', shell=True).strip()
commits_since_release = alt_release + '_' + branch + '_' + sha
for line in open(jsutils):
match = version.search(line);
if (match != None):
v = match.group(1);
if commits_since_release=="0": return v
else: return v+"."+commits_since_release
return "UNKNOWN"
def get_name_or_space(jsondata):
if "name" in jsondata: return jsondata["name"]
return ""
def get_bootloader_size(board):
if board.chip["family"]=="STM32F4": return 16*1024; # 16kb Pages, so we have no choice
return 10*1024;
# On normal chips this is 0x00000000
# On boards with bootloaders it's generally + 10240
# On F401, because of the setup of pages we put the bootloader in the first 16k, then in the 16+16+16 we put the saved code, and then finally we put the binary somewhere else
def get_espruino_binary_address(board):
if "place_text_section" in board.chip:
return board.chip["place_text_section"]
if "bootloader" in board.info and board.info["bootloader"]==1:
return get_bootloader_size(board);
return 0;
def get_board_binary_name(board):
return board.info["binary_name"].replace("%v", get_version());
|
mpl-2.0
| -2,976,484,380,835,921,000
| 43.347594
| 205
| 0.574882
| false
| 3.804128
| false
| false
| false
|
centrofermi/e3pipe
|
display/E3EventCanvas.py
|
1
|
5968
|
#!/usr/bin/env python
# *********************************************************************
# * Copyright (C) 2015 Luca Baldini (luca.baldini@pi.infn.it) *
# * *
# * For the license terms see the file LICENCE, distributed *
# * along with this software. *
# *********************************************************************
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
X_MIN = 0.
X_MAX = 158.0
NUM_STRIPS = 24
STRIP_PITCH = 3.2
Y_MIN = 0.5*STRIP_PITCH
Y_MAX = STRIP_PITCH*(NUM_STRIPS - 0.5)
DX = X_MAX - X_MIN
DY = Y_MAX - Y_MIN
from e3pipe.display.E3PhysicalCanvas import E3PhysicalCanvas
from e3pipe.display.__geometry2d__ import *
class E3EventCanvas(E3PhysicalCanvas):
"""
"""
NAME = 'cdisplay'
TITLE = 'EEE event display'
WPX = 1200
PLANE_THICKNESS = 2.
def __init__(self, z = [0, 40, 80], padding = 30., **kwargs):
""" Constructor.
"""
self.__Z = z
self.__Pad = padding
dz = max(z) - min(z)
self.__W = self.__Pad*4 + DX + DY
self.__H = self.__Pad*2 + dz
E3PhysicalCanvas.__init__(self, self.NAME, self.__W, self.__H,
self.WPX, title = self.TITLE, logo = False)
self.setup()
def setup(self):
""" Setup the canvas to display an event.
"""
self.Clear()
self.drawTelescope()
self.drawReference()
self.drawAnnotations()
def xz2canvas(self, x, z):
""" Convert from physical units to canvas coordinates (x-z plane).
"""
_x = x + self.__Pad - 0.5*self.__W
_z = z + self.__Pad - 0.5*self.__H
return (_x, _z)
def yz2canvas(self, y, z):
""" Convert from physical units to canvas coordinates (y-z plane).
"""
_y = y + 3*self.__Pad + DX - 0.5*self.__W
_z = z + self.__Pad - 0.5*self.__H
return (_y, _z)
def drawTelescope(self):
""" Draw the three planes of the telescope.
"""
for z in self.__Z:
box(0.5*DX - 0.5*self.__W + self.__Pad,
z - 0.5*self.__H + self.__Pad,
DX, self.PLANE_THICKNESS)
for i in range(NUM_STRIPS):
box((1 + i)*STRIP_PITCH - 0.5*self.__W + DX + 3*self.__Pad,
z - 0.5*self.__H + self.__Pad,
STRIP_PITCH, self.PLANE_THICKNESS)
def drawReference(self):
""" Draw the reference system.
"""
_l = 0.4*self.__Pad
_x, _z = self.xz2canvas(-0.5*self.__Pad, -0.5*self.__Pad)
arrow(_x, _z, _x + _l, _z)
annotate(_x + _l, _z, ' x', align = 13)
arrow(_x, _z, _x, _z + _l)
annotate(_x, _z + _l, 'z ', align = 31)
_y, _z = self.yz2canvas(-0.5*self.__Pad, -0.5*self.__Pad)
arrow(_y, _z, _y + _l, _z)
annotate(_y + _l, _z, ' y', align = 13)
arrow(_y, _z, _y, _z + _l)
annotate(_y, _z + _l, 'z ', align = 31)
def drawAnnotations(self):
""" Draw some annotations.
"""
_x, _z = self.xz2canvas(DX + self.__Pad, self.__Z[0])
annotate(_x, _z, 'bot', align = 22)
_x, _z = self.xz2canvas(DX + self.__Pad, self.__Z[1])
annotate(_x, _z, 'mid', align = 22)
_x, _z = self.xz2canvas(DX + self.__Pad, self.__Z[2])
annotate(_x, _z, 'top', align = 22)
_x, _z1 = self.xz2canvas(-0.5*self.__Pad, self.__Z[1])
_x, _z2 = self.xz2canvas(-0.5*self.__Pad, self.__Z[2])
vquote(_z1, _z2, _x)
def drawMarker(self, x, y, z, **kwargs):
""" Draw a three-dimensional point.
"""
_x, _z = self.xz2canvas(x, z)
marker(_x, _z, **kwargs)
_y, _z = self.yz2canvas(y, z)
marker(_y, _z, **kwargs)
self.Update()
def drawLine(self, x0, y0, z0, xdir, ydir, zdir, top = 100, bot = 100,
**kwargs):
""" Draw a line.
"""
_x0, _z0 = self.xz2canvas(x0, z0)
_x1 = _x0 - bot*xdir
_z1 = _z0 - bot*zdir
_x2 = _x0 + top*xdir
_z2 = _z0 + top*zdir
line(_x1, _z1, _x2, _z2, **kwargs)
_y0, _z0 = self.yz2canvas(y0, z0)
_y1 = _y0 - bot*ydir
_z1 = _z0 - bot*zdir
_y2 = _y0 + top*ydir
_z2 = _z0 + top*zdir
line(_y1, _z1, _y2, _z2, **kwargs)
self.Update()
def drawEventInfo(self, fileName, run, evt):
"""
"""
annotate(0.02, 0.94, '%s [%d - %d]' % (fileName, run, evt),
ndc = True, align = 12)
self.Update()
def annotateXZ(self, x, z, text, size = 1, ndc = False,
align = 22, color = ROOT.kBlack, angle = 0):
"""
"""
_x, _z = self.xz2canvas(x, z)
annotate(_x, _z, text, size, ndc, align, color, angle)
self.Update()
def annotateYZ(self, y, z, text, size = 1, ndc = False,
align = 22, color = ROOT.kBlack, angle = 0):
"""
"""
_y, _z = self.yz2canvas(y, z)
annotate(_y, _z, text, size, ndc, align, color, angle)
self.Update()
if __name__ == '__main__':
c = E3EventCanvas()
c.Draw()
    c.drawMarker(100, 34, 40)
|
gpl-3.0
| 3,696,403,357,255,309,000
| 32.909091
| 78
| 0.487936
| false
| 3.122972
| false
| false
| false
|
nithintech/google-python-exercises
|
babynames/babynames.py
|
2
|
2533
|
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
  ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
"""
# +++your code here+++
dic={}
f=open(filename,'rU')
d=f.read()
m=re.search("Popularity\sin\s(\d\d\d\d)",d)
print m.group(1)
n=re.findall("<td>(\d*)</td><td>(\w+)</td><td>(\w+)</td>",d)
for i in n:
dic[i[1]]=i[0]
dic[i[2]]=i[0]
res=sorted(dic.items())
lis=[]
lis.append(m.group(1))
for i in res:
s=i[0]+" "+i[1]
lis.append(s)
return lis
#for i in n:
# print i[0], i[1]
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print 'usage: [--summaryfile] file [file ...]'
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
for i in args:
names=extract_names(i)
if summary:
j=os.path.basename(i)
f=open(j+"summary.txt",'w')
for i in names:
f.write(i+"\n")
f.close()
else:
print names
# +++your code here+++
# For each filename, get the names, then either print the text output
# or write it to a summary file
if __name__ == '__main__':
main()
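# Expected shape of the result (added note based on the sample rows shown in the
# module docstring): for the 1990 snippet above, extract_names() would return
#   ['1990', 'Ashley 2', 'Brittany 3', 'Christopher 2',
#    'Jessica 1', 'Matthew 3', 'Michael 1']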
|
apache-2.0
| 3,653,605,300,787,971,600
| 22.896226
| 79
| 0.629293
| false
| 3.015476
| false
| false
| false
|
pbs/django-cms
|
cms/forms/widgets.py
|
1
|
9439
|
# -*- coding: utf-8 -*-
from cms.forms.utils import get_site_choices, get_page_choices
from cms.models import Page, PageUser, Placeholder
from cms.plugin_pool import plugin_pool
from cms.utils import get_language_from_request, cms_static_url
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth import get_permission_codename
from django.forms.widgets import Select, MultiWidget, Widget
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
import copy
from cms.templatetags.cms_admin import CMS_ADMIN_ICON_BASE
class LazySelect(Select):
def __init__(self, *args, **kwargs):
choices = kwargs['choices']
from cms.forms.fields import SuperLazyIterator
if isinstance(choices, SuperLazyIterator):
self.choices = kwargs.pop('choices')
super(Select, self).__init__(*args, **kwargs)
else:
super(LazySelect, self).__init__(*args, **kwargs)
class PageSelectWidget(MultiWidget):
"""A widget that allows selecting a page by first selecting a site and then
a page on that site in a two step process.
"""
def __init__(self, site_choices=None, page_choices=None, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
if site_choices is None or page_choices is None:
from cms.forms.fields import SuperLazyIterator
site_choices = SuperLazyIterator(get_site_choices)
page_choices = SuperLazyIterator(get_page_choices)
self.site_choices = site_choices
self.choices = page_choices
widgets = (LazySelect(choices=site_choices ),
LazySelect(choices=[('', '----')]),
LazySelect(choices=self.choices, attrs={'style': "display:none;"} ),
)
super(PageSelectWidget, self).__init__(widgets, attrs)
def decompress(self, value):
"""
receives a page_id in value and returns the site_id and page_id
of that page or the current site_id and None if no page_id is given.
"""
if value:
page = Page.objects.get(pk=value)
site = page.site
return [site.pk, page.pk, page.pk]
site = Site.objects.get_current()
return [site.pk,None,None]
def _has_changed(self, initial, data):
# THIS IS A COPY OF django.forms.widgets.Widget._has_changed()
# (except for the first if statement)
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or inital value we get
# is None, replace it w/ u''.
if data is None or (len(data)>=2 and data[1] in [None,'']):
data_value = u''
else:
data_value = data
if initial is None:
initial_value = u''
else:
initial_value = initial
if force_unicode(initial_value) != force_unicode(data_value):
return True
return False
def render(self, name, value, attrs=None):
# THIS IS A COPY OF django.forms.widgets.MultiWidget.render()
# (except for the last line)
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
output.append(r'''<script type="text/javascript">
(function($) {
var handleSiteChange = function(site_name, selected_id) {
$("#id_%(name)s_1 optgroup").remove();
var myOptions = $("#id_%(name)s_2 optgroup[label='" + site_name.replace(/(\'|\")/g, '\\$1') + "']").clone();
$("#id_%(name)s_1").append(myOptions);
$("#id_%(name)s_1").change();
};
var handlePageChange = function(page_id) {
if (page_id) {
$("#id_%(name)s_2 option").removeAttr('selected');
$("#id_%(name)s_2 option[value=" + page_id + "]").attr('selected','selected');
} else {
$("#id_%(name)s_2 option[value=]").attr('selected','selected');
};
};
$("#id_%(name)s_0").change(function(){
var site_label = $("#id_%(name)s_0").children(":selected").text();
handleSiteChange( site_label );
});
$("#id_%(name)s_1").change(function(){
var page_id = $(this).find('option:selected').val();
handlePageChange( page_id );
});
$(function(){
handleSiteChange( $("#id_%(name)s_0").children(":selected").text() );
$("#add_id_%(name)s").hide();
});
})(django.jQuery);
</script>''' % {'name': name})
return mark_safe(self.format_output(output))
def format_output(self, rendered_widgets):
return u' '.join(rendered_widgets)
class PluginEditor(Widget):
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
class Media:
js = [cms_static_url(path) for path in (
'js/libs/jquery.ui.core.js',
'js/libs/jquery.ui.sortable.js',
'js/plugin_editor.js',
)]
css = {
'all': [cms_static_url(path) for path in (
'css/plugin_editor.css',
)]
}
def render(self, name, value, attrs=None):
context = {
'plugin_list': self.attrs['list'],
'installed_plugins': self.attrs['installed'],
'copy_languages': self.attrs['copy_languages'],
'language': self.attrs['language'],
'show_copy': self.attrs['show_copy'],
'placeholder': self.attrs['placeholder'],
}
return mark_safe(render_to_string(
'admin/cms/page/widgets/plugin_editor.html', context))
class UserSelectAdminWidget(Select):
"""Special widget used in page permission inlines, because we have to render
an add user (plus) icon, but point it somewhere else - to special user creation
    view, which is accessible only if the user has "add user" permissions.
    Current user should be assigned to the widget in the form constructor as a user
attribute.
"""
def render(self, name, value, attrs=None, choices=()):
output = [super(UserSelectAdminWidget, self).render(name, value, attrs, choices)]
opts = PageUser._meta
if hasattr(self, 'user') and (self.user.is_superuser or \
self.user.has_perm(opts.app_label + '.' + get_permission_codename('add', opts))):
# append + icon
add_url = '../../../cms/pageuser/add/'
output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(add_url, name))
output.append(u'<img src="%sicon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (CMS_ADMIN_ICON_BASE, _('Add Another')))
return mark_safe(u''.join(output))
class PlaceholderPluginEditorWidget(PluginEditor):
attrs = {}
def __init__(self, request, filter_func):
self.request = request
self.filter_func = filter_func
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.request = copy.copy(self.request)
obj.filter_func = self.filter_func
memo[id(self)] = obj
return obj
def render(self, name, value, attrs=None):
try:
ph = Placeholder.objects.get(pk=value)
except Placeholder.DoesNotExist:
ph = None
context = {'add':True}
if ph:
plugin_list = ph.cmsplugin_set.filter(parent=None).order_by('position')
plugin_list = self.filter_func(self.request, plugin_list)
language = get_language_from_request(self.request)
copy_languages = []
if ph.actions.can_copy:
copy_languages = ph.actions.get_copy_languages(
placeholder=ph,
model=ph._get_attached_model(),
fieldname=ph._get_attached_field_name()
)
context = {
'plugin_list': plugin_list,
'installed_plugins': plugin_pool.get_all_plugins(ph.slot, include_page_only=False),
'copy_languages': copy_languages,
'language': language,
'show_copy': bool(copy_languages) and ph.actions.can_copy,
'urloverride': True,
'placeholder': ph,
}
#return mark_safe(render_to_string(
# 'admin/cms/page/widgets/plugin_editor.html', context))
return mark_safe(render_to_string(
'admin/cms/page/widgets/placeholder_editor.html', context, RequestContext(self.request)))
|
bsd-3-clause
| -3,840,572,163,296,305,700
| 38.827004
| 139
| 0.580888
| false
| 3.914973
| false
| false
| false
|