| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null ⌀) |
|---|---|---|---|---|
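Each row below pairs repository metadata with a raw file body. As a hedged sketch, a dump with this schema could be streamed and filtered with the Hugging Face datasets library; the dataset id here is hypothetical:

from datasets import load_dataset

# Hypothetical dataset id; streaming avoids downloading the full dump.
ds = load_dataset("example/code-dump", split="train", streaming=True)
for row in ds:
    # Each row carries repo_name, ref, path, copies, and content columns.
    if row["path"].endswith(".py"):
        print(row["repo_name"], row["path"], len(row["content"] or ""))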
interfect/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/pylib/gyp/generator/__init__.py
|
12133432
| |
Jorge-Rodriguez/ansible
|
refs/heads/devel
|
test/units/modules/network/netvisor/test_pn_vrouter_bgp_network.py
|
9
|
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_vrouter_bgp_network
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule, load_fixture
class TestVrouterBGPNetworkModule(TestNvosModule):
module = pn_vrouter_bgp_network
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_vrouter_bgp_network.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_vrouter_bgp_network.check_cli')
self.run_check_cli = self.mock_run_check_cli.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
self.mock_run_check_cli.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['present'] == 'vrouter-bgp-network-add':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['absent'] == 'vrouter-bgp-network-remove':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
if state == 'present':
self.run_check_cli.return_value = False, ''
if state == 'absent':
self.run_check_cli.return_value = True, ''
def test_vrouter_bgp_network_add(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_vrouter_name': 'foo-vrouter',
'pn_network': '10.10.10.10', 'pn_netmask': '31', 'state': 'present'})
result = self.execute_module(changed=True, state='present')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 vrouter-bgp-network-add vrouter-name foo-vrouter netmask 31 '
expected_cmd += 'network 10.10.10.10'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_vrouter_bgp_network_remove(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_vrouter_name': 'foo-vrouter',
'pn_network': '10.10.10.10', 'state': 'absent'})
result = self.execute_module(changed=True, state='absent')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 vrouter-bgp-network-remove vrouter-name foo-vrouter network 10.10.10.10'
self.assertEqual(result['cli_cmd'], expected_cmd)
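The setUp/tearDown pair above starts and stops patchers manually so every test sees the mocked CLI helpers. A minimal standalone sketch of the same pattern with the standard library's mock; the patch target is illustrative, not a real module:

from unittest import mock

def fake_run_cli(module, cli, state_map):
    # Stand-in that records the command instead of shelling out.
    return {'changed': True, 'cli_cmd': cli}

patcher = mock.patch('mypkg.mymodule.run_cli')  # illustrative target
mocked = patcher.start()
mocked.side_effect = fake_run_cli
try:
    pass  # exercise code that calls mypkg.mymodule.run_cli here
finally:
    patcher.stop()  # always undo the patch, as tearDown() does above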
|
anentropic/django-director
|
refs/heads/master
|
director/signals.py
|
1
|
import django.dispatch
"""
Use the job function you're executing as the `sender`
(though we don't use it currently)
"""
new_artefact = django.dispatch.Signal(providing_args=['file', 'name'])
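As a hedged usage sketch, a receiver can subscribe to this signal and a job function can fire it; the receiver and job names are hypothetical:

from director.signals import new_artefact

def on_new_artefact(sender, file=None, name=None, **kwargs):
    # `sender` is the job function that produced the artefact.
    print('new artefact %r from %r' % (name, sender))

new_artefact.connect(on_new_artefact)
# A job function `my_job` (hypothetical) would then fire the signal with:
# new_artefact.send(sender=my_job, file=file_obj, name='report.csv')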
|
boompieman/iim_project
|
refs/heads/master
|
project_python2/lib/python2.7/site-packages/tornado/queues.py
|
78
|
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
import collections
import heapq
from tornado import gen, ioloop
from tornado.concurrent import Future
from tornado.locks import Event
class QueueEmpty(Exception):
"""Raised by `.Queue.get_nowait` when the queue has no items."""
pass
class QueueFull(Exception):
"""Raised by `.Queue.put_nowait` when a queue is at its maximum size."""
pass
def _set_timeout(future, timeout):
if timeout:
def on_timeout():
future.set_exception(gen.TimeoutError())
io_loop = ioloop.IOLoop.current()
timeout_handle = io_loop.add_timeout(timeout, on_timeout)
future.add_done_callback(
lambda _: io_loop.remove_timeout(timeout_handle))
class _QueueIterator(object):
def __init__(self, q):
self.q = q
def __anext__(self):
return self.q.get()
class Queue(object):
"""Coordinate producer and consumer coroutines.
If maxsize is 0 (the default) the queue size is unbounded.
.. testcode::
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue
q = Queue(maxsize=2)
@gen.coroutine
def consumer():
while True:
item = yield q.get()
try:
print('Doing work on %s' % item)
yield gen.sleep(0.01)
finally:
q.task_done()
@gen.coroutine
def producer():
for item in range(5):
yield q.put(item)
print('Put %s' % item)
@gen.coroutine
def main():
# Start consumer without waiting (since it never finishes).
IOLoop.current().spawn_callback(consumer)
yield producer() # Wait for producer to put all tasks.
yield q.join() # Wait for consumer to finish all tasks.
print('Done')
IOLoop.current().run_sync(main)
.. testoutput::
Put 0
Put 1
Doing work on 0
Put 2
Doing work on 1
Put 3
Doing work on 2
Put 4
Doing work on 3
Doing work on 4
Done
In Python 3.5, `Queue` implements the async iterator protocol, so
``consumer()`` could be rewritten as::
async def consumer():
async for item in q:
try:
print('Doing work on %s' % item)
await gen.sleep(0.01)
finally:
q.task_done()
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
"""
def __init__(self, maxsize=0):
if maxsize is None:
raise TypeError("maxsize can't be None")
if maxsize < 0:
raise ValueError("maxsize can't be negative")
self._maxsize = maxsize
self._init()
self._getters = collections.deque([]) # Futures.
self._putters = collections.deque([]) # Pairs of (item, Future).
self._unfinished_tasks = 0
self._finished = Event()
self._finished.set()
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
def empty(self):
return not self._queue
def full(self):
if self.maxsize == 0:
return False
else:
return self.qsize() >= self.maxsize
def put(self, item, timeout=None):
"""Put an item into the queue, perhaps waiting until there is room.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
timeout.
"""
try:
self.put_nowait(item)
except QueueFull:
future = Future()
self._putters.append((item, future))
_set_timeout(future, timeout)
return future
else:
return gen._null_future
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise `QueueFull`.
"""
self._consume_expired()
if self._getters:
assert self.empty(), "queue non-empty, why are getters waiting?"
getter = self._getters.popleft()
self.__put_internal(item)
getter.set_result(self._get())
elif self.full():
raise QueueFull
else:
self.__put_internal(item)
def get(self, timeout=None):
"""Remove and return an item from the queue.
Returns a Future which resolves once an item is available, or raises
`tornado.gen.TimeoutError` after a timeout.
"""
future = Future()
try:
future.set_result(self.get_nowait())
except QueueEmpty:
self._getters.append(future)
_set_timeout(future, timeout)
return future
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Return an item if one is immediately available, else raise
`QueueEmpty`.
"""
self._consume_expired()
if self._putters:
assert self.full(), "queue not full, why are putters waiting?"
item, putter = self._putters.popleft()
self.__put_internal(item)
putter.set_result(None)
return self._get()
elif self.qsize():
return self._get()
else:
raise QueueEmpty
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each `.get` used to fetch a task, a
subsequent call to `.task_done` tells the queue that the processing
on the task is complete.
If a `.join` is blocking, it resumes when all items have been
processed; that is, when every `.put` is matched by a `.task_done`.
Raises `ValueError` if called more times than `.put`.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
def join(self, timeout=None):
"""Block until all items in the queue are processed.
Returns a Future, which raises `tornado.gen.TimeoutError` after a
timeout.
"""
return self._finished.wait(timeout)
@gen.coroutine
def __aiter__(self):
return _QueueIterator(self)
# These three are overridable in subclasses.
def _init(self):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
# End of the overridable methods.
def __put_internal(self, item):
self._unfinished_tasks += 1
self._finished.clear()
self._put(item)
def _consume_expired(self):
# Remove timed-out waiters.
while self._putters and self._putters[0][1].done():
self._putters.popleft()
while self._getters and self._getters[0].done():
self._getters.popleft()
def __repr__(self):
return '<%s at %s %s>' % (
type(self).__name__, hex(id(self)), self._format())
def __str__(self):
return '<%s %s>' % (type(self).__name__, self._format())
def _format(self):
result = 'maxsize=%r' % (self.maxsize, )
if getattr(self, '_queue', None):
result += ' queue=%r' % self._queue
if self._getters:
result += ' getters[%s]' % len(self._getters)
if self._putters:
result += ' putters[%s]' % len(self._putters)
if self._unfinished_tasks:
result += ' tasks=%s' % self._unfinished_tasks
return result
class PriorityQueue(Queue):
"""A `.Queue` that retrieves entries in priority order, lowest first.
Entries are typically tuples like ``(priority number, data)``.
.. testcode::
from tornado.queues import PriorityQueue
q = PriorityQueue()
q.put((1, 'medium-priority item'))
q.put((0, 'high-priority item'))
q.put((10, 'low-priority item'))
print(q.get_nowait())
print(q.get_nowait())
print(q.get_nowait())
.. testoutput::
(0, 'high-priority item')
(1, 'medium-priority item')
(10, 'low-priority item')
"""
def _init(self):
self._queue = []
def _put(self, item):
heapq.heappush(self._queue, item)
def _get(self):
return heapq.heappop(self._queue)
class LifoQueue(Queue):
"""A `.Queue` that retrieves the most recently put items first.
.. testcode::
from tornado.queues import LifoQueue
q = LifoQueue()
q.put(3)
q.put(2)
q.put(1)
print(q.get_nowait())
print(q.get_nowait())
print(q.get_nowait())
.. testoutput::
1
2
3
"""
def _init(self):
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
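Both put() and get() above take a deadline-style timeout (an absolute IOLoop time or a datetime.timedelta). A minimal sketch, assuming tornado is installed, of catching the resulting gen.TimeoutError on an empty queue:

import datetime
from tornado import gen, ioloop
from tornado.queues import Queue

@gen.coroutine
def main():
    q = Queue()
    try:
        # No producer exists, so this get() times out after roughly one second.
        item = yield q.get(timeout=datetime.timedelta(seconds=1))
    except gen.TimeoutError:
        print('queue stayed empty')

ioloop.IOLoop.current().run_sync(main)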
|
plish/Trolly
|
refs/heads/master
|
trolly/exceptions.py
|
2
|
class ResourceUnavailable(Exception):
'''
Exception representing a failed request to a resource
'''
def __init__(self, message, http_response):
super(ResourceUnavailable, self).__init__()
self.message = message
self.status = http_response.status
def __str__(self):
return "Resource unavailable: %s (HTTP status: %s)" % (self.message,
self.status)
class Unauthorised(Exception):
'''
This is raised if you don't have access to the requested object
'''
def __init__(self, message, http_response):
super(Unauthorised, self).__init__()
self.message = message
self.status = http_response.status
def __str__(self):
return "Unauthorised access to resource: %s (HTTP status: %s)" % (
self.message, self.status)
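A short sketch of raising and handling these exceptions with a stand-in response object; only the .status attribute is read by the constructors above:

class FakeResponse(object):
    status = 503  # the only attribute the exceptions above consume

try:
    raise ResourceUnavailable('boards endpoint', FakeResponse())
except ResourceUnavailable as e:
    print(e)  # Resource unavailable: boards endpoint (HTTP status: 503)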
|
ahmadassaf/zulip
|
refs/heads/master
|
zerver/migrations/0022_subscription_pin_to_top.py
|
41
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0021_migrate_attachment_data'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='pin_to_top',
field=models.BooleanField(default=False),
),
]
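For reference, the migration above corresponds to declaring this field on the model; a sketch only, since the real Subscription model carries many more fields:

from django.db import models

class Subscription(models.Model):
    # ... existing fields elided ...
    pin_to_top = models.BooleanField(default=False)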
|
dfang/odoo
|
refs/heads/10.0
|
odoo/tools/graph.py
|
71
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import operator
import math
class graph(object):
def __init__(self, nodes, transitions, no_ancester=None):
"""Initialize graph's object
@param nodes list of ids of nodes in the graph
@param transitions list of edges in the graph in the form (source_node, destination_node)
@param no_ancester list of nodes with no incoming edges
"""
self.nodes = nodes or []
self.edges = transitions or []
self.no_ancester = no_ancester or {}
trans = {}
for t in transitions:
trans.setdefault(t[0], [])
trans[t[0]].append(t[1])
self.transitions = trans
self.result = {}
def init_rank(self):
"""Computes rank of the nodes of the graph by finding initial feasible tree
"""
self.edge_wt = {}
for link in self.links:
self.edge_wt[link] = self.result[link[1]]['x'] - self.result[link[0]]['x']
tot_node = len(self.partial_order)
#do until all the nodes in the component are searched
while self.tight_tree()<tot_node:
list_node = []
list_edge = []
for node in self.nodes:
if node not in self.reachable_nodes:
list_node.append(node)
for edge in self.edge_wt:
if edge not in self.tree_edges:
list_edge.append(edge)
slack = 100
for edge in list_edge:
if ((edge[0] in self.reachable_nodes and edge[1] not in self.reachable_nodes) or
(edge[1] in self.reachable_nodes and edge[0] not in self.reachable_nodes)):
if slack > self.edge_wt[edge]-1:
slack = self.edge_wt[edge]-1
new_edge = edge
if new_edge[0] not in self.reachable_nodes:
delta = -(self.edge_wt[new_edge]-1)
else:
delta = self.edge_wt[new_edge]-1
for node in self.result:
if node in self.reachable_nodes:
self.result[node]['x'] += delta
for edge in self.edge_wt:
self.edge_wt[edge] = self.result[edge[1]]['x'] - self.result[edge[0]]['x']
self.init_cutvalues()
def tight_tree(self):
self.reachable_nodes = []
self.tree_edges = []
self.reachable_node(self.start)
return len(self.reachable_nodes)
def reachable_node(self, node):
"""Find the nodes of the graph which are only 1 rank apart from each other
"""
if node not in self.reachable_nodes:
self.reachable_nodes.append(node)
for edge in self.edge_wt:
if edge[0]==node:
if self.edge_wt[edge]==1:
self.tree_edges.append(edge)
if edge[1] not in self.reachable_nodes:
self.reachable_nodes.append(edge[1])
self.reachable_node(edge[1])
def init_cutvalues(self):
"""Initailize cut values of edges of the feasible tree.
Edges with negative cut-values are removed from the tree to optimize rank assignment
"""
self.cut_edges = {}
self.head_nodes = []
i=0
for edge in self.tree_edges:
self.head_nodes = []
rest_edges = []
rest_edges += self.tree_edges
del rest_edges[i]
self.head_component(self.start, rest_edges)
i+=1
positive = 0
negative = 0
for source_node in self.transitions:
if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes:
negative+=1
else:
for dest_node in self.transitions[source_node]:
if dest_node in self.head_nodes:
positive+=1
self.cut_edges[edge] = positive - negative
def head_component(self, node, rest_edges):
"""Find nodes which are reachable from the starting node, after removing an edge
"""
if node not in self.head_nodes:
self.head_nodes.append(node)
for edge in rest_edges:
if edge[0]==node:
self.head_component(edge[1],rest_edges)
def process_ranking(self, node, level=0):
"""Computes initial feasible ranking after making graph acyclic with depth-first search
"""
if node not in self.result:
self.result[node] = {'y': None, 'x':level, 'mark':0}
else:
if level > self.result[node]['x']:
self.result[node]['x'] = level
if self.result[node]['mark']==0:
self.result[node]['mark'] = 1
for sec_end in self.transitions.get(node, []):
self.process_ranking(sec_end, level+1)
def make_acyclic(self, parent, node, level, tree):
"""Computes Partial-order of the nodes with depth-first search
"""
if node not in self.partial_order:
self.partial_order[node] = {'level':level, 'mark':0}
if parent:
tree.append((parent, node))
if self.partial_order[node]['mark']==0:
self.partial_order[node]['mark'] = 1
for sec_end in self.transitions.get(node, []):
self.links.append((node, sec_end))
self.make_acyclic(node, sec_end, level+1, tree)
return tree
def rev_edges(self, tree):
"""reverse the direction of the edges whose source-node-partail_order> destination-node-partail_order
to make the graph acyclic
"""
Is_Cyclic = False
i=0
for link in self.links:
src = link[0]
des = link[1]
edge_len = self.partial_order[des]['level'] - self.partial_order[src]['level']
if edge_len < 0:
del self.links[i]
self.links.insert(i, (des, src))
self.transitions[src].remove(des)
self.transitions.setdefault(des, []).append(src)
Is_Cyclic = True
elif math.fabs(edge_len) > 1:
Is_Cyclic = True
i += 1
return Is_Cyclic
def exchange(self, e, f):
"""Exchange edges to make feasible-tree optimized
:param e: edge with negative cut-value
:param f: new edge with minimum slack-value
"""
del self.tree_edges[self.tree_edges.index(e)]
self.tree_edges.append(f)
self.init_cutvalues()
def enter_edge(self, edge):
"""Finds a new_edge with minimum slack value to replace an edge with negative cut-value
@param edge edge with negative cut-value
"""
self.head_nodes = []
rest_edges = []
rest_edges += self.tree_edges
del rest_edges[rest_edges.index(edge)]
self.head_component(self.start, rest_edges)
if edge[1] in self.head_nodes:
l = []
for node in self.result:
if node not in self.head_nodes:
l.append(node)
self.head_nodes = l
slack = 100
new_edge = edge
for source_node in self.transitions:
if source_node in self.head_nodes:
for dest_node in self.transitions[source_node]:
if dest_node not in self.head_nodes:
if slack>(self.edge_wt[edge]-1):
slack = self.edge_wt[edge]-1
new_edge = (source_node, dest_node)
return new_edge
def leave_edge(self):
"""Returns the edge with negative cut_value(if exists)
"""
if self.critical_edges:
for edge in self.critical_edges:
self.cut_edges[edge] = 0
for edge in self.cut_edges:
if self.cut_edges[edge]<0:
return edge
return None
def finalize_rank(self, node, level):
self.result[node]['x'] = level
for destination in self.optimal_edges.get(node, []):
self.finalize_rank(destination, level+1)
def normalize(self):
"""The ranks are normalized by setting the least rank to zero.
"""
least_rank = min(map(lambda x: x['x'], self.result.values()))
if least_rank!=0:
for node in self.result:
self.result[node]['x']-=least_rank
def make_chain(self):
"""Edges between nodes more than one rank apart are replaced by chains of unit
length edges between temporary nodes.
"""
for edge in self.edge_wt:
if self.edge_wt[edge]>1:
self.transitions[edge[0]].remove(edge[1])
start = self.result[edge[0]]['x']
end = self.result[edge[1]]['x']
for rank in range(start+1, end):
if not self.result.get((rank, 'temp'), False):
self.result[(rank, 'temp')] = {'y': None, 'x': rank, 'mark': 0}
for rank in range(start, end):
if start==rank:
self.transitions[edge[0]].append((rank+1, 'temp'))
elif rank==end-1:
self.transitions.setdefault((rank, 'temp'), []).append(edge[1])
else:
self.transitions.setdefault((rank, 'temp'), []).append((rank+1, 'temp'))
def init_order(self, node, level):
"""Initialize orders the nodes in each rank with depth-first search
"""
if not self.result[node]['y']:
self.result[node]['y'] = self.order[level]
self.order[level] += 1
for sec_end in self.transitions.get(node, []):
if node!=sec_end:
self.init_order(sec_end, self.result[sec_end]['x'])
def order_heuristic(self):
for i in range(12):
self.wmedian()
def wmedian(self):
"""Applies median heuristic to find optimzed order of the nodes with in their ranks
"""
for level in self.levels:
node_median = []
nodes = self.levels[level]
for node in nodes:
node_median.append((node, self.median_value(node, level-1)))
sort_list = sorted(node_median, key=operator.itemgetter(1))
new_list = [tuple[0] for tuple in sort_list]
self.levels[level] = new_list
order = 0
for node in nodes:
self.result[node]['y'] = order
order +=1
def median_value(self, node, adj_rank):
"""Returns median value of a vertex , defined as the median position of the adjacent vertices
@param node node to process
@param adj_rank rank 1 less than the node's rank
"""
adj_nodes = self.adj_position(node, adj_rank)
l = len(adj_nodes)
m = l/2
if l==0:
return -1.0
elif l%2 == 1:
return adj_nodes[m]  # odd count: the middle element is the median
elif l==2:
return (adj_nodes[0]+adj_nodes[1])/2
else:
left = adj_nodes[m-1] - adj_nodes[0]
right = adj_nodes[l-1] - adj_nodes[m]
return ((adj_nodes[m-1]*right) + (adj_nodes[m]*left))/(left+right)
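# A worked example of the weighted-median branch above (illustrative values,
# Python 2 semantics where l/2 is integer division):
#   adj_nodes = [0, 2, 3, 5]  ->  l = 4, m = 2
#   left  = adj_nodes[1] - adj_nodes[0] = 2
#   right = adj_nodes[3] - adj_nodes[2] = 2
#   result = (adj_nodes[1]*right + adj_nodes[2]*left) / (left + right)
#          = (2*2 + 3*2) / 4 = 10/4 = 2 under integer division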
def adj_position(self, node, adj_rank):
"""Returns list of the present positions of the nodes adjacent to node in the given adjacent rank.
@param node node to process
@param adj_rank rank 1 less than the node's rank
"""
pre_level_nodes = self.levels.get(adj_rank, [])
adj_nodes = []
if pre_level_nodes:
for src in pre_level_nodes:
if self.transitions.get(src) and node in self.transitions[src]:
adj_nodes.append(self.result[src]['y'])
return adj_nodes
def preprocess_order(self):
levels = {}
for r in self.partial_order:
l = self.result[r]['x']
levels.setdefault(l,[])
levels[l].append(r)
self.levels = levels
def graph_order(self):
"""Finds actual-order of the nodes with respect to maximum number of nodes in a rank in component
"""
mid_pos = 0.0
max_level = max(map(lambda x: len(x), self.levels.values()))
for level in self.levels:
if level:
no = len(self.levels[level])
factor = (max_level - no) * 0.10
list = self.levels[level]
list.reverse()
if no%2==0:
first_half = list[no/2:]
factor = -factor
else:
first_half = list[no/2+1:]
if max_level==1:#for the case when horizontal graph is there
self.result[list[no/2]]['y'] = mid_pos + (self.result[list[no/2]]['x']%2 * 0.5)
else:
self.result[list[no/2]]['y'] = mid_pos + factor
last_half = list[:no/2]
i=1
for node in first_half:
self.result[node]['y'] = mid_pos - (i + factor)
i += 1
i=1
for node in last_half:
self.result[node]['y'] = mid_pos + (i + factor)
i += 1
else:
self.max_order += max_level+1
mid_pos = self.result[self.start]['y']
def tree_order(self, node, last=0):
mid_pos = self.result[node]['y']
l = self.transitions.get(node, [])
l.reverse()
no = len(l)
rest = no%2
first_half = l[no/2+rest:]
last_half = l[:no/2]
for i, child in enumerate(first_half):
self.result[child]['y'] = mid_pos - (i+1 - (0 if rest else 0.5))
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
last = self.tree_order(child, last)
if rest:
mid_node = l[no/2]
self.result[mid_node]['y'] = mid_pos
if self.transitions.get(mid_node, False):
if last:
self.result[mid_node]['y'] = last + len(self.transitions[mid_node])/2 + 1
if node!=mid_node:
last = self.tree_order(mid_node)
else:
if last:
self.result[mid_node]['y'] = last + 1
self.result[node]['y'] = self.result[mid_node]['y']
mid_pos = self.result[node]['y']
i=1
last_child = None
for child in last_half:
self.result[child]['y'] = mid_pos + (i - (0 if rest else 0.5))
last_child = child
i += 1
if self.transitions.get(child, False):
if last:
self.result[child]['y'] = last + len(self.transitions[child])/2 + 1
if node!=child:
last = self.tree_order(child, last)
if last_child:
last = self.result[last_child]['y']
return last
def process_order(self):
"""Finds actual-order of the nodes with respect to maximum number of nodes in a rank in component
"""
if self.Is_Cyclic:
max_level = max(map(lambda x: len(x), self.levels.values()))
if max_level%2:
self.result[self.start]['y'] = (max_level+1)/2 + self.max_order + (self.max_order and 1)
else:
self.result[self.start]['y'] = max_level /2 + self.max_order + (self.max_order and 1)
self.graph_order()
else:
self.result[self.start]['y'] = 0
self.tree_order(self.start, 0)
min_order = math.fabs(min(map(lambda x: x['y'], self.result.values())))
index = self.start_nodes.index(self.start)
same = False
roots = []
if index>0:
for start in self.start_nodes[:index]:
same = True
for edge in self.tree_list[start][1:]:
if edge in self.tree_list[self.start]:
continue
else:
same = False
break
if same:
roots.append(start)
if roots:
min_order += self.max_order
else:
min_order += self.max_order + 1
for level in self.levels:
for node in self.levels[level]:
self.result[node]['y'] += min_order
if roots:
roots.append(self.start)
one_level_el = self.tree_list[self.start][0][1]
base = self.result[one_level_el]['y']# * 2 / (index + 2)
no = len(roots)
first_half = roots[:no/2]
if no%2==0:
last_half = roots[no/2:]
else:
last_half = roots[no/2+1:]
factor = -math.floor(no/2)
for start in first_half:
self.result[start]['y'] = base + factor
factor += 1
if no%2:
self.result[roots[no/2]]['y'] = base + factor
factor +=1
for start in last_half:
self.result[start]['y'] = base + factor
factor += 1
self.max_order = max(map(lambda x: x['y'], self.result.values()))
def find_starts(self):
"""Finds other start nodes of the graph in the case when graph is disconneted
"""
rem_nodes = []
for node in self.nodes:
if not self.partial_order.get(node):
rem_nodes.append(node)
cnt = 0
while True:
if len(rem_nodes)==1:
self.start_nodes.append(rem_nodes[0])
break
else:
count = 0
new_start = rem_nodes[0]
largest_tree = []
for node in rem_nodes:
self.partial_order = {}
tree = self.make_acyclic(None, node, 0, [])
if len(tree)+1 > count:
count = len(tree) + 1
new_start = node
largest_tree = tree
else:
if not largest_tree:
new_start = rem_nodes[0]
rem_nodes.remove(new_start)
self.start_nodes.append(new_start)
for edge in largest_tree:
if edge[0] in rem_nodes:
rem_nodes.remove(edge[0])
if edge[1] in rem_nodes:
rem_nodes.remove(edge[1])
if not rem_nodes:
break
def rank(self):
"""Finds the optimized rank of the nodes using Network-simplex algorithm
"""
self.levels = {}
self.critical_edges = []
self.partial_order = {}
self.links = []
self.Is_Cyclic = False
self.tree_list[self.start] = self.make_acyclic(None, self.start, 0, [])
self.Is_Cyclic = self.rev_edges(self.tree_list[self.start])
self.process_ranking(self.start)
self.init_rank()
#make cut values of all tree edges to 0 to optimize feasible tree
e = self.leave_edge()
while e :
f = self.enter_edge(e)
if e==f:
self.critical_edges.append(e)
else:
self.exchange(e,f)
e = self.leave_edge()
#finalize rank using optimum feasible tree
# self.optimal_edges = {}
# for edge in self.tree_edges:
# source = self.optimal_edges.setdefault(edge[0], [])
# source.append(edge[1])
# self.finalize_rank(self.start, 0)
#normalization
self.normalize()
for edge in self.edge_wt:
self.edge_wt[edge] = self.result[edge[1]]['x'] - self.result[edge[0]]['x']
def order_in_rank(self):
"""Finds optimized order of the nodes within their ranks using median heuristic
"""
self.make_chain()
self.preprocess_order()
self.order = {}
max_rank = max(map(lambda x: x, self.levels.keys()))
for i in range(max_rank+1):
self.order[i] = 0
self.init_order(self.start, self.result[self.start]['x'])
for level in self.levels:
self.levels[level].sort(lambda x, y: cmp(self.result[x]['y'], self.result[y]['y']))
self.order_heuristic()
self.process_order()
def process(self, starting_node):
"""Process the graph to find ranks and order of the nodes
@param starting_node node from where to start the graph search
"""
self.start_nodes = starting_node or []
self.partial_order = {}
self.links = []
self.tree_list = {}
if self.nodes:
if self.start_nodes:
# add dummy edges to the nodes which do not have any incoming edges
tree = self.make_acyclic(None, self.start_nodes[0], 0, [])
for node in self.no_ancester:
for sec_node in self.transitions.get(node, []):
if sec_node in self.partial_order.keys():
self.transitions[self.start_nodes[0]].append(node)
break
self.partial_order = {}
tree = self.make_acyclic(None, self.start_nodes[0], 0, [])
# if the graph is disconnected or no start node is given,
# then find a starting node for each component of the graph
if len(self.nodes) > len(self.partial_order):
self.find_starts()
self.max_order = 0
#for each component of the graph find ranks and order of the nodes
for s in self.start_nodes:
self.start = s
self.rank() # First step: network simplex algorithm
self.order_in_rank() # Second step: ordering nodes within ranks
def __str__(self):
result = ''
for l in self.levels:
result += 'PosY: ' + str(l) + '\n'
for node in self.levels[l]:
result += '\tPosX: '+ str(self.result[node]['y']) + ' - Node:' + str(node) + "\n"
return result
def scale(self, maxx, maxy, nwidth=0, nheight=0, margin=20):
"""Computes actual co-ordiantes of the nodes
"""
# for flat edges, i.e. source and destination nodes are on the same rank
for src in self.transitions:
for des in self.transitions[src]:
if self.result[des]['x'] - self.result[src]['x'] == 0:
self.result[src]['x'] += 0.08
self.result[des]['x'] -= 0.08
factorX = maxx + nheight
factorY = maxy + nwidth
for node in self.result:
self.result[node]['y'] = (self.result[node]['y']) * factorX + margin
self.result[node]['x'] = (self.result[node]['x']) * factorY + margin
def result_get(self):
return self.result
if __name__=='__main__':
starting_node = ['profile'] # put here nodes with flow_start=True
nodes = ['project','account','hr','base','product','mrp','test','profile']
transitions = [
('profile','mrp'),
('mrp','project'),
('project','product'),
('mrp','hr'),
('mrp','test'),
('project','account'),
('project','hr'),
('product','base'),
('account','product'),
('account','test'),
('account','base'),
('hr','base'),
('test','base')
]
radius = 20
g = graph(nodes, transitions)
g.process(starting_node)
g.scale(radius*3,radius*3, radius, radius)
from PIL import Image
from PIL import ImageDraw
img = Image.new("RGB", (800, 600), "#ffffff")
draw = ImageDraw.Draw(img)
result = g.result_get()
node_res = {}
for node in nodes:
node_res[node] = result[node]
for name,node in node_res.items():
draw.arc( (int(node['y']-radius), int(node['x']-radius),int(node['y']+radius), int(node['x']+radius) ), 0, 360, (128,128,128))
draw.text( (int(node['y']), int(node['x'])), str(name), (128,128,128))
for t in transitions:
draw.line( (int(node_res[t[0]]['y']), int(node_res[t[0]]['x']),int(node_res[t[1]]['y']),int(node_res[t[1]]['x'])),(128,128,128) )
img.save("graph.png", "PNG")
|
KrzysztofStachanczyk/Sensors-WWW-website
|
refs/heads/master
|
www/env/lib/python2.7/site-packages/django/contrib/gis/geoip/libgeoip.py
|
479
|
import os
from ctypes import CDLL
from ctypes.util import find_library
from django.conf import settings
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {key: getattr(settings, key)
for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY')
if hasattr(settings, key)}
lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH')
# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
lib_name = None
else:
# TODO: Is this really the library name for Windows?
lib_name = 'GeoIP'
# Getting the path to the GeoIP library.
if lib_name:
lib_path = find_library(lib_name)
if lib_path is None:
raise RuntimeError('Could not find the GeoIP library (tried "%s"). '
'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name)
lgeoip = CDLL(lib_path)
# Getting the C `free` for the platform.
if os.name == 'nt':
libc = CDLL('msvcrt')
else:
libc = CDLL(None)
free = libc.free
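A sketch of the Django settings this module reads, pointing it at an explicit shared library so that find_library() is skipped; the paths are illustrative:

# settings.py (illustrative paths)
GEOIP_PATH = '/usr/local/share/GeoIP'               # directory holding the .dat databases
GEOIP_LIBRARY_PATH = '/usr/local/lib/libGeoIP.so'   # bypasses find_library('GeoIP')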
|
saurabh6790/test-erp
|
refs/heads/develop
|
erpnext/hr/doctype/leave_allocation/leave_allocation.py
|
43
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, flt
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class LeaveAllocation(Document):
def validate(self):
self.validate_new_leaves_allocated_value()
self.check_existing_leave_allocation()
if not self.total_leaves_allocated:
self.total_leaves_allocated = self.new_leaves_allocated
set_employee_name(self)
def on_update_after_submit(self):
self.validate_new_leaves_allocated_value()
def on_update(self):
self.get_total_allocated_leaves()
def validate_new_leaves_allocated_value(self):
"""validate that leave allocation is in multiples of 0.5"""
if flt(self.new_leaves_allocated) % 0.5:
frappe.throw(_("Leaves must be allocated in multiples of 0.5"))
def check_existing_leave_allocation(self):
"""check whether leave for same type is already allocated or not"""
leave_allocation = frappe.db.sql("""select name from `tabLeave Allocation`
where employee=%s and leave_type=%s and fiscal_year=%s and docstatus=1""",
(self.employee, self.leave_type, self.fiscal_year))
if leave_allocation:
frappe.msgprint(_("Leaves for type {0} already allocated for Employee {1} for Fiscal Year {0}").format(self.leave_type,
self.employee, self.fiscal_year))
frappe.throw('<a href="#Form/Leave Allocation/{0}">{0}</a>'.format(leave_allocation[0][0]))
def get_leave_bal(self, prev_fyear):
return self.get_leaves_allocated(prev_fyear) - self.get_leaves_applied(prev_fyear)
def get_leaves_applied(self, fiscal_year):
leaves_applied = frappe.db.sql("""select SUM(ifnull(total_leave_days, 0))
from `tabLeave Application` where employee=%s and leave_type=%s
and fiscal_year=%s and docstatus=1""",
(self.employee, self.leave_type, fiscal_year))
return leaves_applied and flt(leaves_applied[0][0]) or 0
def get_leaves_allocated(self, fiscal_year):
leaves_allocated = frappe.db.sql("""select SUM(ifnull(total_leaves_allocated, 0))
from `tabLeave Allocation` where employee=%s and leave_type=%s
and fiscal_year=%s and docstatus=1 and name!=%s""",
(self.employee, self.leave_type, fiscal_year, self.name))
return leaves_allocated and flt(leaves_allocated[0][0]) or 0
def allow_carry_forward(self):
"""check whether carry forward is allowed or not for this leave type"""
cf = frappe.db.sql("""select is_carry_forward from `tabLeave Type` where name = %s""",
self.leave_type)
cf = cf and cint(cf[0][0]) or 0
if not cf:
frappe.db.set(self,'carry_forward',0)
frappe.throw(_("Cannot carry forward {0}").format(self.leave_type))
def get_carry_forwarded_leaves(self):
if self.carry_forward:
self.allow_carry_forward()
prev_fiscal_year = frappe.db.sql("""select name from `tabFiscal Year`
where year_start_date = (select date_add(year_start_date, interval -1 year)
from `tabFiscal Year` where name=%s)
order by name desc limit 1""", self.fiscal_year)
prev_fiscal_year = prev_fiscal_year and prev_fiscal_year[0][0] or ''
prev_bal = 0
if prev_fiscal_year and cint(self.carry_forward) == 1:
prev_bal = self.get_leave_bal(prev_fiscal_year)
ret = {
'carry_forwarded_leaves': prev_bal,
'total_leaves_allocated': flt(prev_bal) + flt(self.new_leaves_allocated)
}
return ret
def get_total_allocated_leaves(self):
leave_det = self.get_carry_forwarded_leaves()
frappe.db.set(self,'carry_forwarded_leaves',flt(leave_det['carry_forwarded_leaves']))
frappe.db.set(self,'total_leaves_allocated',flt(leave_det['total_leaves_allocated']))
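In plain terms, get_carry_forwarded_leaves computes the previous year's balance as allocated minus applied, and the new total as that balance plus the newly allocated leaves. A numeric sketch with illustrative values:

prev_allocated, prev_applied = 20.0, 12.5
new_leaves_allocated = 15.0
carry_forwarded = prev_allocated - prev_applied           # get_leave_bal() -> 7.5
total_allocated = carry_forwarded + new_leaves_allocated  # 22.5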
|
StephenWeber/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/gluster_volume.py
|
11
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Taneli Leppä <taneli@crasman.fi>
#
# This file is part of Ansible (sort of)
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
module: gluster_volume
short_description: Manage GlusterFS volumes
description:
- Create, remove, start, stop and tune GlusterFS volumes
version_added: "1.9"
options:
name:
required: true
description:
- The volume name
state:
required: true
choices: [ 'present', 'absent', 'started', 'stopped' ]
description:
- Use present/absent to ensure whether a volume exists or not,
use started/stopped to control its availability.
cluster:
required: false
default: null
description:
- List of hosts to use for probing and brick setup
host:
required: false
default: null
description:
- Override local hostname (for peer probing purposes)
replicas:
required: false
default: null
description:
- Replica count for volume
arbiters:
required: false
default: null
description:
- Arbiter count for volume
version_added: "2.3"
stripes:
required: false
default: null
description:
- Stripe count for volume
disperses:
required: false
default: null
description:
- Disperse count for volume
version_added: "2.2"
redundancies:
required: false
default: null
description:
- Redundancy count for volume
version_added: "2.2"
transport:
required: false
choices: [ 'tcp', 'rdma', 'tcp,rdma' ]
default: 'tcp'
description:
- Transport type for volume
bricks:
required: false
default: null
description:
- Brick paths on servers. Multiple brick paths can be separated by commas
aliases: ['brick']
start_on_create:
choices: [ 'yes', 'no']
required: false
default: 'yes'
description:
- Controls whether the volume is started after creation or not, defaults to yes
rebalance:
choices: [ 'yes', 'no']
required: false
default: 'no'
description:
- Controls whether the cluster is rebalanced after changes
directory:
required: false
default: null
description:
- Directory for limit-usage
options:
required: false
default: null
description:
- A dictionary/hash with options/settings for the volume
quota:
required: false
default: null
description:
- Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list)
force:
required: false
default: null
description:
- If the brick is being created in the root partition, the module will fail.
Set force to true to override this behaviour.
notes:
- "Requires cli tools for GlusterFS on servers"
- "Will add new bricks, but not remove them"
author: "Taneli Leppä (@rosmo)"
"""
EXAMPLES = """
- name: create gluster volume
gluster_volume:
state: present
name: test1
bricks: /bricks/brick1/g1
rebalance: yes
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
- name: tune
gluster_volume:
state: present
name: test1
options:
performance.cache-size: 256MB
- name: start gluster volume
gluster_volume:
state: started
name: test1
- name: limit usage
gluster_volume:
state: present
name: test1
directory: /foo
quota: 20.0MB
- name: stop gluster volume
gluster_volume:
state: stopped
name: test1
- name: remove gluster volume
gluster_volume:
state: absent
name: test1
- name: create gluster volume with multiple bricks
gluster_volume:
state: present
name: test2
bricks: /bricks/brick1/g2,/bricks/brick2/g2
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
"""
import re  # used by get_quotas(); otherwise only available via the star import below
import shutil
import time
import socket
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
glusterbin = ''
def run_gluster(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
try:
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
except Exception:
e = get_exception()
module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e)))
return out
def run_gluster_nofail(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
return None
return out
def run_gluster_yes(gargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
rc, out, err = module.run_command(args, data='y\n')
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
return out
def get_peers():
out = run_gluster([ 'peer', 'status'])
i = 0
peers = {}
hostname = None
uuid = None
state = None
shortNames = False
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'hostname':
hostname = value
shortNames = False
if key.lower() == 'uuid':
uuid = value
if key.lower() == 'state':
state = value
peers[hostname] = [uuid, state]
elif row.lower() == 'other names:':
shortNames = True
elif row != '' and shortNames is True:
peers[row] = [uuid, state]
elif row == '':
shortNames = False
return peers
def get_volumes():
out = run_gluster([ 'volume', 'info' ])
volumes = {}
volume = {}
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'volume name':
volume['name'] = value
volume['options'] = {}
volume['quota'] = False
if key.lower() == 'volume id':
volume['id'] = value
if key.lower() == 'status':
volume['status'] = value
if key.lower() == 'transport-type':
volume['transport'] = value
if value.lower().endswith(' (arbiter)'):
if not 'arbiters' in volume:
volume['arbiters'] = []
value = value[:-10]
volume['arbiters'].append(value)
if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
if not 'bricks' in volume:
volume['bricks'] = []
volume['bricks'].append(value)
# Volume options
if '.' in key:
if not 'options' in volume:
volume['options'] = {}
volume['options'][key] = value
if key == 'features.quota' and value == 'on':
volume['quota'] = True
else:
if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
if len(volume) > 0:
volumes[volume['name']] = volume
volume = {}
return volumes
def get_quotas(name, nofail):
quotas = {}
if nofail:
out = run_gluster_nofail([ 'volume', 'quota', name, 'list' ])
if not out:
return quotas
else:
out = run_gluster([ 'volume', 'quota', name, 'list' ])
for row in out.split('\n'):
if row[:1] == '/':
q = re.split('\s+', row)
quotas[q[0]] = q[1]
return quotas
def wait_for_peer(host):
for x in range(0, 4):
peers = get_peers()
if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
return True
time.sleep(1)
return False
def probe(host, myhostname):
global module
out = run_gluster([ 'peer', 'probe', host ])
if out.find('localhost') == -1 and not wait_for_peer(host):
module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
changed = True
def probe_all_peers(hosts, peers, myhostname):
for host in hosts:
host = host.strip() # Clean up any extra space for exact comparison
if host not in peers:
probe(host, myhostname)
def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
args = [ 'volume', 'create' ]
args.append(name)
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
if arbiter:
args.append('arbiter')
args.append(str(arbiter))
if disperse:
args.append('disperse')
args.append(str(disperse))
if redundancy:
args.append('redundancy')
args.append(str(redundancy))
args.append('transport')
args.append(transport)
for brick in bricks:
for host in hosts:
args.append(('%s:%s' % (host, brick)))
if force:
args.append('force')
run_gluster(args)
def start_volume(name):
run_gluster([ 'volume', 'start', name ])
def stop_volume(name):
run_gluster_yes([ 'volume', 'stop', name ])
def set_volume_option(name, option, parameter):
run_gluster([ 'volume', 'set', name, option, parameter ])
def add_bricks(name, new_bricks, stripe, replica, force):
args = [ 'volume', 'add-brick', name ]
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
args.extend(new_bricks)
if force:
args.append('force')
run_gluster(args)
def do_rebalance(name):
run_gluster([ 'volume', 'rebalance', name, 'start' ])
def enable_quota(name):
run_gluster([ 'volume', 'quota', name, 'enable' ])
def set_quota(name, directory, value):
run_gluster([ 'volume', 'quota', name, 'limit-usage', directory, value ])
def main():
### MAIN ###
global module
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, default=None, aliases=['volume']),
state=dict(required=True, choices=[ 'present', 'absent', 'started', 'stopped', 'rebalanced' ]),
cluster=dict(required=False, default=None, type='list'),
host=dict(required=False, default=None),
stripes=dict(required=False, default=None, type='int'),
replicas=dict(required=False, default=None, type='int'),
arbiters=dict(required=False, default=None, type='int'),
disperses=dict(required=False, default=None, type='int'),
redundancies=dict(required=False, default=None, type='int'),
transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]),
bricks=dict(required=False, default=None, aliases=['brick']),
start_on_create=dict(required=False, default=True, type='bool'),
rebalance=dict(required=False, default=False, type='bool'),
options=dict(required=False, default={}, type='dict'),
quota=dict(required=False),
directory=dict(required=False, default=None),
force=dict(required=False, default=False, type='bool'),
)
)
global glusterbin
glusterbin = module.get_bin_path('gluster', True)
changed = False
action = module.params['state']
volume_name = module.params['name']
cluster= module.params['cluster']
brick_paths = module.params['bricks']
stripes = module.params['stripes']
replicas = module.params['replicas']
arbiters = module.params['arbiters']
disperses = module.params['disperses']
redundancies = module.params['redundancies']
transport = module.params['transport']
myhostname = module.params['host']
start_on_create = module.boolean(module.params['start_on_create'])
rebalance = module.boolean(module.params['rebalance'])
force = module.boolean(module.params['force'])
if not myhostname:
myhostname = socket.gethostname()
# Clean up if last element is empty. Consider that yml can look like this:
# cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
cluster = cluster[0:-1]
if cluster is None or cluster[0] == '':
cluster = [myhostname]
if brick_paths is not None and "," in brick_paths:
brick_paths = brick_paths.split(",")
else:
brick_paths = [brick_paths]
options = module.params['options']
quota = module.params['quota']
directory = module.params['directory']
# get current state info
peers = get_peers()
volumes = get_volumes()
quotas = {}
if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
quotas = get_quotas(volume_name, True)
# do the work!
if action == 'absent':
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
run_gluster_yes([ 'volume', 'delete', volume_name ])
changed = True
if action == 'present':
probe_all_peers(cluster, peers, myhostname)
# create if it doesn't exist
if volume_name not in volumes:
create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
volumes = get_volumes()
changed = True
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
start_volume(volume_name)
changed = True
# switch bricks
new_bricks = []
removed_bricks = []
all_bricks = []
for node in cluster:
for brick_path in brick_paths:
brick = '%s:%s' % (node, brick_path)
all_bricks.append(brick)
if brick not in volumes[volume_name]['bricks']:
new_bricks.append(brick)
# this module does not yet remove bricks, but we check those anyways
for brick in volumes[volume_name]['bricks']:
if brick not in all_bricks:
removed_bricks.append(brick)
if new_bricks:
add_bricks(volume_name, new_bricks, stripes, replicas, force)
changed = True
# handle quotas
if quota:
if not volumes[volume_name]['quota']:
enable_quota(volume_name)
quotas = get_quotas(volume_name, False)
if directory not in quotas or quotas[directory] != quota:
set_quota(volume_name, directory, quota)
changed = True
# set options
for option in options.keys():
if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
set_volume_option(volume_name, option, options[option])
changed = True
else:
module.fail_json(msg='failed to create volume %s' % volume_name)
if action != 'absent' and volume_name not in volumes:
module.fail_json(msg='volume not found %s' % volume_name)
if action == 'started':
if volumes[volume_name]['status'].lower() != 'started':
start_volume(volume_name)
changed = True
if action == 'stopped':
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
changed = True
if changed:
volumes = get_volumes()
if rebalance:
do_rebalance(volume_name)
facts = {}
facts['glusterfs'] = { 'peers': peers, 'volumes': volumes, 'quotas': quotas }
module.exit_json(changed=changed, ansible_facts=facts)
if __name__ == '__main__':
main()
|
lewiskan/heron
|
refs/heads/master
|
heron/instance/tests/python/network/st_stmgr_client_unittest.py
|
10
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import unittest
from heron.common.src.python.network import StatusCode
from heron.instance.tests.python.network.mock_generator import MockSTStmgrClient
import heron.common.tests.python.mock_protobuf as mock_protobuf
class STStmgrClientTest(unittest.TestCase):
def setUp(self):
self.stmgr_client = MockSTStmgrClient()
def tearDown(self):
self.stmgr_client = None
def test_on_connect_ok(self):
self.stmgr_client.on_connect(StatusCode.OK)
self.assertTrue(self.stmgr_client.register_msg_called)
# timeout_task should be included in timer_tasks
self.assertEqual(len(self.stmgr_client.looper.timer_tasks), 1)
def test_on_connect_error(self):
self.stmgr_client.on_connect(StatusCode.CONNECT_ERROR)
self.assertEqual(len(self.stmgr_client.looper.timer_tasks), 1)
self.assertEqual(self.stmgr_client.looper.timer_tasks[0][1].__name__, 'start_connect')
def test_on_response(self):
with self.assertRaises(RuntimeError):
self.stmgr_client.on_response(StatusCode.INVALID_PACKET, None, None)
with self.assertRaises(RuntimeError):
self.stmgr_client.on_response(StatusCode.OK, None, mock_protobuf.get_mock_instance())
self.stmgr_client.on_response(StatusCode.OK, None, mock_protobuf.get_mock_register_response())
self.assertTrue(self.stmgr_client.handle_register_response_called)
def test_on_error(self):
self.stmgr_client.on_error()
self.assertEqual(len(self.stmgr_client.looper.timer_tasks), 1)
self.assertEqual(self.stmgr_client.looper.timer_tasks[0][1].__name__, 'start_connect')
|
ajgorgas/uip-pc3
|
refs/heads/master
|
semana5/clase5.py
|
3
|
# CLASS 5
# Author: Abdel G. Martinez L.
#
# Instructions: Given a list of positive numbers, return the largest number in the list
def find_max(L):
max = 0
for x in L:
if x > max:
max = x
return max
if __name__ == '__main__':
L = (17, 20, 29, 16)
result = find_max(L)
print(result)
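Note that Python's built-in max gives the same result for the stated contract of positive numbers (find_max above would return 0 for an all-negative list, since max starts at 0):

L = (17, 20, 29, 16)
print(max(L))  # 29, matching find_max(L) for positive inputs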
|
spencerparkin/AlgebraSystem
|
refs/heads/master
|
MathAlgEvaluate.py
|
1
|
# MathAlgEvaluate.py
from MathAlgorithm import MathAlgorithm
class MathAlgEvaluate(MathAlgorithm):
def __init__(self):
super().__init__()
def Manipulate(self, math_object):
pass
# Evaluate functions here, such as inverse, reverse, sine, cosine, etc.
|
frreiss/tensorflow-fred
|
refs/heads/master
|
tensorflow/python/keras/layers/preprocessing/benchmarks/category_crossing_benchmark.py
|
4
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Keras categorical_encoding preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
from absl import flags
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.layers.preprocessing import category_crossing
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
v2_compat.enable_v2_behavior()
# int_gen yields pairs of random integers, one drawn from [0, 5) and one
# from [0, 7), each with shape (1,).
def int_gen():
for _ in itertools.count(1):
yield (np.random.randint(0, 5, (1,)), np.random.randint(0, 7, (1,)))
class BenchmarkLayer(benchmark.Benchmark):
"""Benchmark the layer forward pass."""
def run_dataset_implementation(self, batch_size):
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_generator(
int_gen, (dtypes.int64, dtypes.int64),
(tensor_shape.TensorShape([1]), tensor_shape.TensorShape([1])))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = sparse_ops.sparse_cross([i[0], i[1]])
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
return avg_time
def bm_layer_implementation(self, batch_size):
input_1 = keras.Input(shape=(1,), dtype=dtypes.int64, name="word")
input_2 = keras.Input(shape=(1,), dtype=dtypes.int64, name="int")
layer = category_crossing.CategoryCrossing()
_ = layer([input_1, input_2])
num_repeats = 5
starts = []
ends = []
for _ in range(num_repeats):
ds = dataset_ops.Dataset.from_generator(
int_gen, (dtypes.int64, dtypes.int64),
(tensor_shape.TensorShape([1]), tensor_shape.TensorShape([1])))
ds = ds.shuffle(batch_size * 100)
ds = ds.batch(batch_size)
num_batches = 5
ds = ds.take(num_batches)
ds = ds.prefetch(num_batches)
starts.append(time.time())
# Benchmarked code begins here.
for i in ds:
_ = layer([i[0], i[1]])
# Benchmarked code ends here.
ends.append(time.time())
avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches
name = "category_crossing|batch_%s" % batch_size
baseline = self.run_dataset_implementation(batch_size)
extras = {
"dataset implementation baseline": baseline,
"delta seconds": (baseline - avg_time),
"delta percent": ((baseline - avg_time) / baseline) * 100
}
self.report_benchmark(
iters=num_repeats, wall_time=avg_time, extras=extras, name=name)
def benchmark_vocab_size_by_batch(self):
for batch in [32, 64, 256]:
self.bm_layer_implementation(batch_size=batch)
if __name__ == "__main__":
test.main()
|
schoolie/bokeh
|
refs/heads/master
|
sphinx/source/docs/user_guide/examples/charts_boxplot_box_color.py
|
8
|
from bokeh.charts import BoxPlot, output_file, show
from bokeh.sampledata.autompg import autompg as df
p = BoxPlot(df, values='mpg', label='cyl', color='#00cccc',
title="MPG Summary (grouped by CYL)")
output_file("boxplot.html")
show(p)
|
dsajkl/reqiop
|
refs/heads/master
|
common/djangoapps/track/tracker.py
|
239
|
"""
Module that tracks analytics events by sending them to different
configurable backends.
The backends can be configured using Django settings as the example
below::
TRACKING_BACKENDS = {
'tracker_name': {
'ENGINE': 'class.name.for.backend',
'OPTIONS': {
'host': ... ,
'port': ... ,
...
}
}
}
"""
import inspect
from importlib import import_module
from dogapi import dog_stats_api
from django.conf import settings
from track.backends import BaseBackend
__all__ = ['send']
backends = {}
def _initialize_backends_from_django_settings():
"""
Initialize the event tracking backends according to the
configuration in django settings
"""
backends.clear()
config = getattr(settings, 'TRACKING_BACKENDS', {})
for name, values in config.iteritems():
        # Ignore empty values to turn off default tracker backends
if values:
engine = values['ENGINE']
options = values.get('OPTIONS', {})
backends[name] = _instantiate_backend_from_name(engine, options)
def _instantiate_backend_from_name(name, options):
"""
Instantiate an event tracker backend from the full module path to
the backend class. Useful when setting backends from configuration
files.
"""
# Parse backend name
try:
parts = name.split('.')
module_name = '.'.join(parts[:-1])
class_name = parts[-1]
except IndexError:
raise ValueError('Invalid event track backend %s' % name)
# Get and verify the backend class
try:
module = import_module(module_name)
cls = getattr(module, class_name)
if not inspect.isclass(cls) or not issubclass(cls, BaseBackend):
raise TypeError
except (ValueError, AttributeError, TypeError, ImportError):
raise ValueError('Cannot find event track backend %s' % name)
backend = cls(**options)
return backend
@dog_stats_api.timed('track.send')
def send(event):
"""
Send an event object to all the initialized backends.
"""
dog_stats_api.increment('track.send.count')
for name, backend in backends.iteritems():
with dog_stats_api.timer('track.send.backend.{0}'.format(name)):
backend.send(event)
_initialize_backends_from_django_settings()
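# A minimal usage sketch (illustrative only: the backend path, OPTIONS and
# event dict below are assumptions, not part of this module):
#
#   TRACKING_BACKENDS = {
#       'logger': {
#           'ENGINE': 'track.backends.logger.LoggerBackend',
#           'OPTIONS': {'name': 'tracking'},
#       },
#   }
#
#   from track import tracker
#   tracker.send({'event_type': 'problem_check', 'username': 'student'})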
|
ramisetti/lammps
|
refs/heads/master
|
tools/i-pi/ipi/inputs/ensembles.py
|
41
|
"""Deals with creating the ensembles class.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
InputEnsemble: Deals with creating the Ensemble object from a file, and
writing the checkpoints.
"""
import numpy as np
import ipi.engine.thermostats
import ipi.engine.initializer
import ipi.engine.barostats
from ipi.engine.ensembles import *
from ipi.utils.inputvalue import *
from ipi.inputs.barostats import *
from ipi.inputs.thermostats import *
from ipi.inputs.initializer import *
from ipi.utils.units import *
__all__ = ['InputEnsemble']
class InputEnsemble(Input):
"""Ensemble input class.
Handles generating the appropriate ensemble class from the xml input file,
and generating the xml checkpoint tags and data from an instance of the
object.
Attributes:
mode: An optional string giving the mode of ensemble to be simulated.
Defaults to 'unknown'.
Fields:
thermostat: The thermostat to be used for constant temperature dynamics.
barostat: The barostat to be used for constant pressure or stress
dynamics.
timestep: An optional float giving the size of the timestep in atomic
units. Defaults to 1.0.
temperature: An optional float giving the temperature in Kelvin. Defaults
to 1.0.
pressure: An optional float giving the external pressure in atomic units.
Defaults to 1.0.
fixcom: An optional boolean which decides whether the centre of mass
motion will be constrained or not. Defaults to False.
replay_file: An optional string that gives an input file name to get
a trajectory to be re-run.
"""
attribs={"mode" : (InputAttribute, {"dtype" : str,
"help" : "The ensemble that will be sampled during the simulation. 'replay' means that a simulation is restarted from a previous simulation.",
"options" : ['nve', 'nvt', 'npt', 'replay']}) }
fields={"thermostat" : (InputThermo, {"default" : input_default(factory=ipi.engine.thermostats.Thermostat),
"help" : "The thermostat for the atoms, keeps the atom velocity distribution at the correct temperature."} ),
"barostat" : (InputBaro, {"default" : input_default(factory=ipi.engine.barostats.Barostat),
"help" : InputBaro.default_help}),
"timestep": (InputValue, {"dtype" : float,
"default" : 1.0,
"help" : "The time step.",
"dimension" : "time"}),
"temperature" : (InputValue, {"dtype" : float,
"default" : 1.0,
"help" : "The temperature of the system.",
"dimension" : "temperature"}),
"pressure" : (InputValue, {"dtype" : float,
"default" : 1.0,
"help" : "The external pressure.",
"dimension" : "pressure"}),
"fixcom": (InputValue, {"dtype" : bool,
"default" : True,
"help" : "This describes whether the centre of mass of the particles is fixed."}),
"replay_file": (InputInitFile, {"default" : input_default(factory=ipi.engine.initializer.InitBase),
"help" : "This describes the location to read a trajectory file from."})
}
default_help = "Holds all the information that is ensemble specific, such as the temperature and the external pressure, and the thermostats and barostats that control it."
default_label = "ENSEMBLE"
def store(self, ens):
"""Takes an ensemble instance and stores a minimal representation of it.
Args:
ens: An ensemble object.
"""
super(InputEnsemble,self).store(ens)
if type(ens) is ReplayEnsemble:
self.mode.store("rerun")
tens = 0
elif type(ens) is NVEEnsemble:
self.mode.store("nve")
tens = 1
elif type(ens) is NVTEnsemble:
self.mode.store("nvt")
tens = 2
elif type(ens) is NPTEnsemble:
self.mode.store("npt")
tens = 3
self.timestep.store(ens.dt)
self.temperature.store(ens.temp)
if tens == 0:
self.replay_file.store(ens.intraj)
if tens > 1:
self.thermostat.store(ens.thermostat)
self.fixcom.store(ens.fixcom)
if tens > 2:
self.barostat.store(ens.barostat)
if tens == 3:
self.pressure.store(ens.pext)
def fetch(self):
"""Creates an ensemble object.
Returns:
An ensemble object of the appropriate mode and with the appropriate
objects given the attributes of the InputEnsemble object.
"""
super(InputEnsemble,self).fetch()
if self.mode.fetch() == "nve" :
ens = NVEEnsemble(dt=self.timestep.fetch(),
temp=self.temperature.fetch(), fixcom=self.fixcom.fetch())
elif self.mode.fetch() == "nvt" :
ens = NVTEnsemble(dt=self.timestep.fetch(),
temp=self.temperature.fetch(), thermostat=self.thermostat.fetch(), fixcom=self.fixcom.fetch())
elif self.mode.fetch() == "npt" :
ens = NPTEnsemble(dt=self.timestep.fetch(),
temp=self.temperature.fetch(), thermostat=self.thermostat.fetch(), fixcom=self.fixcom.fetch(),
pext=self.pressure.fetch(), barostat=self.barostat.fetch() )
elif self.mode.fetch() == "replay":
ens = ReplayEnsemble(dt=self.timestep.fetch(),
temp=self.temperature.fetch(),fixcom=False,intraj=self.replay_file.fetch() )
else:
raise ValueError("'" + self.mode.fetch() + "' is not a supported ensemble mode.")
return ens
def check(self):
"""Function that deals with optional arguments.
      Makes sure that if the ensemble requires a thermostat or barostat,
      they have been defined by the user and not left at the default values.
"""
super(InputEnsemble,self).check()
if self.mode.fetch() == "nvt":
if self.thermostat._explicit == False:
raise ValueError("No thermostat tag supplied for NVT simulation")
if self.mode.fetch() == "npt":
if self.thermostat._explicit == False:
raise ValueError("No thermostat tag supplied for NPT simulation")
if self.barostat._explicit == False:
raise ValueError("No barostat tag supplied for NPT simulation")
if self.barostat.thermostat._explicit == False:
raise ValueError("No thermostat tag supplied in barostat for NPT simulation")
if self.timestep.fetch() <= 0:
raise ValueError("Non-positive timestep specified.")
if self.temperature.fetch() <= 0:
raise ValueError("Non-positive temperature specified.")
if self.mode.fetch() == "npt":
if not self.pressure._explicit:
raise ValueError("Pressure should be supplied for constant pressure simulation")
if self.mode.fetch() == "npt" or self.mode.fetch() == "nvt":
if not self.temperature._explicit:
raise ValueError("Temperature should be supplied for constant temperature simulation")
|
sidzan/netforce
|
refs/heads/master
|
netforce_mfg/setup.py
|
4
|
#!/usr/bin/env python3
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from setuptools import setup
setup(
name="netforce_mfg",
version="3.1.0",
description="Manufacturing module",
)
|
ol-loginov/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnresolvedReferencesInspection/dunderPackage.py
|
83
|
__package__ #pass
|
zchking/odoo
|
refs/heads/8.0
|
addons/account/wizard/account_reconcile.py
|
226
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round
import openerp.addons.decimal_precision as dp
class account_move_line_reconcile(osv.osv_memory):
"""
    Account move line reconcile wizard: it checks whether a write-off entry is needed and either creates it or reconciles the lines directly.
"""
_name = 'account.move.line.reconcile'
_description = 'Account move line reconcile'
_columns = {
'trans_nbr': fields.integer('# of Transaction', readonly=True),
'credit': fields.float('Credit amount', readonly=True, digits_compute=dp.get_precision('Account')),
'debit': fields.float('Debit amount', readonly=True, digits_compute=dp.get_precision('Account')),
'writeoff': fields.float('Write-Off amount', readonly=True, digits_compute=dp.get_precision('Account')),
}
def default_get(self, cr, uid, fields, context=None):
res = super(account_move_line_reconcile, self).default_get(cr, uid, fields, context=context)
data = self.trans_rec_get(cr, uid, context['active_ids'], context)
if 'trans_nbr' in fields:
res.update({'trans_nbr':data['trans_nbr']})
if 'credit' in fields:
res.update({'credit':data['credit']})
if 'debit' in fields:
res.update({'debit':data['debit']})
if 'writeoff' in fields:
res.update({'writeoff':data['writeoff']})
return res
def trans_rec_get(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
credit = debit = 0
account_id = False
count = 0
for line in account_move_line_obj.browse(cr, uid, context['active_ids'], context=context):
if not line.reconcile_id and not line.reconcile_id.id:
count += 1
credit += line.credit
debit += line.debit
account_id = line.account_id.id
precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
writeoff = float_round(debit-credit, precision_digits=precision)
credit = float_round(credit, precision_digits=precision)
debit = float_round(debit, precision_digits=precision)
return {'trans_nbr': count, 'account_id': account_id, 'credit': credit, 'debit': debit, 'writeoff': writeoff}
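    # Worked example (illustrative): selecting two lines with debit=100.00 and
    # credit=99.99 at 2-digit 'Account' precision yields writeoff=0.01, the
    # residual the write-off wizard below offers to post.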
def trans_rec_addendum_writeoff(self, cr, uid, ids, context=None):
return self.pool.get('account.move.line.reconcile.writeoff').trans_rec_addendum(cr, uid, ids, context)
def trans_rec_reconcile_partial_reconcile(self, cr, uid, ids, context=None):
return self.pool.get('account.move.line.reconcile.writeoff').trans_rec_reconcile_partial(cr, uid, ids, context)
def trans_rec_reconcile_full(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
period_obj = self.pool.get('account.period')
date = False
period_id = False
journal_id= False
account_id = False
if context is None:
context = {}
date = time.strftime('%Y-%m-%d')
ids = period_obj.find(cr, uid, dt=date, context=context)
if ids:
period_id = ids[0]
account_move_line_obj.reconcile(cr, uid, context['active_ids'], 'manual', account_id,
period_id, journal_id, context=context)
return {'type': 'ir.actions.act_window_close'}
class account_move_line_reconcile_writeoff(osv.osv_memory):
"""
    It opens the write-off wizard form, in which the user can define the journal, account and analytic account for the reconciliation
"""
_name = 'account.move.line.reconcile.writeoff'
_description = 'Account move line reconcile (writeoff)'
_columns = {
'journal_id': fields.many2one('account.journal','Write-Off Journal', required=True),
'writeoff_acc_id': fields.many2one('account.account','Write-Off account', required=True),
'date_p': fields.date('Date'),
'comment': fields.char('Comment', required=True),
'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account', domain=[('parent_id', '!=', False)]),
}
_defaults = {
'date_p': lambda *a: time.strftime('%Y-%m-%d'),
'comment': _('Write-off'),
}
def trans_rec_addendum(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','account_move_line_reconcile_writeoff')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Reconcile Writeoff'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.move.line.reconcile.writeoff',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
def trans_rec_reconcile_partial(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
account_move_line_obj.reconcile_partial(cr, uid, context['active_ids'], 'manual', context=context)
return {'type': 'ir.actions.act_window_close'}
def trans_rec_reconcile(self, cr, uid, ids, context=None):
context = dict(context or {})
account_move_line_obj = self.pool.get('account.move.line')
period_obj = self.pool.get('account.period')
if context is None:
context = {}
data = self.read(cr, uid, ids,context=context)[0]
account_id = data['writeoff_acc_id'][0]
context['date_p'] = data['date_p']
journal_id = data['journal_id'][0]
context['comment'] = data['comment']
if data['analytic_id']:
context['analytic_id'] = data['analytic_id'][0]
if context['date_p']:
date = context['date_p']
ids = period_obj.find(cr, uid, dt=date, context=context)
if ids:
period_id = ids[0]
account_move_line_obj.reconcile(cr, uid, context['active_ids'], 'manual', account_id,
period_id, journal_id, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
siosio/intellij-community
|
refs/heads/master
|
python/testData/mover/simpleBlankLines.py
|
83
|
if a:
a = 1
b =<caret> 2
c = 3
|
ict-felix/stack
|
refs/heads/master
|
vt_manager/src/python/vt_manager/models/Ip4Range.py
|
3
|
from django.db import models
from django.contrib import auth
from threading import Lock
import inspect
from vt_manager.utils.IP4Utils import IP4Utils
from vt_manager.utils.MutexStore import MutexStore
from vt_manager.models.Ip4Slot import Ip4Slot
'''
@author: msune
'''
class Ip4Range(models.Model):
"""Ip4Range"""
class Meta:
"""Meta Class for your model."""
app_label = 'vt_manager'
'''
Private attributes
'''
#Range name
name = models.CharField(max_length = 255, default="", verbose_name = "Range name", unique=True)
isGlobal = models.BooleanField(verbose_name="Global range",default=1, help_text="Global ranges will be used by servers which are not subscribed to any specific range")
#Range parameters
startIp = models.IPAddressField(verbose_name = "Range start IP", editable = True, validators = [IP4Utils.checkValidIp])
endIp = models.IPAddressField(verbose_name = "Range end IP", editable = True, validators = [IP4Utils.checkValidIp])
netMask = models.IPAddressField(verbose_name = "Network mask", editable = True, validators = [IP4Utils.checkValidNetmask])
#Networking parameters associated to this range
gw = models.IPAddressField(blank = True, null=True, validators = [IP4Utils.checkValidIp])
dns1 = models.IPAddressField(blank = True, null=True, default="10.216.24.2", validators = [IP4Utils.checkValidIp], help_text="DNS1 must be the OFELIA internal 10.216.24.2. It will be used by the VMs to resolve the LDAP address and authenticate users.")
dns2 = models.IPAddressField(blank = True, null=True, validators = [IP4Utils.checkValidIp])
#Pool of ips both assigned and excluded (particular case of assignment)
ips = models.ManyToManyField('Ip4Slot', blank = True, null = True, editable = False, related_name = "Ip4Range")
nextAvailableIp = models.IPAddressField(blank = True, null=True, validators = [IP4Utils.checkValidIp],editable=False,verbose_name="Next available Ip4 slot")
#Statistics
numberOfSlots = models.BigIntegerField(blank = True, null=True, editable = False)
#Mutex
mutex = None
#Defines soft or hard state of the range
doSave = True
'''
Private methods
'''
@staticmethod
def constructor(name,startIp,endIp,netmask,gw,dns1,dns2,isGlobal=True,save=True):
self = Ip4Range()
try:
#Default constructor
IP4Utils.checkValidIp(startIp)
IP4Utils.checkValidIp(endIp)
IP4Utils.checkValidNetmask(netmask)
IP4Utils.checkValidIp(gw)
IP4Utils.checkValidIp(dns1)
if not dns2 == "":
IP4Utils.checkValidIp(dns2)
self.name = name
self.isGlobal= isGlobal
self.startIp = startIp
self.endIp = endIp
self.netMask = netmask
self.gw = gw
self.dns1 = dns1
if not dns2 == "":
self.dns2 = dns2
#Create an iterator
it= IP4Utils.getIpIterator(self.startIp,self.endIp,self.netMask)
self.nextAvailableIp = it.getNextIp()
#Number of Slots
try:
self.numberOfSlots = IP4Utils.getNumberOfSlotsInRange(startIp,endIp,netmask)
except Exception as e:
print "Exception doing slot calculation"+str(e)
self.numberOfSlots = -1
self.doSave = save
if save:
self.save()
except Exception as e:
#self.delete()
raise e
return self
'''
Private methods
'''
def __setStartIp(self, value):
IP4Utils.checkValidIp(value)
self.startIp = value
self.autoSave()
def __setEndIp(self, value):
IP4Utils.checkValidIp(value)
self.endIp = value
self.autoSave()
def __setNetmask(self, value):
IP4Utils.checkValidNetmask(value)
self.netMask = value
self.autoSave()
def __isIpAvailable(self,ip):
return self.ips.filter(ip=ip).count() == 0
def autoSave(self):
if self.doSave:
self.save()
'''
Public methods
'''
def getLockIdentifier(self):
#Uniquely identifies object by a key
return inspect.currentframe().f_code.co_filename+str(self)+str(self.id)
def getName(self):
return self.name
def getIsGlobal(self):
return self.isGlobal
def getStartIp(self):
return self.startIp
def getEndIp(self):
return self.endIp
def getNetmask(self):
return self.netMask
def getGatewayIp(self):
return self.gw
def getDNS1(self):
return self.dns1
def getDNS2(self):
return self.dns2
def getExcludedIps(self):
return self.ips.filter(isExcluded=True).order_by('ip')
def getAllocatedIps(self):
return self.ips.filter(isExcluded=False).order_by('ip')
def getNumberOfSlots(self):
return int(self.numberOfSlots)
def getPercentageRangeUsage(self):
        if not self.numberOfSlots == -1:
            return (float(self.ips.all().count())/self.numberOfSlots)*100
return -1
def destroy(self):
with MutexStore.getObjectLock(self.getLockIdentifier()):
if self.ips.filter(isExcluded=False).count() > 0:
raise Exception("Cannot delete Ip4Range. Range still contains allocated IPs")
for ip in self.ips.all():
#Delete excluded ips
ip.delete()
self.delete()
def allocateIp(self):
'''
Allocates an IP address of the range
'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
#Implements first fit algorithm
if self.nextAvailableIp == None:
raise Exception("Could not allocate any IP")
newIp = Ip4Slot.ipFactory(self,self.nextAvailableIp)
self.ips.add(newIp)
#Try to find new slot
try:
it= IP4Utils.getIpIterator(self.nextAvailableIp,self.endIp,self.netMask)
while True:
ip = it.getNextIp()
if self.__isIpAvailable(ip):
break
self.nextAvailableIp = ip
except Exception as e:
self.nextAvailableIp = None
self.autoSave()
return newIp
def releaseIp(self,ipObj):
'''
Releases an IP address of the range (but it does not destroy the object!!)
'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
ipStr = ipObj.getIp()
if not self.ips.filter(ip=ipStr,isExcluded=False).count() > 0:
raise Exception("Cannot release Ip %s. Reason may be is unallocated or is an excluded Ip",ipStr)
self.ips.remove(ipObj)
#Determine new available Ip
if not self.nextAvailableIp == None:
if IP4Utils.compareIps(ipStr,self.nextAvailableIp) > 0:
#Do nothing
pass
else:
self.nextAvailableIp = ipStr
else:
#No more gaps
self.nextAvailableIp = ipStr
self.autoSave()
def addExcludedIp(self,ipStr,comment):
'''
Add an IP to the exclusion list
'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
            #Check the Ip is not already allocated or excluded
            if not self.__isIpAvailable(ipStr):
                raise Exception("Ip already allocated or marked as excluded")
            #then forbid Ips that fall outside the range
if not IP4Utils.isIpInRange(ipStr,self.startIp,self.endIp):
raise Exception("Ip is not in range")
newIp = Ip4Slot.excludedIpFactory(self,ipStr,comment)
self.ips.add(newIp)
            #if it was the next available slot, shift the pointer
if self.nextAvailableIp == ipStr:
try:
it= IP4Utils.getIpIterator(self.nextAvailableIp,self.endIp,self.netMask)
while True:
ip = it.getNextIp()
if self.__isIpAvailable(ip):
break
self.nextAvailableIp = ip
except Exception as e:
self.nextAvailableIp = None
self.autoSave()
def removeExcludedIp(self,ipObj):
'''
Deletes an IP from the exclusion list (but it does not destroy the object!!)
'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
ipStr = ipObj.getIp()
if not self.ips.get(ip=ipStr).isExcludedIp():
raise Exception("Cannot release Ip. Reason may be is unallocated or is not excluded Ip")
self.ips.remove(ipObj)
#Determine new available Ip
if not self.nextAvailableIp == None:
if IP4Utils.compareIps(ipStr,self.nextAvailableIp) > 0:
#Do nothing
pass
else:
self.nextAvailableIp = ipStr
else:
#No more gaps
self.nextAvailableIp = ipStr
self.autoSave()
'''
Static methods
'''
@staticmethod
def getAllocatedGlobalNumberOfSlots():
allocated = 0
for range in Ip4Range.objects.filter(isGlobal=True):
allocated += range.ips.all().count()
return allocated
@staticmethod
def getGlobalNumberOfSlots():
slots = 0
for range in Ip4Range.objects.filter(isGlobal=True):
slots += range.numberOfSlots
return int(slots)
def rebasePointer(self):
'''Used when pointer has lost track mostly due to bug #'''
with MutexStore.getObjectLock(self.getLockIdentifier()):
print "Rebasing pointer of range: "+str(self.id)
print "Current pointer point to: "+self.nextAvailableIp
try:
it= IP4Utils.getIpIterator(self.startIp,self.endIp,self.netMask)
while True:
ip = it.getNextIp()
if self.__isIpAvailable(ip):
break
self.nextAvailableIp = ip
except Exception as e:
self.nextAvailableIp = None
print "Pointer will be rebased to: "+self.nextAvailableIp
self.save()
@staticmethod
def rebasePointers():
'''Used when pointer has lost track mostly due to bug #'''
for range in Ip4Range.objects.all():
range.rebasePointer()
#slot = RangeSlot("127.0.0.1","127.0.0.255","255.255.255.0")
#slot.allocateIp()
#try:
# slot.releaseIp("d")
#except Exception:
# pass
#slot.allocateIp()
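#A minimal usage sketch (illustrative values; assumes a configured Django
#environment for vt_manager):
#ipRange = Ip4Range.constructor("mgmt","192.168.0.10","192.168.0.250",
#                               "255.255.255.0","192.168.0.1","10.216.24.2","")
#ip = ipRange.allocateIp()   #first-fit allocation of the lowest free slot
#ipRange.releaseIp(ip)       #slot becomes available for reallocation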
|
zhongzw/skia-sdl
|
refs/heads/master
|
third_party/externals/gyp/test/win/gyptest-link-warnings-as-errors.py
|
239
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure linker warnings-as-errors setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('warn-as-error.gyp', chdir=CHDIR)
test.build('warn-as-error.gyp', 'test_on', chdir=CHDIR, status=1)
test.build('warn-as-error.gyp', 'test_off', chdir=CHDIR)
test.build('warn-as-error.gyp', 'test_default', chdir=CHDIR)
test.pass_test()
|
ishank08/scikit-learn
|
refs/heads/master
|
examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py
|
102
|
"""
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries of
Locality Sensitive Hashing Forest behaves as the number of candidates and the
number of estimators (trees) vary.
In the first plot, accuracy is measured as a function of the number of
candidates. Here, the term "number of candidates" refers to the maximum bound
on the number of distinct points retrieved from each tree to calculate the
distances. Nearest neighbors are selected from this pool of candidates. The
number of estimators is maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. The number of
trees is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
|
plotly/plotly.py
|
refs/heads/master
|
packages/python/plotly/plotly/graph_objs/splom/marker/colorbar/title/_font.py
|
2
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "splom.marker.colorbar.title"
_path_str = "splom.marker.colorbar.title.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.splom.marker.c
olorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.splom.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.colorbar.title.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
0x46616c6b/ansible-modules-core
|
refs/heads/devel
|
database/__init__.py
|
12133432
| |
prymatex/SublimeCodeIntel
|
refs/heads/master
|
arch/_linux_libcpp6_x86_py33/__init__.py
|
12133432
| |
alexshin/django-guardian
|
refs/heads/devel
|
example_project/integration_tests/models.py
|
12133432
| |
tsoporan/tehorng
|
refs/heads/master
|
reporting/__init__.py
|
12133432
| |
moreati/django
|
refs/heads/master
|
django/template/loaders/__init__.py
|
12133432
| |
cecep-edu/edx-platform
|
refs/heads/eucalyptus.2
|
openedx/core/djangoapps/credentials/tests/__init__.py
|
12133432
| |
SCSSG/Odoo-SCS
|
refs/heads/master
|
addons/website/__openerp__.py
|
311
|
{
'name': 'Website Builder',
'category': 'Website',
'summary': 'Build Your Enterprise Website',
'website': 'https://www.odoo.com/page/website-builder',
'version': '1.0',
'description': """
OpenERP Website CMS
===================
""",
'author': 'OpenERP SA',
'depends': ['web', 'share', 'mail'],
'installable': True,
'data': [
'data/data.xml',
'security/ir.model.access.csv',
'security/ir_ui_view.xml',
'views/website_templates.xml',
'views/website_views.xml',
'views/snippets.xml',
'views/themes.xml',
'views/res_config.xml',
'views/ir_actions.xml',
'views/website_backend_navbar.xml',
],
'demo': [
'data/demo.xml',
],
'qweb': ['static/src/xml/website.backend.xml'],
'application': True,
}
|
hernandito/SickRage
|
refs/heads/master
|
lib/hachoir_parser/program/prc.py
|
86
|
"""
PRC (Palm resource) parser.
Author: Sebastien Ponce
Creation date: 29 october 2008
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
UInt16, UInt32, TimestampMac32,
String, RawBytes)
from hachoir_core.endian import BIG_ENDIAN
class PRCHeader(FieldSet):
static_size = 78*8
def createFields(self):
yield String(self, "name", 32, "Name")
yield UInt16(self, "flags", "Flags")
yield UInt16(self, "version", "Version")
yield TimestampMac32(self, "create_time", "Creation time")
yield TimestampMac32(self, "mod_time", "Modification time")
yield TimestampMac32(self, "backup_time", "Backup time")
yield UInt32(self, "mod_num", "mod num")
yield UInt32(self, "app_info", "app info")
yield UInt32(self, "sort_info", "sort info")
yield UInt32(self, "type", "type")
yield UInt32(self, "id", "id")
yield UInt32(self, "unique_id_seed", "unique_id_seed")
yield UInt32(self, "next_record_list", "next_record_list")
yield UInt16(self, "num_records", "num_records")
class ResourceHeader(FieldSet):
static_size = 10*8
def createFields(self):
yield String(self, "name", 4, "Name of the resource")
yield UInt16(self, "flags", "ID number of the resource")
yield UInt32(self, "offset", "Pointer to the resource data")
def createDescription(self):
return "Resource Header (%s)" % self["name"]
class PRCFile(Parser):
PARSER_TAGS = {
"id": "prc",
"category": "program",
"file_ext": ("prc", ""),
"min_size": ResourceHeader.static_size, # At least one program header
"mime": (
u"application/x-pilot-prc",
u"application/x-palmpilot"),
"description": "Palm Resource File"
}
endian = BIG_ENDIAN
def validate(self):
# FIXME: Implement the validation function!
return False
def createFields(self):
# Parse header and program headers
yield PRCHeader(self, "header", "Header")
lens = []
firstOne = True
poff = 0
for index in xrange(self["header/num_records"].value):
r = ResourceHeader(self, "res_header[]")
if firstOne:
firstOne = False
else:
lens.append(r["offset"].value - poff)
poff = r["offset"].value
yield r
lens.append(self.size/8 - poff)
yield UInt16(self, "placeholder", "Place holder bytes")
for i in range(len(lens)):
yield RawBytes(self, "res[]", lens[i], '"'+self["res_header["+str(i)+"]/name"].value+"\" Resource")
def createDescription(self):
return "Palm Resource file"
|
akionux/OpenFOAM-2.3.x
|
refs/heads/master
|
tutorials/multiphase/stirringInterPTFoam/laminar/stirringCulture/genKinematicCloudPositions.py
|
1
|
#!/usr/bin/env python
# create parcel injections
# http://www.geocities.co.jp/SiliconValley-SantaClara/1183/study/OpenFOAM/injection.html
from random import random
# input start
minx = 0
maxx = 0.032
miny = 0.0
maxy = 0.020
minz = 0
maxz = 0.017
nx = 32
ny = 20
nz = 17
turbdx = 3.2e-5
turbdy = 2.0e-5
turbdz = 1.7e-5
d = 1e-3
v = (0, -1, 0)
rho = 964
mdot = 1
# input end
rangex = maxx - minx
rangey = maxy - miny
rangez = maxz - minz
dx = rangex/(nx + 1)
dy = rangey/(ny + 1)
dz = rangez/(nz + 1)
print('''/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2.0.1 |
| \\ / A nd | Web: www.OpenFOAM.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class vectorField;
object kinematicCloudPositions;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
(''')
for i in range(0, nx):
for j in range(0, ny):
for k in range(0, nz):
x = minx + dx*(i + 1) + turbdx*random()
y = miny + dy*(j + 1) + turbdy*random()
z = minz + dz*(k + 1) + turbdz*random()
#print "(%f %f %f) (%f %f %f) %e %f %e" % \
# (x, y, z, v[0], v[1], v[2], d, rho, mdot)
print "(%f %f %f)" % (x, y, z)
print(''')
// ************************************************************************* //''')
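# Usage sketch (assumed workflow, matching the FoamFile object name above):
#   ./genKinematicCloudPositions.py > constant/kinematicCloudPositions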
|
liavkoren/djangoDev
|
refs/heads/master
|
tests/admin_inlines/tests.py
|
3
|
from __future__ import unicode_literals
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
# local test models
from .admin import InnerInline
from .models import (Holder, Inner, Holder2, Inner2, Holder3, Inner3, Person,
OutfitItem, Fashionista, Teacher, Parent, Child, Author, Book, Profile,
ProfileCollection, ParentModelWithCustomPk, ChildModel1, ChildModel2,
Sighting, Novel, Chapter, FootNote, BinaryTree, SomeParentModel,
SomeChildModel)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_inlines.urls")
class TestInline(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
self.change_url = '/admin/admin_inlines/holder/%i/' % holder.id
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
response = self.client.get(self.change_url)
inner_formset = response.context['inline_admin_formsets'][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, 'can_delete must be equal')
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
Inner.objects.create(holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/'
% holder.id)
self.assertContains(response, '<label>Inner readonly label:</label>')
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get('/admin/admin_inlines/author/add/')
# The heading for the m2m inline block uses the right text
self.assertContains(response, '<h2>Author-book relationships</h2>')
# The "add another" label is correct
self.assertContains(response, 'Add another Author-book relationship')
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname='Imelda')
item = OutfitItem.objects.create(name='Shoes')
# Imelda likes shoes, but can't carry her own bags.
data = {
'shoppingweakness_set-TOTAL_FORMS': 1,
'shoppingweakness_set-INITIAL_FORMS': 0,
'shoppingweakness_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'person': person.id,
'max_weight': 0,
'shoppingweakness_set-0-item': item.id,
}
response = self.client.post('/admin/admin_inlines/fashionista/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
def test_tabular_non_field_errors(self):
"""
Ensure that non_field_errors are displayed correctly, including the
right value for colspan. Refs #13510.
"""
data = {
'title_set-TOTAL_FORMS': 1,
'title_set-INITIAL_FORMS': 0,
'title_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'title_set-0-title1': 'a title',
'title_set-0-title2': 'a different title',
}
response = self.client.post('/admin/admin_inlines/titlecollection/add/', data)
# Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist"><li>The two titles must be the same</li></ul></td></tr>')
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# Identically named callable isn't present in the parent ModelAdmin,
# rendering of the add view shouldn't explode
response = self.client.get('/admin/admin_inlines/novel/add/')
self.assertEqual(response.status_code, 200)
# View should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="chapter_set-group">')
def test_callable_lookup(self):
"""Admin inline should invoke local callable when its name is listed in readonly_fields"""
response = self.client.get('/admin/admin_inlines/poll/add/')
self.assertEqual(response.status_code, 200)
# Add parent object view should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="question_set-group">')
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, '<p>Callable in QuestionInline</p>')
def test_help_text(self):
"""
Ensure that the inlines' model field help texts are displayed when
using both the stacked and tabular layouts.
Ref #8190.
"""
response = self.client.get('/admin/admin_inlines/holder4/add/')
self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Awesome tabular help text is awesome.)" title="Awesome tabular help text is awesome." />', 1)
# ReadOnly fields
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Help text for ReadOnlyInline)" title="Help text for ReadOnlyInline" />', 1)
def test_inline_hidden_field_no_column(self):
"""#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
parent = SomeParentModel.objects.create(name='a')
SomeChildModel.objects.create(name='b', position='0', parent=parent)
SomeChildModel.objects.create(name='c', position='1', parent=parent)
response = self.client.get('/admin/admin_inlines/someparentmodel/%s/' % parent.pk)
self.assertNotContains(response, '<td class="field-position">')
self.assertContains(response, (
'<input id="id_somechildmodel_set-1-position" '
'name="somechildmodel_set-1-position" type="hidden" value="1" />'))
def test_non_related_name_inline(self):
"""
Ensure that multiple inlines with related_name='+' have correct form
prefixes. Bug #16838.
"""
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response,
'<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-1-0-name" type="text" class="vTextField" '
'name="-1-0-name" maxlength="100" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-2-0-name" type="text" class="vTextField" '
'name="-2-0-name" maxlength="100" />', html=True)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for locales that use
thousand separators
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/' % holder.id)
inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for models with a
custom primary key field. Bug #18433.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get('/admin/admin_inlines/parentmodelwithcustompk/foo/')
child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
Ensure that an object can be created with inlines when it inherits
another class. Bug #19524.
"""
data = {
'name': 'Martian',
'sighting_set-TOTAL_FORMS': 1,
'sighting_set-INITIAL_FORMS': 0,
'sighting_set-MAX_NUM_FORMS': 0,
'sighting_set-0-place': 'Zone 51',
'_save': 'Save',
}
response = self.client.post('/admin/admin_inlines/extraterrestrial/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = '<input id="id_binarytree_set-MAX_NUM_FORMS" name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
# The total number of forms will remain the same in either case
total_forms_hidden = '<input id="id_binarytree_set-TOTAL_FORMS" name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
response = self.client.get('/admin/admin_inlines/binarytree/add/')
self.assertContains(response, max_forms_input % 3)
self.assertContains(response, total_forms_hidden)
response = self.client.get("/admin/admin_inlines/binarytree/%d/" % bt_head.id)
self.assertContains(response, max_forms_input % 2)
self.assertContains(response, total_forms_hidden)
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get('/admin/admin_inlines/author/add/')
self.assertContains(response,
'<input id="id_nonautopkbook_set-0-rand_pk" name="nonautopkbook_set-0-rand_pk" type="hidden" />',
html=True)
self.assertContains(response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
html=True)
def test_inline_editable_pk(self):
response = self.client.get('/admin/admin_inlines/author/add/')
self.assertContains(response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" name="editablepkbook_set-0-manual_pk" type="text" />',
html=True, count=1)
self.assertContains(response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" name="editablepkbook_set-2-0-manual_pk" type="text" />',
html=True, count=1)
def test_stacked_inline_edit_form_contains_has_original_class(self):
holder = Holder.objects.create(dummy=1)
holder.inner_set.create(dummy=1)
response = self.client.get('/admin/admin_inlines/holder/%s/' % holder.pk)
self.assertContains(
response,
'<div class="inline-related has_original" id="inner_set-0">',
count=1
)
self.assertContains(
response,
'<div class="inline-related" id="inner_set-1">',
count=1
)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_inlines.urls")
class TestInlineMedia(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder3/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_inline_scripts.js')
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
self.assertContains(response, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlineAdminForm(TestCase):
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name='Sally')
john = Parent.objects.create(name='John')
joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_inlines.urls")
class TestInlineProtectedOnDelete(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_deleting_inline_with_protected_delete_does_not_validate(self):
lotr = Novel.objects.create(name='Lord of the rings')
chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
change_url = '/admin/admin_inlines/novel/%i/' % lotr.id
response = self.client.get(change_url)
data = {
'name': lotr.name,
'chapter_set-TOTAL_FORMS': 1,
'chapter_set-INITIAL_FORMS': 1,
'chapter_set-MAX_NUM_FORMS': 1000,
'_save': 'Save',
'chapter_set-0-id': chapter.id,
'chapter_set-0-name': chapter.name,
'chapter_set-0-novel': lotr.id,
'chapter_set-0-DELETE': 'on'
}
response = self.client.post(change_url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Deleting chapter %s would require deleting "
"the following protected related objects: foot note %s"
% (chapter, foot_note))
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
def setUp(self):
self.user = User(username='admin')
self.user.is_staff = True
self.user.is_active = True
self.user.set_password('secret')
self.user.save()
self.author_ct = ContentType.objects.get_for_model(Author)
self.holder_ct = ContentType.objects.get_for_model(Holder2)
self.book_ct = ContentType.objects.get_for_model(Book)
self.inner_ct = ContentType.objects.get_for_model(Inner2)
        # User always has permissions to add and change Authors and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name='The Author')
book = author.books.create(name='The inline Book')
self.author_change_url = '/admin/admin_inlines/author/%i/' % author.id
# Get the ID of the automatically created intermediate model for the Author-Book m2m
author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
holder = Holder2.objects.create(dummy=13)
inner2 = Inner2.objects.create(dummy=42, holder=holder)
self.holder_change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
self.inner2_id = inner2.id
self.assertEqual(
self.client.login(username='admin', password='secret'),
True)
def tearDown(self):
self.client.logout()
def test_inline_add_m2m_noperm(self):
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get('/admin/admin_inlines/holder2/add/')
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/holder2/add/')
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author-book relationship')
self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_Author_books-0-id" '
'value="%i" name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id, html=True)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertNotContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>')
# Just the one form for existing instances
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
# max-num 0 means we can't add new ones
self.assertContains(response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" '
'value="0" name="inner2_set-MAX_NUM_FORMS" />', html=True)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, three for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_inlines.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-views-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_add_stackeds(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
stacked formset.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/holder4/add/'))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
self.assertEqual(rows_length(), 4)
def test_delete_stackeds(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/holder4/add/'))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
add_button.click()
self.assertEqual(rows_length(), 5, msg="sanity check")
for delete_link in self.selenium.find_elements_by_css_selector(
'%s .inline-deletelink' % inline_id):
delete_link.click()
self.assertEqual(rows_length(), 3)
def test_add_inlines(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
inline form.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Check that there's only one inline to start with and that it has the
# correct ID.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 1)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[0].get_attribute('id'),
'profile_set-0')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
# Add an inline
self.selenium.find_element_by_link_text('Add another Profile').click()
# Check that the inline has been added, that it has the right id, and
# that it contains the right fields.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 2)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
# Let's add another one to be sure
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 3)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
# Enter some data and click 'Save'
self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
# Check that the objects have been created in the database
self.assertEqual(ProfileCollection.objects.all().count(), 1)
self.assertEqual(Profile.objects.all().count(), 3)
def test_delete_inlines(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 5)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)
# Click on a few delete buttons
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
# Verify that they're gone and that the IDs have been re-sequenced
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 3)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
def test_alternating_rows(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
jdemon519/cfme_tests
|
refs/heads/master
|
cfme/tests/openstack/infrastructure/test_host_power_control.py
|
1
|
import pytest
from navmazing import NavigationDestinationNotFound
from cfme.infrastructure.host import Host
from cfme.infrastructure.provider.openstack_infra import OpenstackInfraProvider
from cfme.web_ui import Quadicon
from utils import testgen
from utils.appliance.implementations.ui import navigate_to
from utils.version import current_version
pytest_generate_tests = testgen.generate([OpenstackInfraProvider],
scope='module')
pytestmark = [pytest.mark.uncollectif(lambda: current_version() < '5.7')]
@pytest.mark.usefixtures("setup_provider_modscope")
@pytest.fixture(scope='module')
def host_on(provider):
try:
navigate_to(provider, 'ProviderNodes')
except NavigationDestinationNotFound:
assert "Missing nodes in provider's details"
my_quads = list(Quadicon.all())
quad = my_quads[0]
my_host_on = Host(name=quad.name)
if my_host_on.get_power_state() == 'off':
my_host_on.power_on()
my_host_on.wait_for_host_state_change('on', 1000)
return my_host_on
@pytest.mark.usefixtures("setup_provider_modscope")
@pytest.fixture(scope='module')
def host_off(provider):
try:
navigate_to(provider, 'ProviderNodes')
except NavigationDestinationNotFound:
assert "Missing nodes in provider's details"
my_quads = list(Quadicon.all())
quad = my_quads[0]
my_host_off = Host(name=quad.name)
if my_host_off.get_power_state() == 'on':
my_host_off.power_off()
my_host_off.wait_for_host_state_change('off', 1000)
return my_host_off
def test_host_power_off(host_on):
host_on.power_off()
host_on.refresh()
result = host_on.wait_for_host_state_change('off', 1000)
assert result
def test_host_power_on(host_off):
host_off.power_on()
host_off.refresh()
result = host_off.wait_for_host_state_change('on', 1000)
assert result
|
gmr/httpbl
|
refs/heads/master
|
httpbl.py
|
1
|
"""
Project Honeypot Http:BL API Client
Example:
.. code:: python
import httpbl
ip_address = '127.10.20.5'
print('Querying {}'.format(ip_address))
bl = httpbl.HttpBL('my-key')
print(bl.query(ip_address))
"""
import socket
__version__ = '1.0.1'
DNSBL_SUFFIX = 'dnsbl.httpbl.org.'
# Visitor Types
SEARCH_ENGINE = 0
SUSPICIOUS = 1
HARVESTER = 2
COMMENT_SPAMMER = 4
# List of Search Engines, used to return the name of the search engine
SEARCH_ENGINES = ['Undocumented',
'AltaVista',
'Ask',
'Baidu',
'Excite',
'Google',
'Looksmart',
'Lycos',
'MSN',
'Yahoo',
'Cuil',
'InfoSeek',
'Miscellaneous']
# Text mappings for visitor types
DESCRIPTIONS = {COMMENT_SPAMMER: 'Comment Spammer',
HARVESTER: 'Harvester',
SEARCH_ENGINE: 'Search Engine',
SUSPICIOUS: 'Suspicious'}
class HttpBL(object):
"""Query the the Project Honeypot Http:BL API"""
def __init__(self, key):
"""Initialize the HttpBL object with your Project Honeypot Key
:param key: Project Honeypot Http:BL Key
:type key: str
"""
self.key = key
def query(self, ip_address):
"""Query the Project Honeypot Http:BL API for the given IP address
:param ip_address: IP address to query
:type ip_address: str
:rtype: dict
"""
try:
return self._decode_response(
socket.gethostbyname(self._build_query(ip_address)))
except socket.gaierror: # Not listed
return {
'days_since_last_activity': None,
'name': None,
'threat_score': 0,
'type': None
}
def _build_query(self, ip_address):
"""Returns the Http:BL query string to use
:param ip_address: IP address to query
:type ip_address: str
:returns: str
"""
return '{}.{}.{}'.format(
self.key, self._reverse_ip(ip_address), DNSBL_SUFFIX)
def _reverse_ip(self, ip_address):
"""Take an IP address in 127.0.0.1 format and return it as 1.0.0.127
:param ip_address: IP address to query
:type ip_address: str
:returns: str
"""
return '.'.join(ip_address.split('.')[::-1])
def _decode_response(self, ip_address):
"""Decodes a HttpBL response IP and return data structure of response
data.
:param ip_address: IP address to query
:type ip_address: str
:rtype: dict
:raises: ValueError
"""
# Reverse the IP, reassign the octets to integers
vt, ts, days, rc = [int(o) for o in ip_address.split('.')[::-1]]
# 127 reflects a valid query response, all others are errors
if rc != 127:
raise ValueError('Invalid Response Code: {}'.format(rc))
# Build a list of visitor types since one IP can be multiple
visitor_types = []
if vt & COMMENT_SPAMMER:
visitor_types.append(COMMENT_SPAMMER)
if vt & HARVESTER:
visitor_types.append(HARVESTER)
if vt & SUSPICIOUS:
visitor_types.append(SUSPICIOUS)
name = None
if not vt:
try:
name = SEARCH_ENGINES[ts]
except IndexError:
name = SEARCH_ENGINES[0]
# Return the response dictionary
return {'days_since_last_activity': days if vt else None,
'name': name,
'threat_score': ts if vt else None,
'type': visitor_types if vt else [SEARCH_ENGINE]}
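# --- Hypothetical usage sketch (not part of the module above) ---
# Decoding an assumed response IP '127.3.5.3': read right to left it
# carries response code 127 (valid), 3 days since last activity,
# threat score 5 and visitor-type bitmask 3 (HARVESTER | SUSPICIOUS).
if __name__ == '__main__':
    bl = HttpBL('my-key')  # placeholder key
    result = bl._decode_response('127.3.5.3')
    assert result['type'] == [HARVESTER, SUSPICIOUS]
    assert result['threat_score'] == 5
    assert result['days_since_last_activity'] == 3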
|
kazemakase/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_kernel_approximation.py
|
244
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
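# A minimal sketch (not part of the original test suite) of the random
# Fourier feature construction RBFSampler is based on (Rahimi & Recht):
# with W ~ N(0, 2*gamma) and b ~ U[0, 2*pi], the features
# z(x) = sqrt(2/D) * cos(xW + b) satisfy z(x).z(y) ~= exp(-gamma*||x-y||^2)
# in expectation.
def _manual_rbf_features(A, gamma, n_components, seed=42):
    rng_ = np.random.RandomState(seed)
    W = rng_.normal(scale=np.sqrt(2 * gamma), size=(A.shape[1], n_components))
    b = rng_.uniform(0, 2 * np.pi, size=n_components)
    return np.sqrt(2. / n_components) * np.cos(np.dot(A, W) + b)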
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
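# A minimal sketch (not part of the original tests) of the Nystroem
# approximation the transformer implements: pick m landmark rows, then
# K ~= C pinv(W) C.T with C = k(X, landmarks) and W = k(landmarks, landmarks).
def _manual_nystroem(A, m, seed=0):
    rng_ = np.random.RandomState(seed)
    idx = rng_.permutation(A.shape[0])[:m]
    C = rbf_kernel(A, A[idx])
    W = rbf_kernel(A[idx], A[idx])
    return np.dot(np.dot(C, np.linalg.pinv(W)), C.T)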
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
|
ChinaMassClouds/copenstack-server
|
refs/heads/master
|
openstack/src/horizon-2014.2/openstack_dashboard/dashboards/project/data_processing/data_image_registry/tables.py
|
10
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django import template
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class EditTagsAction(tables.LinkAction):
name = "edit_tags"
verbose_name = _("Edit Tags")
url = "horizon:project:data_processing.data_image_registry:edit_tags"
classes = ("ajax-modal",)
def tags_to_string(image):
template_name = (
'project/data_processing.data_image_registry/_list_tags.html')
context = {"image": image}
return template.loader.render_to_string(template_name, context)
class RegisterImage(tables.LinkAction):
name = "register"
verbose_name = _("Register Image")
url = "horizon:project:data_processing.data_image_registry:register"
classes = ("ajax-modal",)
icon = "plus"
class UnregisterImages(tables.BatchAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Unregister Image",
u"Unregister Images",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Unregistered Image",
u"Unregistered Images",
count
)
name = "Unregister"
classes = ('btn-danger', 'btn-terminate')
def action(self, request, obj_id):
saharaclient.image_unregister(request, obj_id)
class ImageRegistryTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Image"),
link=("horizon:project:"
"images:images:detail"))
tags = tables.Column(tags_to_string,
verbose_name=_("Tags"))
class Meta:
name = "image_registry"
verbose_name = _("Image Registry")
table_actions = (RegisterImage, UnregisterImages,)
row_actions = (EditTagsAction, UnregisterImages,)
|
mic4ael/indico
|
refs/heads/master
|
indico/migrations/versions/20180126_1130_093533d27a96_add_visibility_options_to_contribution.py
|
7
|
"""Add visibility options to contribution fields
Revision ID: 093533d27a96
Revises: 9c4418d7a6aa
Create Date: 2017-11-30 17:15:07.141552
"""
import sqlalchemy as sa
from alembic import op
from indico.core.db.sqlalchemy import PyIntEnum
from indico.modules.events.contributions.models.fields import ContributionFieldVisibility
# revision identifiers, used by Alembic.
revision = '093533d27a96'
down_revision = '9c4418d7a6aa'
branch_labels = None
depends_on = None
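# The upgrade below uses the usual two-step pattern for adding NOT NULL
# columns to a populated table: create them with a server_default so
# existing rows get a value, then drop the default so the application
# must always supply the value explicitly.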
def upgrade():
op.add_column('contribution_fields',
sa.Column('is_user_editable', sa.Boolean(), nullable=False, server_default='true'),
schema='events')
op.add_column('contribution_fields',
sa.Column('visibility', PyIntEnum(ContributionFieldVisibility),
nullable=False, server_default='1'),
schema='events')
op.alter_column('contribution_fields', 'is_user_editable', server_default=None, schema='events')
op.alter_column('contribution_fields', 'visibility', server_default=None, schema='events')
def downgrade():
op.drop_column('contribution_fields', 'visibility', schema='events')
op.drop_column('contribution_fields', 'is_user_editable', schema='events')
|
onestarshang/flask_super_config
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
|
1734
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
from ..sanitizer import HTMLSanitizerMixin
class Filter(_base.Filter, HTMLSanitizerMixin):
def __iter__(self):
for token in _base.Filter.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
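# Hypothetical usage sketch (assumed tree-walker setup; not part of the
# vendored module): wrap a token stream so unsafe markup is neutralised
# before serialisation.
#
#   import html5lib
#   walker = html5lib.getTreeWalker('etree')
#   clean_tokens = Filter(walker(document_tree))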
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/_version.py
|
6
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
VERSION = "8.0.0"
|
Dunkas12/BeepBoopBot
|
refs/heads/master
|
lib/requests/packages/chardet/chardistribution.py
|
2754
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
# It is not necessary to receive all data to draw a conclusion.
# For charset detection, a certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
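# --- Hypothetical usage sketch (not part of the original module) ---
# Feeds assumed 2-byte EUC-KR pairs into an analyser and reads back the
# confidence once more than ENOUGH_DATA_THRESHOLD characters were seen.
def _demo_euckr():
    analyser = EUCKRDistributionAnalysis()
    sample = b'\xb0\xa1' * 1200  # assumed common EUC-KR byte pair
    for i in range(0, len(sample), 2):
        analyser.feed(sample[i:i + 2], 2)
    return analyser.got_enough_data(), analyser.get_confidence()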
|
infowantstobeseen/pyglet-darwincore
|
refs/heads/master
|
tests/font/ALIGN_MULTILINE.py
|
19
|
#!/usr/bin/env python
'''Test that font.Text alignment works with multiple lines.
Three labels will be rendered at the top-left, center and bottom-right of the
window. Resize the window to ensure the alignment is as specified.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import font
from . import base_text
class TEST_ALIGN_MULTILINE(base_text.TextTestBase):
font_name = ''
window_size = 400, 500
def render(self):
fnt = font.load(self.font_name, self.font_size)
w = self.window.width
h = self.window.height
self.labels = [
font.Text(fnt,
'This text is top-left aligned with several lines.',
0, h, width=w,
halign='left', valign='top'),
font.Text(fnt,
'This text is centered in the middle.',
0, h//2, width=w,
halign='center', valign='center'),
font.Text(fnt,
'This text is aligned to the bottom-right of the window.',
0, 0, width=w,
halign='right', valign='bottom'),
]
def on_resize(self, width, height):
for label in self.labels:
label.width = width
self.labels[0].y = height
self.labels[1].y = height // 2
def draw(self):
for label in self.labels:
label.draw()
if __name__ == '__main__':
unittest.main()
|
shikil/sympy
|
refs/heads/master
|
sympy/core/tests/test_equal.py
|
111
|
from sympy import Symbol, Dummy, Rational, exp
def test_equal():
b = Symbol("b")
a = Symbol("a")
e1 = a + b
e2 = 2*a*b
e3 = a**3*b**2
e4 = a*b + b*a
assert not e1 == e2
assert not e1 == e2
assert e1 != e2
assert e2 == e4
assert e2 != e3
assert not e2 == e3
x = Symbol("x")
e1 = exp(x + 1/x)
y = Symbol("x")
e2 = exp(y + 1/y)
assert e1 == e2
assert not e1 != e2
y = Symbol("y")
e2 = exp(y + 1/y)
assert not e1 == e2
assert e1 != e2
e5 = Rational(3) + 2*x - x - x
assert e5 == 3
assert 3 == e5
assert e5 != 4
assert 4 != e5
assert e5 != 3 + x
assert 3 + x != e5
def test_expevalbug():
x = Symbol("x")
e1 = exp(1*x)
e3 = exp(x)
assert e1 == e3
def test_cmp_bug1():
class T(object):
pass
t = T()
x = Symbol("x")
assert not (x == t)
assert (x != t)
def test_cmp_bug2():
class T(object):
pass
t = T()
assert not (Symbol == t)
assert (Symbol != t)
def test_cmp_issue_4357():
""" Check that Basic subclasses can be compared with sympifiable objects.
https://github.com/sympy/sympy/issues/4357
"""
assert not (Symbol == 1)
assert (Symbol != 1)
assert not (Symbol == 'x')
assert (Symbol != 'x')
def test_dummy_eq():
x = Symbol('x')
y = Symbol('y')
u = Dummy('u')
assert (u**2 + 1).dummy_eq(x**2 + 1) is True
assert ((u**2 + 1) == (x**2 + 1)) is False
assert (u**2 + y).dummy_eq(x**2 + y, x) is True
assert (u**2 + y).dummy_eq(x**2 + y, y) is False
|
JasonBristol/spor-ct
|
refs/heads/master
|
spor/investigations/__init__.py
|
12133432
| |
Mixser/django
|
refs/heads/master
|
tests/migrations/test_migrations_first/__init__.py
|
12133432
| |
wong2/sentry
|
refs/heads/master
|
tests/sentry/interfaces/test_base.py
|
12133432
| |
FokkeZB/titanium_mobile
|
refs/heads/master
|
support/common/markdown/inlinepatterns.py
|
107
|
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline must
capture the whole block. For this reason, they are all compiled to
start with '^(.*?)' and end with '(.*?)$'. For the built-in
expressions, Pattern takes care of adding this wrapping itself.
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import markdown
import re
from urlparse import urlparse, urlunparse
import sys
if sys.version >= "3.0":
from html import entities as htmlentitydefs
else:
import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'(\*)([^\*]+)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.+?)\2' # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2' # ***strong***
if markdown.SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\w)(_)(\S.+?)\2(?!\w)' # _emphasis_
else:
EMPHASIS_2_RE = r'(_)(.+?)\2' # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12)?\)'''
# [text](url) or [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
#  or 
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
def dequote(string):
"""Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, text)
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
"""Base class that inline patterns subclass. """
def __init__ (self, pattern, markdown_instance=None):
"""
Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp (self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
text = m.group(2)
if text == markdown.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class SimpleTagPattern (Pattern):
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = m.group(3)
return el
class SubstituteTagPattern (SimpleTagPattern):
""" Return a eLement of type `tag` with no children. """
def handleMatch (self, m):
return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = markdown.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern (SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = markdown.etree.Element(tag1)
el2 = markdown.etree.SubElement(el1, tag2)
el2.text = m.group(3)
return el1
class HtmlPattern (Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch (self, m):
rawhtml = m.group(2)
inline = True
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
class LinkPattern (Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.text = m.group(2)
title = m.group(11)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(href.strip()))
else:
el.set("href", "")
if title:
title = dequote(title)  # .replace('"', '&quot;')
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location; however, some
are known not to (e.g. mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that, relative links (e.g. "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
locless_schemes = ['', 'mailto', 'news']
scheme, netloc, path, params, query, fragment = url = urlparse(url)
safe_url = False
if netloc != '' or scheme in locless_schemes:
safe_url = True
for part in url[2:]:
if ":" in part:
safe_url = False
if self.markdown.safeMode and not safe_url:
return ''
else:
return urlunparse(url)
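# Hypothetical examples of the whitelist above when safe_mode is on:
#   'http://example.com/a:b' -> '' (literal colon in the path)
#   'mailto:me@example.com' -> kept (location-less scheme, no colons)
#   'javascript:alert(1)' -> '' (no netloc and scheme not whitelisted)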
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(src))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(" ".join(src_parts[1:])))
if markdown.ENABLE_ATTRIBUTES:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', truealt)
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
def handleMatch(self, m):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
if id not in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = markdown.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern (ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = markdown.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
el.set("alt", text)
return el
class AutolinkPattern (Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.set('href', m.group(2))
el.text = markdown.AtomicString(m.group(2))
return el
class AutomailPattern (Pattern):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = markdown.etree.Element('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = htmlentitydefs.codepoint2name.get(code)
if entity:
return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = markdown.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
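# --- Hypothetical illustration (not part of the original module) ---
# Demonstrates the group-number offset described in the module
# docstring: Pattern compiles every expression as "^(.*?)PATTERN(.*?)$",
# so the first group inside EMPHASIS_RE becomes group 2 and handleMatch
# reads the emphasised text from group 3.
def _demo_emphasis():
    p = SimpleTagPattern(EMPHASIS_RE, 'em')
    m = p.getCompiledRegExp().match('some *emphasised* text')
    assert m.group(3) == 'emphasised'
    return p.handleMatch(m)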
|
popazerty/enigma2-4.3
|
refs/heads/master
|
lib/python/Components/Timeshift.py
|
6
|
# -*- coding: utf-8 -*-
# InfoBarTimeshift requires InfoBarSeek, instantiated BEFORE!
# Hrmf.
#
# Timeshift works the following way:
# demux0 demux1 "TimeshiftActions" "TimeshiftActivateActions" "SeekActions"
# - normal playback TUNER unused PLAY enable disable disable
# - user presses "yellow" button. FILE record PAUSE enable disable enable
# - user presses pause again FILE record PLAY enable disable enable
# - user fast forwards FILE record FF enable disable enable
# - end of timeshift buffer reached TUNER record PLAY enable enable disable
# - user backwards FILE record BACK # !! enable disable enable
#
# in other words:
# - when a service is playing, pressing the "timeshiftStart" button ("yellow") enables recording ("enables timeshift"),
# freezes the picture (to indicate timeshift), sets timeshiftMode ("activates timeshift")
# now, the service becomes seekable, so "SeekActions" are enabled, "TimeshiftEnableActions" are disabled.
# - the user can now PVR around
# - if it hits the end, the service goes into live mode ("deactivates timeshift", it's of course still "enabled")
# the service loses its "seekable" state. It can still be paused, but just to activate timeshift right
# after!
# the seek actions will be disabled, but the timeshiftActivateActions will be enabled
# - if the user rewinds, or press pause, timeshift will be activated again
# note that a timeshift can be enabled ("recording") and
# activated (currently time-shifting).
from Components.ActionMap import ActionMap, HelpableActionMap
from Components.ServiceEventTracker import ServiceEventTracker
from Components.config import config
from Components.SystemInfo import SystemInfo
from Components.Task import job_manager as JobManager
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
import Screens.Standby
from ServiceReference import ServiceReference
from RecordTimer import RecordTimerEntry, parseEvent
from timer import TimerEntry
from Tools import ASCIItranslit, Notifications
from Tools.BoundFunction import boundFunction
from Tools.Directories import pathExists, fileExists, getRecordingFilename, copyfile, resolveFilename, SCOPE_TIMESHIFT, SCOPE_AUTORECORD
from Tools.TimeShift import CopyTimeshiftJob, MergeTimeshiftJob, CreateAPSCFilesJob
from enigma import eBackgroundFileEraser, eTimer, eServiceCenter, iServiceInformation, iPlayableService
from boxbranding import getBoxType, getBrandOEM
from time import time, localtime, strftime
from random import randint
import os
class InfoBarTimeshift:
ts_disabled = False
def __init__(self):
self["TimeshiftActions"] = HelpableActionMap(self, "InfobarTimeshiftActions",
{
"timeshiftStart": (self.startTimeshift, _("Start timeshift")), # the "yellow key"
"timeshiftStop": (self.stopTimeshift, _("Stop timeshift")), # currently undefined :), probably 'TV'
"instantRecord": self.instantRecord,
"restartTimeshift": self.restartTimeshift
}, prio=1)
self["TimeshiftActivateActions"] = ActionMap(["InfobarTimeshiftActivateActions"],
{
"timeshiftActivateEnd": self.activateTimeshiftEnd, # something like "rewind key"
"timeshiftActivateEndAndPause": self.activateTimeshiftEndAndPause # something like "pause key"
}, prio=-1) # priority over record
self["TimeshiftSeekPointerActions"] = ActionMap(["InfobarTimeshiftSeekPointerActions"],
{
"SeekPointerOK": self.ptsSeekPointerOK,
"SeekPointerLeft": self.ptsSeekPointerLeft,
"SeekPointerRight": self.ptsSeekPointerRight
}, prio=-1)
self["TimeshiftFileActions"] = ActionMap(["InfobarTimeshiftActions"],
{
"jumpPreviousFile": self.__evSOF,
"jumpNextFile": self.__evEOF
}, prio=-1) # priority over history
self["TimeshiftActions"].setEnabled(False)
self["TimeshiftActivateActions"].setEnabled(False)
self["TimeshiftSeekPointerActions"].setEnabled(False)
self["TimeshiftFileActions"].setEnabled(False)
self.switchToLive = True
self.ptsStop = False
self.ts_rewind_timer = eTimer()
self.ts_rewind_timer.callback.append(self.rewindService)
self.save_timeshift_file = False
self.saveTimeshiftEventPopupActive = False
self.__event_tracker = ServiceEventTracker(screen = self, eventmap =
{
iPlayableService.evStart: self.__serviceStarted,
iPlayableService.evSeekableStatusChanged: self.__seekableStatusChanged,
iPlayableService.evEnd: self.__serviceEnd,
iPlayableService.evSOF: self.__evSOF,
iPlayableService.evUpdatedInfo: self.__evInfoChanged,
iPlayableService.evUpdatedEventInfo: self.__evEventInfoChanged,
iPlayableService.evUser+1: self.ptsTimeshiftFileChanged
})
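		# Bookkeeping for the numbered "pts_livebuffer_N" files, as used by the
		# methods below (indices are 1-based):
		#   pts_eventcount    - index of the newest buffer file (current event)
		#   pts_firstplayable - index of the oldest buffer file still on disk
		#   pts_currplaying   - index of the buffer file currently playing
		#   pts_nextplaying   - index queued via ptsSetNextPlaybackFile(), 0 = none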
self.pts_begintime = 0
self.pts_switchtolive = False
self.pts_firstplayable = 1
self.pts_currplaying = 1
self.pts_nextplaying = 0
self.pts_lastseekspeed = 0
self.pts_service_changed = False
self.pts_file_changed = False
self.pts_record_running = self.session.nav.RecordTimer.isRecording()
self.save_current_timeshift = False
self.save_timeshift_postaction = None
self.service_changed = 0
self.event_changed = False
# Init Global Variables
self.session.ptsmainloopvalue = 0
config.timeshift.isRecording.value = False
# Init eBackgroundFileEraser
self.BgFileEraser = eBackgroundFileEraser.getInstance()
# Init PTS Delay-Timer
self.pts_delay_timer = eTimer()
self.pts_delay_timer.callback.append(self.autostartAutorecordTimeshift)
# Init PTS MergeRecords-Timer
self.pts_mergeRecords_timer = eTimer()
self.pts_mergeRecords_timer.callback.append(self.ptsMergeRecords)
# Init PTS Merge Cleanup-Timer
self.pts_mergeCleanUp_timer = eTimer()
self.pts_mergeCleanUp_timer.callback.append(self.ptsMergePostCleanUp)
# Init PTS QuitMainloop-Timer
self.pts_QuitMainloop_timer = eTimer()
self.pts_QuitMainloop_timer.callback.append(self.ptsTryQuitMainloop)
# Init PTS CleanUp-Timer
self.pts_cleanUp_timer = eTimer()
self.pts_cleanUp_timer.callback.append(self.ptsCleanTimeshiftFolder)
# Init PTS SeekBack-Timer
self.pts_SeekBack_timer = eTimer()
self.pts_SeekBack_timer.callback.append(self.ptsSeekBackTimer)
self.pts_StartSeekBackTimer = eTimer()
self.pts_StartSeekBackTimer.callback.append(self.ptsStartSeekBackTimer)
# Init PTS CheckFileChanged-Timer
self.pts_CheckFileChanged_timer = eTimer()
self.pts_CheckFileChanged_timer.callback.append(self.ptsCheckFileChanged)
# Init Block-Zap Timer
self.pts_blockZap_timer = eTimer()
# Record Event Tracker
self.session.nav.RecordTimer.on_state_change.append(self.ptsTimerEntryStateChange)
# Keep Current Event Info for recordings
self.pts_eventcount = 0
self.pts_curevent_begin = int(time())
self.pts_curevent_end = 0
self.pts_curevent_name = _("Timeshift")
self.pts_curevent_description = ""
self.pts_curevent_servicerefname = ""
self.pts_curevent_station = ""
self.pts_curevent_eventid = None
# Init PTS Infobar
def __seekableStatusChanged(self):
# print '__seekableStatusChanged'
self["TimeshiftActivateActions"].setEnabled(not self.isSeekable() and self.timeshiftEnabled())
state = self.getSeek() is not None and self.timeshiftEnabled()
self["SeekActionsPTS"].setEnabled(state)
self["TimeshiftFileActions"].setEnabled(state)
# print ('__seekableStatusChanged - state %s, seekstate %s' % (state, self.seekstate))
if not state and self.pts_currplaying == self.pts_eventcount:
self.setSeekState(self.SEEK_STATE_PLAY)
if self.pts_eventcount < self.pts_firstplayable:
self.pts_firstplayable = self.pts_eventcount
self.restartSubtitle()
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
if self.timeshiftEnabled() and not self.isSeekable():
self.ptsSeekPointerReset()
if int(config.timeshift.startdelay.value):
if self.pts_starttime <= (time()-5):
self.pts_blockZap_timer.start(3000, True)
self.pts_currplaying = self.pts_eventcount
self.pts_nextplaying = 0
self.pts_file_changed = True
self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_eventcount)
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
def __serviceStarted(self):
# print '__serviceStarted'
self.service_changed = 1
self.pts_service_changed = True
# print 'self.timeshiftEnabled1',self.timeshiftEnabled()
if self.pts_delay_timer.isActive():
# print 'TS AUTO START TEST1'
self.pts_delay_timer.stop()
if int(config.timeshift.startdelay.value):
# print 'TS AUTO START TEST2'
self.pts_delay_timer.start(int(config.timeshift.startdelay.value) * 1000, True)
self.__seekableStatusChanged()
def __serviceEnd(self):
if self.save_current_timeshift:
if self.pts_curevent_end > time():
self.SaveTimeshift("pts_livebuffer_%s" % self.pts_eventcount, mergelater=True)
self.ptsRecordCurrentEvent()
else:
self.SaveTimeshift("pts_livebuffer_%s" % self.pts_eventcount)
self.service_changed = 0
if not config.timeshift.isRecording.value:
self.__seekableStatusChanged()
def __evSOF(self):
# print '!!!!! jumpToPrevTimeshiftedEvent'
if not self.timeshiftEnabled() or self.pts_CheckFileChanged_timer.isActive():
return
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
self.pts_switchtolive = False
self.pts_nextplaying = 0
if self.pts_currplaying > self.pts_firstplayable:
self.pts_currplaying -= 1
else:
self.setSeekState(self.SEEK_STATE_PLAY)
self.doSeek(0)
return
# Switch to previous TS file by seeking forward to next file
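		# Note: doSeek(3600 * 24 * 90000) below seeks ~24 hours ahead (PTS ticks
		# run at 90kHz, so 90000 = 1 second). Seeking past the end of the file
		# makes the service roll over into the file queued with
		# ptsSetNextPlaybackFile(); the same idiom is used throughout this class.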
# print 'self.pts_currplaying2',self.pts_currplaying
# print ("'!!!!! %spts_livebuffer_%s" % (config.usage.timeshift_path.value, self.pts_currplaying))
if fileExists("%spts_livebuffer_%s" % (config.usage.timeshift_path.value, self.pts_currplaying), 'r'):
self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_currplaying)
self.setSeekState(self.SEEK_STATE_PLAY)
self.doSeek(3600 * 24 * 90000)
self.pts_CheckFileChanged_timer.start(1000, False)
self.pts_file_changed = False
else:
print ('[TIMESHIFT] - "pts_livebuffer_%s" file was not found -> put pointer to the first (current) "pts_livebuffer_%s" file' % (self.pts_currplaying, self.pts_currplaying + 1))
self.pts_currplaying += 1
self.pts_firstplayable += 1
self.setSeekState(self.SEEK_STATE_PLAY)
self.doSeek(0)
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
def __evEOF(self):
# print '!!!!! jumpToNextTimeshiftedEvent'
if not self.timeshiftEnabled() or self.pts_CheckFileChanged_timer.isActive():
return
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
self.pts_switchtolive = False
self.pts_nextplaying = 0
self.pts_currplaying += 1
# Switch to next TS file by seeking forward to next file
# print 'self.pts_currplaying2',self.pts_currplaying
# print ("'!!!!! %spts_livebuffer_%s" % (config.usage.timeshift_path.value, self.pts_currplaying))
if fileExists("%spts_livebuffer_%s" % (config.usage.timeshift_path.value, self.pts_currplaying), 'r'):
self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_currplaying)
self.setSeekState(self.SEEK_STATE_PLAY)
self.doSeek(3600 * 24 * 90000)
else:
self.pts_switchtolive = True
self.pts_currplaying -= 1
self.ptsSetNextPlaybackFile("")
self.setSeekState(self.SEEK_STATE_PLAY)
self.doSeek(3600 * 24 * 90000)
self.pts_CheckFileChanged_timer.start(1000, False)
self.pts_file_changed = False
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
def __evInfoChanged(self):
# print '__evInfoChanged'
# print 'service_changed',self.service_changed
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
if self.service_changed:
self.service_changed = 0
# We zapped away before saving the file, save it now!
if self.save_current_timeshift:
self.SaveTimeshift("pts_livebuffer_%s" % self.pts_eventcount)
# Delete Timeshift Records on zap
if config.timeshift.deleteAfterZap.value:
self.pts_eventcount = 0
self.pts_firstplayable = self.pts_eventcount + 1
if self.pts_eventcount == 0 and not int(config.timeshift.startdelay.value):
self.pts_cleanUp_timer.start(1000, True)
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
def __evEventInfoChanged(self):
# print '__evEventInfoChanged'
# Get Current Event Info
service = self.session.nav.getCurrentService()
old_begin_time = self.pts_begintime
info = service and service.info()
ptr = info and info.getEvent(0)
self.pts_begintime = ptr and ptr.getBeginTime() or 0
# Save current TimeShift permanently now ...
if info.getInfo(iServiceInformation.sVideoPID) != -1:
# Take care of Record Margin Time ...
if self.save_current_timeshift and self.timeshiftEnabled():
if config.recording.margin_after.value > 0 and len(self.recording) == 0:
self.SaveTimeshift(mergelater=True)
recording = RecordTimerEntry(ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup()), time(), time()+(config.recording.margin_after.value * 60), self.pts_curevent_name, self.pts_curevent_description, self.pts_curevent_eventid, dirname = config.usage.autorecord_path.value)
recording.dontSave = True
self.session.nav.RecordTimer.record(recording)
self.recording.append(recording)
else:
self.SaveTimeshift()
#(Re)Start TimeShift
# print 'self.pts_delay_timer.isActive',self.pts_delay_timer.isActive()
if not self.pts_delay_timer.isActive():
# print 'TS AUTO START TEST4'
if old_begin_time != self.pts_begintime or old_begin_time == 0:
# print 'TS AUTO START TEST5'
if int(config.timeshift.startdelay.value) or self.timeshiftEnabled():
self.event_changed = True
self.pts_delay_timer.start(1000, True)
def getTimeshift(self):
if self.ts_disabled:
return None
service = self.session.nav.getCurrentService()
return service and service.timeshift()
def timeshiftEnabled(self):
ts = self.getTimeshift()
return ts and ts.isTimeshiftEnabled()
def startTimeshift(self):
ts = self.getTimeshift()
if ts is None:
# self.session.open(MessageBox, _("Timeshift not possible!"), MessageBox.TYPE_ERROR, timeout=5)
return 0
if ts.isTimeshiftEnabled():
print "hu, timeshift already enabled?"
else:
self.activateAutorecordTimeshift()
self.activateTimeshiftEndAndPause()
def stopTimeshift(self):
# print 'stopTimeshift'
ts = self.getTimeshift()
if ts and ts.isTimeshiftEnabled():
# print 'TEST1'
if int(config.timeshift.startdelay.value) and self.isSeekable():
# print 'TEST2'
self.switchToLive = True
self.ptsStop = True
self.checkTimeshiftRunning(self.stopTimeshiftcheckTimeshiftRunningCallback)
elif not int(config.timeshift.startdelay.value):
# print 'TEST2b'
self.checkTimeshiftRunning(self.stopTimeshiftcheckTimeshiftRunningCallback)
else:
# print 'TES2c'
return 0
else:
# print 'TEST3'
return 0
def stopTimeshiftcheckTimeshiftRunningCallback(self, answer):
# print 'stopTimeshiftcheckTimeshiftRunningCallback'
# print ' answer', answer
if answer and int(config.timeshift.startdelay.value) and self.switchToLive and self.isSeekable():
# print 'TEST4'
self.ptsStop = False
self.pts_nextplaying = 0
self.pts_switchtolive = True
self.setSeekState(self.SEEK_STATE_PLAY)
self.ptsSetNextPlaybackFile("")
self.doSeek(3600 * 24 * 90000)
self.__seekableStatusChanged()
return 0
was_enabled = False
ts = self.getTimeshift()
if ts and ts.isTimeshiftEnabled():
# print 'TEST5'
was_enabled = ts.isTimeshiftEnabled()
if answer and ts:
# print 'TEST6'
if int(config.timeshift.startdelay.value):
# print 'TEST7'
ts.stopTimeshift(self.switchToLive)
else:
# print 'TEST8', str(self.event_changed)
ts.stopTimeshift(not self.event_changed)
self.__seekableStatusChanged()
# activates timeshift, and seeks to (almost) the end
def activateTimeshiftEnd(self, back = True):
ts = self.getTimeshift()
if ts is None:
return
if ts.isTimeshiftActive():
self.pauseService()
else:
ts.activateTimeshift() # activate timeshift will automatically pause
self.setSeekState(self.SEEK_STATE_PAUSE)
seekable = self.getSeek()
if seekable is not None:
seekable.seekTo(-90000) # seek approx. 1 sec before end
if back:
if getBrandOEM() == 'xtrend':
self.ts_rewind_timer.start(1000, 1)
else:
self.ts_rewind_timer.start(100, 1)
def rewindService(self):
if getBrandOEM() in ('gigablue', 'xp'):
self.setSeekState(self.SEEK_STATE_PLAY)
self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
# same as activateTimeshiftEnd, but pauses afterwards.
def activateTimeshiftEndAndPause(self):
self.activateTimeshiftEnd(False)
def checkTimeshiftRunning(self, returnFunction):
# print 'checkTimeshiftRunning'
# print 'self.switchToLive',self.switchToLive
if self.ptsStop:
returnFunction(True)
elif (self.isSeekable() and self.timeshiftEnabled() or self.save_current_timeshift) and config.usage.check_timeshift.value:
# print 'TEST1'
if config.timeshift.favoriteSaveAction.value == "askuser":
# print 'TEST2'
if self.save_current_timeshift:
# print 'TEST3'
message = _("You have chosen to save the current timeshift event, but the event has not yet finished\nWhat do you want to do ?")
choice = [(_("Save timeshift as movie and continue recording"), "savetimeshiftandrecord"),
(_("Save timeshift as movie and stop recording"), "savetimeshift"),
(_("Cancel save timeshift as movie"), "noSave"),
(_("Nothing, just leave this menu"), "no")]
self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=30)
else:
# print 'TEST4'
message = _("You seem to be in timeshift, Do you want to leave timeshift ?")
choice = [(_("Yes, but don't save timeshift as movie"), "noSave"),
(_("Yes, but save timeshift as movie and continue recording"), "savetimeshiftandrecord"),
(_("Yes, but save timeshift as movie and stop recording"), "savetimeshift"),
(_("No"), "no")]
self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=30)
else:
# print 'TEST5'
if self.save_current_timeshift:
# print 'TEST6'
					# the user has previously activated "Timeshift save recording" for the current event - so the timeshift must be saved!
					# workaround - without this message box the receiver could no longer be operated (not frozen - just unresponsive)
message = _("You have chosen to save the current timeshift")
choice = [(_("Now save timeshift as movie and continues recording"), "savetimeshiftandrecord")]
self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=1)
#InfoBarTimeshift.saveTimeshiftActions(self, "savetimeshiftandrecord", returnFunction)
else:
# print 'TEST7'
message = _("You seem to be in timeshift, Do you want to leave timeshift ?")
choice = [(_("Yes"), config.timeshift.favoriteSaveAction.value), (_("No"), "no")]
self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=30)
elif self.save_current_timeshift:
# the user has chosen "no warning" when timeshift is stopped (config.usage.check_timeshift=False)
# but the user has previously activated "Timeshift save recording" of current event
# so we silently do "savetimeshiftandrecord" when switching channel independent of config.timeshift.favoriteSaveAction
			# workaround - without this message box the receiver could no longer be operated (not frozen - just unresponsive)
message = _("You have chosen to save the current timeshift")
choice = [(_("Now save timeshift as movie and continues recording"), "savetimeshiftandrecord")]
self.session.openWithCallback(boundFunction(self.checkTimeshiftRunningCallback, returnFunction), MessageBox, message, simple = True, list = choice, timeout=1)
#InfoBarTimeshift.saveTimeshiftActions(self, "savetimeshiftandrecord", returnFunction)
else:
returnFunction(True)
def checkTimeshiftRunningCallback(self, returnFunction, answer):
# print 'checkTimeshiftRunningCallback'
# print 'returnFunction',returnFunction
# print 'answer',answer
if answer:
if answer == "savetimeshift" or answer == "savetimeshiftandrecord":
self.save_current_timeshift = True
elif answer == "noSave":
self.save_current_timeshift = False
elif answer == "no":
pass
InfoBarTimeshift.saveTimeshiftActions(self, answer, returnFunction)
def eraseTimeshiftFile(self):
for filename in os.listdir(config.usage.timeshift_path.value):
if filename.startswith("timeshift.") and not filename.endswith(".del") and not filename.endswith(".copy"):
self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
def autostartAutorecordTimeshift(self):
# print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!autostartAutorecordTimeshift'
self["TimeshiftActions"].setEnabled(True)
ts = self.getTimeshift()
if ts is None:
# print '[TimeShift] tune lock failed, so could not start.'
return 0
if self.pts_delay_timer.isActive():
self.pts_delay_timer.stop()
if (int(config.timeshift.startdelay.value) and not self.timeshiftEnabled()) or self.event_changed:
self.activateAutorecordTimeshift()
def activateAutorecordTimeshift(self):
# print 'activateAutorecordTimeshift'
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
self.createTimeshiftFolder()
if self.pts_eventcount == 0: #only cleanup folder after switching channels, not when a new event starts, to allow saving old events from timeshift buffer
self.ptsCleanTimeshiftFolder(justZapped = True) #remove all timeshift files
else:
self.ptsCleanTimeshiftFolder(justZapped = False) #only delete very old timeshift files based on config.usage.timeshiftMaxHours
if self.ptsCheckTimeshiftPath() is False or self.session.screen["Standby"].boolean is True or self.ptsLiveTVStatus() is False or (config.timeshift.stopwhilerecording.value and self.pts_record_running):
return
# Update internal Event Counter
self.pts_eventcount += 1
# setNextPlaybackFile() on event change while timeshifting
if self.isSeekable():
self.pts_nextplaying = self.pts_currplaying + 1
self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_nextplaying)
# Do not switch back to LiveTV while timeshifting
self.switchToLive = False
else:
self.switchToLive = True
# (Re)start Timeshift now
self.stopTimeshiftcheckTimeshiftRunningCallback(True)
self.event_changed = False
ts = self.getTimeshift()
if ts and not ts.startTimeshift():
if (getBoxType() == 'vuuno' or getBoxType() == 'vuduo') and os.path.exists("/proc/stb/lcd/symbol_timeshift"):
if self.session.nav.RecordTimer.isRecording():
f = open("/proc/stb/lcd/symbol_timeshift", "w")
f.write("0")
f.close()
self.pts_starttime = time()
self.save_timeshift_postaction = None
self.ptsGetEventInfo()
self.ptsCreateHardlink()
self.__seekableStatusChanged()
else:
self.session.open(MessageBox, _("Timeshift not possible!"), MessageBox.TYPE_ERROR, timeout=5)
self.pts_eventcount = 0
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
def createTimeshiftFolder(self):
timeshiftdir = resolveFilename(SCOPE_TIMESHIFT)
if not pathExists(timeshiftdir):
try:
os.makedirs(timeshiftdir)
except:
print "[TimeShift] Failed to create %s !!" %timeshiftdir
def restartTimeshift(self):
self.activateAutorecordTimeshift()
Notifications.AddNotification(MessageBox, _("[TimeShift] Restarting Timeshift!"), MessageBox.TYPE_INFO, timeout=5)
def saveTimeshiftEventPopup(self):
self.saveTimeshiftEventPopupActive = True
filecount = 0
entrylist = [(_("Current Event:") + " %s" % self.pts_curevent_name, "savetimeshift")]
filelist = os.listdir(config.usage.timeshift_path.value)
if filelist is not None:
try:
filelist = sorted(filelist, key=lambda x: int(x.split('pts_livebuffer_')[1]) if x.startswith("pts_livebuffer") and not os.path.splitext(x)[1] else x)
except:
print '[TIMESHIFT] - file sorting error, use standard sorting method'
filelist.sort()
# print filelist
for filename in filelist:
if filename.startswith("pts_livebuffer") and not os.path.splitext(filename)[1]:
# print "TRUE"
statinfo = os.stat("%s%s" % (config.usage.timeshift_path.value,filename))
if statinfo.st_mtime < (time()-5.0):
# Get Event Info from meta file
readmetafile = open("%s%s.meta" % (config.usage.timeshift_path.value,filename), "r")
servicerefname = readmetafile.readline()[0:-1]
eventname = readmetafile.readline()[0:-1]
description = readmetafile.readline()[0:-1]
begintime = readmetafile.readline()[0:-1]
readmetafile.close()
# Add Event to list
filecount += 1
if config.timeshift.deleteAfterZap.value and servicerefname == self.pts_curevent_servicerefname:
entrylist.append((_("Record") + " #%s (%s): %s" % (filecount,strftime("%H:%M",localtime(int(begintime))),eventname), "%s" % filename))
else:
servicename = ServiceReference(servicerefname).getServiceName()
#entrylist.append((_("Record") + " #%s (%s,%s): %s" % (filecount,strftime("%H:%M",localtime(int(begintime))),servicename,eventname), "%s" % filename))
entrylist.append(("[%s] %s : %s" % (strftime("%H:%M",localtime(int(begintime))),servicename,eventname), "%s" % filename))
self.session.openWithCallback(self.recordQuestionCallback, ChoiceBox, title=_("Which event do you want to save permanently?"), list=entrylist)
def saveTimeshiftActions(self, action=None, returnFunction=None):
# print 'saveTimeshiftActions'
# print 'action',action
if action == "savetimeshift":
self.SaveTimeshift()
elif action == "savetimeshiftandrecord":
if self.pts_curevent_end > time():
self.SaveTimeshift(mergelater=True)
self.ptsRecordCurrentEvent()
else:
self.SaveTimeshift()
elif action == "noSave":
config.timeshift.isRecording.value = False
self.save_current_timeshift = False
elif action == "no":
pass
# Get rid of old timeshift file before E2 truncates its filesize
if returnFunction is not None and action != "no":
self.eraseTimeshiftFile()
# print 'action returnFunction'
returnFunction(action and action != "no")
def SaveTimeshift(self, timeshiftfile=None, mergelater=False):
# print 'SaveTimeshift'
self.save_current_timeshift = False
savefilename = None
if timeshiftfile is not None:
savefilename = timeshiftfile
# print 'savefilename',savefilename
if savefilename is None:
# print 'TEST1'
for filename in os.listdir(config.usage.timeshift_path.value):
# print 'filename',filename
if filename.startswith("timeshift.") and not filename.endswith(".del") and not filename.endswith(".copy") and not filename.endswith(".sc"):
statinfo = os.stat("%s%s" % (config.usage.timeshift_path.value,filename))
if statinfo.st_mtime > (time()-5.0):
savefilename=filename
# print 'savefilename',savefilename
if savefilename is None:
Notifications.AddNotification(MessageBox, _("No Timeshift found to save as recording!"), MessageBox.TYPE_ERROR, timeout=30)
else:
timeshift_saved = True
timeshift_saveerror1 = ""
timeshift_saveerror2 = ""
metamergestring = ""
config.timeshift.isRecording.value = True
if mergelater:
self.pts_mergeRecords_timer.start(120000, True)
metamergestring = "pts_merge\n"
try:
if timeshiftfile is None:
# Save Current Event by creating hardlink to ts file
if self.pts_starttime >= (time()-60):
self.pts_starttime -= 60
ptsfilename = "%s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(self.pts_starttime)),self.pts_curevent_station,self.pts_curevent_name.replace("\n", ""))
try:
if config.usage.setup_level.index >= 2:
if config.recording.filename_composition.value == "long" and self.pts_curevent_name.replace("\n", "") != self.pts_curevent_description.replace("\n", ""):
ptsfilename = "%s - %s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(self.pts_starttime)),self.pts_curevent_station,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""))
elif config.recording.filename_composition.value == "short":
ptsfilename = "%s - %s" % (strftime("%Y%m%d",localtime(self.pts_starttime)),self.pts_curevent_name.replace("\n", ""))
elif config.recording.filename_composition.value == "veryshort":
ptsfilename = "%s - %s" % (self.pts_curevent_name.replace("\n", ""),strftime("%Y%m%d %H%M",localtime(self.pts_starttime)))
elif config.recording.filename_composition.value == "veryveryshort":
ptsfilename = "%s - %s" % (self.pts_curevent_name.replace("\n", ""),strftime("%Y%m%d %H%M",localtime(self.pts_starttime)))
except Exception, errormsg:
print "[TimeShift] Using default filename"
if config.recording.ascii_filenames.value:
ptsfilename = ASCIItranslit.legacyEncode(ptsfilename)
# print 'ptsfilename',ptsfilename
fullname = getRecordingFilename(ptsfilename,config.usage.autorecord_path.value)
# print 'fullname',fullname
os.link("%s%s" % (config.usage.timeshift_path.value,savefilename), "%s.ts" % fullname)
metafile = open("%s.ts.meta" % fullname, "w")
metafile.write("%s\n%s\n%s\n%i\n%s" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime),metamergestring))
metafile.close()
self.ptsCreateEITFile(fullname)
elif timeshiftfile.startswith("pts_livebuffer"):
# Save stored timeshift by creating hardlink to ts file
readmetafile = open("%s%s.meta" % (config.usage.timeshift_path.value,timeshiftfile), "r")
servicerefname = readmetafile.readline()[0:-1]
eventname = readmetafile.readline()[0:-1]
description = readmetafile.readline()[0:-1]
begintime = readmetafile.readline()[0:-1]
readmetafile.close()
if config.timeshift.deleteAfterZap.value and servicerefname == self.pts_curevent_servicerefname:
servicename = self.pts_curevent_station
else:
servicename = ServiceReference(servicerefname).getServiceName()
ptsfilename = "%s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(int(begintime))),servicename,eventname)
try:
if config.usage.setup_level.index >= 2:
if config.recording.filename_composition.value == "long" and eventname != description:
ptsfilename = "%s - %s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(int(begintime))),servicename,eventname,description)
elif config.recording.filename_composition.value == "short":
ptsfilename = "%s - %s" % (strftime("%Y%m%d",localtime(int(begintime))),eventname)
elif config.recording.filename_composition.value == "veryshort":
ptsfilename = "%s - %s" % (eventname,strftime("%Y%m%d %H%M",localtime(int(begintime))))
elif config.recording.filename_composition.value == "veryveryshort":
ptsfilename = "%s - %s" % (eventname,strftime("%Y%m%d %H%M",localtime(int(begintime))))
except Exception, errormsg:
print "[TimeShift] Using default filename"
if config.recording.ascii_filenames.value:
ptsfilename = ASCIItranslit.legacyEncode(ptsfilename)
fullname=getRecordingFilename(ptsfilename,config.usage.autorecord_path.value)
os.link("%s%s" % (config.usage.timeshift_path.value,timeshiftfile),"%s.ts" % fullname)
os.link("%s%s.meta" % (config.usage.timeshift_path.value,timeshiftfile),"%s.ts.meta" % fullname)
if os.path.exists("%s%s.eit" % (config.usage.timeshift_path.value,timeshiftfile)):
os.link("%s%s.eit" % (config.usage.timeshift_path.value,timeshiftfile),"%s.eit" % fullname)
# Add merge-tag to metafile
if mergelater:
metafile = open("%s.ts.meta" % fullname, "a")
metafile.write("%s\n" % metamergestring)
metafile.close()
# Create AP and SC Files when not merging
if not mergelater:
self.ptsCreateAPSCFiles(fullname+".ts")
except Exception, errormsg:
timeshift_saved = False
timeshift_saveerror1 = errormsg
# Hmpppf! Saving Timeshift via Hardlink-Method failed. Probably other device?
# Let's try to copy the file in background now! This might take a while ...
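			# (os.link() raises OSError EXDEV when source and destination are on
			# different filesystems, which is the usual reason we land here.)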
if not timeshift_saved:
try:
stat = os.statvfs(config.usage.autorecord_path.value)
freespace = stat.f_bfree / 1000 * stat.f_bsize / 1000
randomint = randint(1, 999)
if timeshiftfile is None:
# Get Filesize for Free Space Check
filesize = int(os.path.getsize("%s%s" % (config.usage.timeshift_path.value,savefilename)) / (1024*1024))
# Save Current Event by copying it to the other device
if filesize <= freespace:
os.link("%s%s" % (config.usage.timeshift_path.value,savefilename), "%s%s.%s.copy" % (config.usage.timeshift_path.value,savefilename,randomint))
copy_file = savefilename
metafile = open("%s.ts.meta" % fullname, "w")
metafile.write("%s\n%s\n%s\n%i\n%s" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime),metamergestring))
metafile.close()
self.ptsCreateEITFile(fullname)
elif timeshiftfile.startswith("pts_livebuffer"):
# Get Filesize for Free Space Check
filesize = int(os.path.getsize("%s%s" % (config.usage.timeshift_path.value, timeshiftfile)) / (1024*1024))
# Save stored timeshift by copying it to the other device
if filesize <= freespace:
os.link("%s%s" % (config.usage.timeshift_path.value,timeshiftfile), "%s%s.%s.copy" % (config.usage.timeshift_path.value,timeshiftfile,randomint))
copyfile("%s%s.meta" % (config.usage.timeshift_path.value,timeshiftfile),"%s.ts.meta" % fullname)
if os.path.exists("%s%s.eit" % (config.usage.timeshift_path.value,timeshiftfile)):
copyfile("%s%s.eit" % (config.usage.timeshift_path.value,timeshiftfile),"%s.eit" % fullname)
copy_file = timeshiftfile
# Add merge-tag to metafile
if mergelater:
metafile = open("%s.ts.meta" % fullname, "a")
metafile.write("%s\n" % metamergestring)
metafile.close()
# Only copy file when enough disk-space available!
if filesize <= freespace:
timeshift_saved = True
copy_file = copy_file+"."+str(randomint)
# Get Event Info from meta file
if os.path.exists("%s.ts.meta" % fullname):
readmetafile = open("%s.ts.meta" % fullname, "r")
servicerefname = readmetafile.readline()[0:-1]
eventname = readmetafile.readline()[0:-1]
readmetafile.close()
else:
eventname = ""
JobManager.AddJob(CopyTimeshiftJob(self, "mv \"%s%s.copy\" \"%s.ts\"" % (config.usage.timeshift_path.value,copy_file,fullname), copy_file, fullname, eventname))
if not Screens.Standby.inTryQuitMainloop and not Screens.Standby.inStandby and not mergelater and self.save_timeshift_postaction != "standby":
Notifications.AddNotification(MessageBox, _("Saving timeshift as movie now. This might take a while!"), MessageBox.TYPE_INFO, timeout=30)
else:
timeshift_saved = False
timeshift_saveerror1 = ""
timeshift_saveerror2 = _("Not enough free Diskspace!\n\nFilesize: %sMB\nFree Space: %sMB\nPath: %s" % (filesize,freespace,config.usage.autorecord_path.value))
except Exception, errormsg:
timeshift_saved = False
timeshift_saveerror2 = errormsg
if not timeshift_saved:
config.timeshift.isRecording.value = False
self.save_timeshift_postaction = None
errormessage = str(timeshift_saveerror1) + "\n" + str(timeshift_saveerror2)
Notifications.AddNotification(MessageBox, _("Timeshift save failed!")+"\n\n%s" % errormessage, MessageBox.TYPE_ERROR, timeout=30)
# print 'SAVE COMPLETED'
def ptsCleanTimeshiftFolder(self, justZapped = True):
# print '!!!!!!!!!!!!!!!!!!!!! ptsCleanTimeshiftFolder'
if self.ptsCheckTimeshiftPath() is False or self.session.screen["Standby"].boolean is True:
return
for filename in os.listdir(config.usage.timeshift_path.value):
if (os.path.exists("%s%s" % (config.usage.timeshift_path.value,filename))) and ((filename.startswith("timeshift.") or filename.startswith("pts_livebuffer_"))):
# print 'filename:',filename
statinfo = os.stat("%s%s" % (config.usage.timeshift_path.value,filename))
if (justZapped is True) and (filename.endswith(".del") is False) and (filename.endswith(".copy") is False):
# after zapping, remove all regular timeshift files
# print "[TimeShift] Erasing stranded timeshift file %s" % filename
self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
elif (filename.endswith(".eit") is False) and (filename.endswith(".meta") is False) and (filename.endswith(".sc") is False) and (filename.endswith(".del") is False) and (filename.endswith(".copy") is False):
# remove old files, but only complete sets of files (base file, .eit, .meta, .sc),
# and not while saveTimeshiftEventPopup is active (avoid deleting files about to be saved)
# and don't delete the file currently playing
if (statinfo.st_mtime < (time()-3600*config.timeshift.timeshiftMaxHours.value)) and (self.saveTimeshiftEventPopupActive is False) and not(filename == ("pts_livebuffer_%s" % self.pts_currplaying)):
# print "[TimeShift] Erasing set of old timeshift files (base file, .eit, .meta, .sc) %s" % filename
self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
if os.path.exists("%s%s.eit" % (config.usage.timeshift_path.value,filename)):
self.BgFileEraser.erase("%s%s.eit" % (config.usage.timeshift_path.value,filename))
if os.path.exists("%s%s.meta" % (config.usage.timeshift_path.value,filename)):
self.BgFileEraser.erase("%s%s.meta" % (config.usage.timeshift_path.value,filename))
if os.path.exists("%s%s.sc" % (config.usage.timeshift_path.value,filename)):
self.BgFileEraser.erase("%s%s.sc" % (config.usage.timeshift_path.value,filename))
else:
# remove anything still left over another 24h later
if statinfo.st_mtime < (time()-3600*(24+config.timeshift.timeshiftMaxHours.value)):
# print "[TimeShift] Erasing very old timeshift file %s" % filename
if filename.endswith(".del") is True:
os.rename("%s%s" % (config.usage.timeshift_path.value,filename), "%s%s.del_again" % (config.usage.timeshift_path.value,filename))
self.BgFileEraser.erase("%s%s.del_again" % (config.usage.timeshift_path.value,filename))
else:
self.BgFileEraser.erase("%s%s" % (config.usage.timeshift_path.value,filename))
def ptsGetEventInfo(self):
event = None
try:
serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(serviceref)
self.pts_curevent_servicerefname = serviceref.toString()
self.pts_curevent_station = info.getName(serviceref)
service = self.session.nav.getCurrentService()
info = service and service.info()
event = info and info.getEvent(0)
except Exception, errormsg:
Notifications.AddNotification(MessageBox, _("Getting Event Info failed!")+"\n\n%s" % errormsg, MessageBox.TYPE_ERROR, timeout=10)
if event is not None:
curEvent = parseEvent(event)
self.pts_curevent_begin = int(curEvent[0])
self.pts_curevent_end = int(curEvent[1])
self.pts_curevent_name = curEvent[2]
self.pts_curevent_description = curEvent[3]
self.pts_curevent_eventid = curEvent[4]
def ptsFrontpanelActions(self, action=None):
if self.session.nav.RecordTimer.isRecording() or SystemInfo.get("NumFrontpanelLEDs", 0) == 0:
return
if action == "start":
if os.path.exists("/proc/stb/fp/led_set_pattern"):
f = open("/proc/stb/fp/led_set_pattern", "w")
f.write("0xa7fccf7a")
f.close()
elif os.path.exists("/proc/stb/fp/led0_pattern"):
f = open("/proc/stb/fp/led0_pattern", "w")
f.write("0x55555555")
f.close()
if os.path.exists("/proc/stb/fp/led_pattern_speed"):
f = open("/proc/stb/fp/led_pattern_speed", "w")
f.write("20")
f.close()
elif os.path.exists("/proc/stb/fp/led_set_speed"):
f = open("/proc/stb/fp/led_set_speed", "w")
f.write("20")
f.close()
elif action == "stop":
if os.path.exists("/proc/stb/fp/led_set_pattern"):
f = open("/proc/stb/fp/led_set_pattern", "w")
f.write("0")
f.close()
elif os.path.exists("/proc/stb/fp/led0_pattern"):
f = open("/proc/stb/fp/led0_pattern", "w")
f.write("0")
f.close()
def ptsCreateHardlink(self):
# print 'ptsCreateHardlink'
for filename in os.listdir(config.usage.timeshift_path.value):
# if filename.startswith("timeshift") and not os.path.splitext(filename)[1]:
if filename.startswith("timeshift") and not filename.endswith(".sc") and not filename.endswith(".del") and not filename.endswith(".copy"):
if os.path.exists("%spts_livebuffer_%s.eit" % (config.usage.timeshift_path.value,self.pts_eventcount)):
self.BgFileEraser.erase("%spts_livebuffer_%s.eit" % (config.usage.timeshift_path.value,self.pts_eventcount))
if os.path.exists("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_eventcount)):
self.BgFileEraser.erase("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_eventcount))
if os.path.exists("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount)):
self.BgFileEraser.erase("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount))
if os.path.exists("%spts_livebuffer_%s.sc" % (config.usage.timeshift_path.value,self.pts_eventcount)):
self.BgFileEraser.erase("%spts_livebuffer_%s.sc" % (config.usage.timeshift_path.value,self.pts_eventcount))
try:
# Create link to pts_livebuffer file
os.link("%s%s" % (config.usage.timeshift_path.value,filename), "%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount))
os.link("%s%s.sc" % (config.usage.timeshift_path.value,filename), "%spts_livebuffer_%s.sc" % (config.usage.timeshift_path.value,self.pts_eventcount))
# Create a Meta File
metafile = open("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_eventcount), "w")
metafile.write("%s\n%s\n%s\n%i\n" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime)))
metafile.close()
except Exception, errormsg:
Notifications.AddNotification(MessageBox, _("Creating Hardlink to Timeshift file failed!")+"\n"+_("The Filesystem on your Timeshift-Device does not support hardlinks.\nMake sure it is formatted in EXT2 or EXT3!")+"\n\n%s" % errormsg, MessageBox.TYPE_ERROR, timeout=30)
# Create EIT File
self.ptsCreateEITFile("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_eventcount))
# Autorecord
if config.timeshift.autorecord.value:
try:
fullname = getRecordingFilename("%s - %s - %s" % (strftime("%Y%m%d %H%M",localtime(self.pts_starttime)),self.pts_curevent_station,self.pts_curevent_name),config.usage.autorecord_path.value)
os.link("%s%s" % (config.usage.timeshift_path.value,filename), "%s.ts" % fullname)
# Create a Meta File
metafile = open("%s.ts.meta" % fullname, "w")
metafile.write("%s\n%s\n%s\n%i\nautosaved\n" % (self.pts_curevent_servicerefname,self.pts_curevent_name.replace("\n", ""),self.pts_curevent_description.replace("\n", ""),int(self.pts_starttime)))
metafile.close()
except Exception, errormsg:
print "[Timeshift] %s" % errormsg
def ptsRecordCurrentEvent(self):
recording = RecordTimerEntry(ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup()), time(), self.pts_curevent_end, self.pts_curevent_name, self.pts_curevent_description, self.pts_curevent_eventid, dirname = config.usage.autorecord_path.value)
recording.dontSave = True
self.session.nav.RecordTimer.record(recording)
self.recording.append(recording)
def ptsMergeRecords(self):
if self.session.nav.RecordTimer.isRecording():
self.pts_mergeRecords_timer.start(120000, True)
return
ptsmergeSRC = ""
ptsmergeDEST = ""
ptsmergeeventname = ""
ptsgetnextfile = False
ptsfilemerged = False
filelist = os.listdir(config.usage.autorecord_path.value)
if filelist is not None:
filelist.sort()
for filename in filelist:
if filename.endswith(".meta"):
# Get Event Info from meta file
readmetafile = open("%s%s" % (config.usage.autorecord_path.value,filename), "r")
servicerefname = readmetafile.readline()[0:-1]
eventname = readmetafile.readline()[0:-1]
eventtitle = readmetafile.readline()[0:-1]
eventtime = readmetafile.readline()[0:-1]
eventtag = readmetafile.readline()[0:-1]
readmetafile.close()
if ptsgetnextfile:
ptsgetnextfile = False
ptsmergeSRC = filename[0:-5]
if ASCIItranslit.legacyEncode(eventname) == ASCIItranslit.legacyEncode(ptsmergeeventname):
# Copy EIT File
if fileExists("%s%s.eit" % (config.usage.autorecord_path.value, ptsmergeSRC[0:-3])):
copyfile("%s%s.eit" % (config.usage.autorecord_path.value, ptsmergeSRC[0:-3]),"%s%s.eit" % (config.usage.autorecord_path.value, ptsmergeDEST[0:-3]))
# Delete AP and SC Files
if os.path.exists("%s%s.ap" % (config.usage.autorecord_path.value, ptsmergeDEST)):
self.BgFileEraser.erase("%s%s.ap" % (config.usage.autorecord_path.value, ptsmergeDEST))
if os.path.exists("%s%s.sc" % (config.usage.autorecord_path.value, ptsmergeDEST)):
self.BgFileEraser.erase("%s%s.sc" % (config.usage.autorecord_path.value, ptsmergeDEST))
# Add Merge Job to JobManager
JobManager.AddJob(MergeTimeshiftJob(self, "cat \"%s%s\" >> \"%s%s\"" % (config.usage.autorecord_path.value,ptsmergeSRC,config.usage.autorecord_path.value,ptsmergeDEST), ptsmergeSRC, ptsmergeDEST, eventname))
config.timeshift.isRecording.value = True
ptsfilemerged = True
else:
ptsgetnextfile = True
if eventtag == "pts_merge" and not ptsgetnextfile:
ptsgetnextfile = True
ptsmergeDEST = filename[0:-5]
ptsmergeeventname = eventname
ptsfilemerged = False
						# If still recording or transferring, try again later ...
if fileExists("%s%s" % (config.usage.autorecord_path.value,ptsmergeDEST)):
statinfo = os.stat("%s%s" % (config.usage.autorecord_path.value,ptsmergeDEST))
if statinfo.st_mtime > (time()-10.0):
self.pts_mergeRecords_timer.start(120000, True)
return
# Rewrite Meta File to get rid of pts_merge tag
metafile = open("%s%s.meta" % (config.usage.autorecord_path.value,ptsmergeDEST), "w")
metafile.write("%s\n%s\n%s\n%i\n" % (servicerefname,eventname.replace("\n", ""),eventtitle.replace("\n", ""),int(eventtime)))
metafile.close()
# Merging failed :(
if not ptsfilemerged and ptsgetnextfile:
Notifications.AddNotification(MessageBox,_("[Timeshift] Merging records failed!"), MessageBox.TYPE_ERROR, timeout=30)
def ptsCreateAPSCFiles(self, filename):
if fileExists(filename, 'r'):
if fileExists(filename+".meta", 'r'):
# Get Event Info from meta file
readmetafile = open(filename+".meta", "r")
servicerefname = readmetafile.readline()[0:-1]
eventname = readmetafile.readline()[0:-1]
readmetafile.close()
else:
eventname = ""
JobManager.AddJob(CreateAPSCFilesJob(self, "/usr/lib/enigma2/python/Components/createapscfiles \"%s\" > /dev/null" % filename, eventname))
else:
self.ptsSaveTimeshiftFinished()
def ptsCreateEITFile(self, filename):
if self.pts_curevent_eventid is not None:
try:
import Components.eitsave
serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup()).ref.toString()
Components.eitsave.SaveEIT(serviceref, filename+".eit", self.pts_curevent_eventid, -1, -1)
except Exception, errormsg:
print "[Timeshift] %s" % errormsg
def ptsCopyFilefinished(self, srcfile, destfile):
# Erase Source File
if fileExists(srcfile):
self.BgFileEraser.erase(srcfile)
# Restart Merge Timer
if self.pts_mergeRecords_timer.isActive():
self.pts_mergeRecords_timer.stop()
self.pts_mergeRecords_timer.start(15000, True)
else:
# Create AP and SC Files
self.ptsCreateAPSCFiles(destfile)
def ptsMergeFilefinished(self, srcfile, destfile):
if self.session.nav.RecordTimer.isRecording() or len(JobManager.getPendingJobs()) >= 1:
# Rename files and delete them later ...
self.pts_mergeCleanUp_timer.start(120000, True)
os.system("echo \"\" > \"%s.pts.del\"" % (srcfile[0:-3]))
else:
# Delete Instant Record permanently now ... R.I.P.
self.BgFileEraser.erase("%s" % srcfile)
self.BgFileEraser.erase("%s.ap" % srcfile)
self.BgFileEraser.erase("%s.sc" % srcfile)
self.BgFileEraser.erase("%s.meta" % srcfile)
self.BgFileEraser.erase("%s.cuts" % srcfile)
self.BgFileEraser.erase("%s.eit" % (srcfile[0:-3]))
# Create AP and SC Files
self.ptsCreateAPSCFiles(destfile)
# Run Merge-Process one more time to check if there are more records to merge
self.pts_mergeRecords_timer.start(10000, True)
def ptsSaveTimeshiftFinished(self):
if not self.pts_mergeCleanUp_timer.isActive():
self.ptsFrontpanelActions("stop")
config.timeshift.isRecording.value = False
if Screens.Standby.inTryQuitMainloop:
self.pts_QuitMainloop_timer.start(30000, True)
else:
Notifications.AddNotification(MessageBox, _("Timeshift saved to your harddisk!"), MessageBox.TYPE_INFO, timeout=30)
def ptsMergePostCleanUp(self):
if self.session.nav.RecordTimer.isRecording() or len(JobManager.getPendingJobs()) >= 1:
config.timeshift.isRecording.value = True
self.pts_mergeCleanUp_timer.start(120000, True)
return
self.ptsFrontpanelActions("stop")
config.timeshift.isRecording.value = False
filelist = os.listdir(config.usage.autorecord_path.value)
for filename in filelist:
if filename.endswith(".pts.del"):
srcfile = config.usage.autorecord_path.value + "/" + filename[0:-8] + ".ts"
self.BgFileEraser.erase("%s" % srcfile)
self.BgFileEraser.erase("%s.ap" % srcfile)
self.BgFileEraser.erase("%s.sc" % srcfile)
self.BgFileEraser.erase("%s.meta" % srcfile)
self.BgFileEraser.erase("%s.cuts" % srcfile)
self.BgFileEraser.erase("%s.eit" % (srcfile[0:-3]))
self.BgFileEraser.erase("%s.pts.del" % (srcfile[0:-3]))
# Restart QuitMainloop Timer to give BgFileEraser enough time
if Screens.Standby.inTryQuitMainloop and self.pts_QuitMainloop_timer.isActive():
self.pts_QuitMainloop_timer.start(60000, True)
def ptsTryQuitMainloop(self):
if Screens.Standby.inTryQuitMainloop and (len(JobManager.getPendingJobs()) >= 1 or self.pts_mergeCleanUp_timer.isActive()):
self.pts_QuitMainloop_timer.start(60000, True)
return
if Screens.Standby.inTryQuitMainloop and self.session.ptsmainloopvalue:
self.session.dialog_stack = []
self.session.summary_stack = [None]
self.session.open(Screens.Standby.TryQuitMainloop, self.session.ptsmainloopvalue)
def ptsGetSeekInfo(self):
s = self.session.nav.getCurrentService()
return s and s.seek()
def ptsGetPosition(self):
seek = self.ptsGetSeekInfo()
if seek is None:
return None
pos = seek.getPlayPosition()
if pos[0]:
return 0
return pos[1]
def ptsGetLength(self):
seek = self.ptsGetSeekInfo()
if seek is None:
return None
length = seek.getLength()
if length[0]:
return 0
return length[1]
def ptsGetTimeshiftStatus(self):
if (self.isSeekable() and self.timeshiftEnabled() or self.save_current_timeshift) and config.usage.check_timeshift.value:
return True
else:
return False
def ptsSeekPointerOK(self):
if self.pvrStateDialog.has_key("PTSSeekPointer") and self.timeshiftEnabled() and self.isSeekable():
if not self.pvrStateDialog.shown:
if self.seekstate != self.SEEK_STATE_PLAY or self.seekstate == self.SEEK_STATE_PAUSE:
self.setSeekState(self.SEEK_STATE_PLAY)
self.doShow()
return
length = self.ptsGetLength()
position = self.ptsGetPosition()
if length is None or position is None:
return
cur_pos = self.pvrStateDialog["PTSSeekPointer"].position
jumptox = int(cur_pos[0]) - (int(self.pvrStateDialog["PTSSeekBack"].instance.position().x())+8)
jumptoperc = round((jumptox / float(self.pvrStateDialog["PTSSeekBack"].instance.size().width())) * 100, 0)
jumptotime = int((length / 100) * jumptoperc)
jumptodiff = position - jumptotime
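			# jumptodiff > 0 means the pointer was dragged to a point earlier than
			# the current position, so the relative seek below is negative (back).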
self.doSeekRelative(-jumptodiff)
else:
return
def ptsSeekPointerLeft(self):
if self.pvrStateDialog.has_key("PTSSeekPointer") and self.pvrStateDialog.shown and self.timeshiftEnabled() and self.isSeekable():
self.ptsMoveSeekPointer(direction="left")
else:
return
def ptsSeekPointerRight(self):
if self.pvrStateDialog.has_key("PTSSeekPointer") and self.pvrStateDialog.shown and self.timeshiftEnabled() and self.isSeekable():
self.ptsMoveSeekPointer(direction="right")
else:
return
def ptsSeekPointerReset(self):
if self.pvrStateDialog.has_key("PTSSeekPointer") and self.timeshiftEnabled():
self.pvrStateDialog["PTSSeekPointer"].setPosition(int(self.pvrStateDialog["PTSSeekBack"].instance.position().x())+8,self.pvrStateDialog["PTSSeekPointer"].position[1])
def ptsSeekPointerSetCurrentPos(self):
if not self.pvrStateDialog.has_key("PTSSeekPointer") or not self.timeshiftEnabled() or not self.isSeekable():
return
position = self.ptsGetPosition()
length = self.ptsGetLength()
if length >= 1:
tpixels = int((float(int((position*100)/length))/100)*self.pvrStateDialog["PTSSeekBack"].instance.size().width())
self.pvrStateDialog["PTSSeekPointer"].setPosition(int(self.pvrStateDialog["PTSSeekBack"].instance.position().x())+8+tpixels, self.pvrStateDialog["PTSSeekPointer"].position[1])
def ptsMoveSeekPointer(self, direction=None):
if direction is None or not self.pvrStateDialog.has_key("PTSSeekPointer"):
return
isvalidjump = False
cur_pos = self.pvrStateDialog["PTSSeekPointer"].position
self.doShow()
if direction == "left":
minmaxval = int(self.pvrStateDialog["PTSSeekBack"].instance.position().x())+8
movepixels = -15
if cur_pos[0]+movepixels > minmaxval:
isvalidjump = True
elif direction == "right":
minmaxval = int(self.pvrStateDialog["PTSSeekBack"].instance.size().width()*0.96)
movepixels = 15
if cur_pos[0]+movepixels < minmaxval:
isvalidjump = True
else:
return 0
if isvalidjump:
self.pvrStateDialog["PTSSeekPointer"].setPosition(cur_pos[0]+movepixels,cur_pos[1])
else:
self.pvrStateDialog["PTSSeekPointer"].setPosition(minmaxval,cur_pos[1])
def ptsCheckFileChanged(self):
# print '!!!!! ptsCheckFileChanged'
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
# print 'self.pts_file_changed',self.pts_file_changed
if self.pts_file_changed or not self.timeshiftEnabled():
self.pts_CheckFileChanged_timer.stop()
if not self.pts_currplaying == self.pts_eventcount:
self.pts_SeekBack_timer.start(1000, True)
else:
self.doSeek(3600 * 24 * 90000)
def ptsTimeshiftFileChanged(self):
# print '!!!!! ptsTimeshiftFileChanged'
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
self.pts_file_changed = True
# Reset Seek Pointer
self.ptsSeekPointerReset()
# print 'self.pts_switchtolive',self.pts_switchtolive
if self.pts_switchtolive:
self.pts_switchtolive = False
self.pts_nextplaying = 0
self.pts_currplaying = self.pts_eventcount
return
if self.pts_nextplaying:
self.pts_currplaying = self.pts_nextplaying
self.pts_nextplaying = self.pts_currplaying + 1
# Get next pts file ...
# print ("!!! %spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_nextplaying))
if fileExists("%spts_livebuffer_%s" % (config.usage.timeshift_path.value,self.pts_nextplaying), 'r'):
# print '!!!!! TEST1'
self.ptsSetNextPlaybackFile("pts_livebuffer_%s" % self.pts_nextplaying)
self.pts_switchtolive = False
else:
self.ptsSetNextPlaybackFile("")
self.pts_switchtolive = True
# print ('[TIMESHIFT] - pts_currplaying %s, pts_nextplaying %s, pts_eventcount %s, pts_firstplayable %s' % (self.pts_currplaying, self.pts_nextplaying, self.pts_eventcount, self.pts_firstplayable))
def ptsSetNextPlaybackFile(self, nexttsfile):
# print '!!!!! ptsSetNextPlaybackFile'
ts = self.getTimeshift()
if ts is None:
return
# print ("!!! SET NextPlaybackFile%s%s" % (config.usage.timeshift_path.value,nexttsfile))
ts.setNextPlaybackFile("%s%s" % (config.usage.timeshift_path.value,nexttsfile))
def ptsSeekBackTimer(self):
# print '!!!!! ptsSeekBackTimer RUN'
self.doSeek(-90000*10) # seek ~10s before end
self.setSeekState(self.SEEK_STATE_PAUSE)
self.pts_StartSeekBackTimer.start(1000, True)
def ptsStartSeekBackTimer(self):
# print '!!!!! ptsStartSeekBackTimer RUN'
if self.pts_lastseekspeed == 0:
self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
else:
self.setSeekState(self.makeStateBackward(int(-self.pts_lastseekspeed)))
def ptsCheckTimeshiftPath(self):
if fileExists(config.usage.timeshift_path.value, 'w'):
return True
else:
# Notifications.AddNotification(MessageBox, _("Could not activate Autorecord-Timeshift!\nTimeshift-Path does not exist"), MessageBox.TYPE_ERROR, timeout=15)
if self.pts_delay_timer.isActive():
self.pts_delay_timer.stop()
if self.pts_cleanUp_timer.isActive():
self.pts_cleanUp_timer.stop()
return False
def ptsTimerEntryStateChange(self, timer):
# print 'ptsTimerEntryStateChange'
if not config.timeshift.stopwhilerecording.value:
return
self.pts_record_running = self.session.nav.RecordTimer.isRecording()
# Abort here when box is in standby mode
if self.session.screen["Standby"].boolean is True:
return
# Stop Timeshift when Record started ...
if timer.state == TimerEntry.StateRunning and self.timeshiftEnabled() and self.pts_record_running:
if self.seekstate != self.SEEK_STATE_PLAY:
self.setSeekState(self.SEEK_STATE_PLAY)
if self.isSeekable():
Notifications.AddNotification(MessageBox,_("Record started! Stopping timeshift now ..."), MessageBox.TYPE_INFO, timeout=30)
self.switchToLive = False
self.stopTimeshiftcheckTimeshiftRunningCallback(True)
# Restart Timeshift when all records stopped
if timer.state == TimerEntry.StateEnded and not self.timeshiftEnabled() and not self.pts_record_running:
self.autostartAutorecordTimeshift()
# Restart Merge-Timer when all records stopped
if timer.state == TimerEntry.StateEnded and self.pts_mergeRecords_timer.isActive():
self.pts_mergeRecords_timer.stop()
self.pts_mergeRecords_timer.start(15000, True)
# Restart FrontPanel LED when still copying or merging files
# ToDo: Only do this on PTS Events and not events from other jobs
if timer.state == TimerEntry.StateEnded and (len(JobManager.getPendingJobs()) >= 1 or self.pts_mergeRecords_timer.isActive()):
self.ptsFrontpanelActions("start")
config.timeshift.isRecording.value = True
def ptsLiveTVStatus(self):
service = self.session.nav.getCurrentService()
info = service and service.info()
sTSID = info and info.getInfo(iServiceInformation.sTSID) or -1
if sTSID is None or sTSID == -1:
return False
else:
return True
|
libfuse/python-fuse
|
refs/heads/master
|
example/hello.py
|
1
|
#!/usr/bin/env python
# Copyright (C) 2006 Andrew Straw <strawman@astraw.com>
#
# This program can be distributed under the terms of the GNU LGPL.
# See the file COPYING.
#
import os, stat, errno
# pull in some spaghetti to make this stuff work without fuse-py being installed
try:
import _find_fuse_parts
except ImportError:
pass
import fuse
from fuse import Fuse
if not hasattr(fuse, '__version__'):
raise RuntimeError("your fuse-py doesn't know of fuse.__version__, probably it's too old.")
fuse.fuse_python_api = (0, 2)
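# (0, 2) selects the stable 0.2 binding API; fuse-py also ships a legacy 0.1
# compatibility interface, so the desired API version must be set explicitly.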
hello_path = '/hello'
hello_str = b'Hello World!\n'
class MyStat(fuse.Stat):
def __init__(self):
self.st_mode = 0
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 0
self.st_uid = 0
self.st_gid = 0
self.st_size = 0
self.st_atime = 0
self.st_mtime = 0
self.st_ctime = 0
class HelloFS(Fuse):
def getattr(self, path):
st = MyStat()
if path == '/':
st.st_mode = stat.S_IFDIR | 0o755
st.st_nlink = 2
elif path == hello_path:
st.st_mode = stat.S_IFREG | 0o444
st.st_nlink = 1
st.st_size = len(hello_str)
else:
return -errno.ENOENT
return st
def readdir(self, path, offset):
for r in '.', '..', hello_path[1:]:
yield fuse.Direntry(r)
def open(self, path, flags):
if path != hello_path:
return -errno.ENOENT
accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
if (flags & accmode) != os.O_RDONLY:
return -errno.EACCES
def read(self, path, size, offset):
if path != hello_path:
return -errno.ENOENT
slen = len(hello_str)
if offset < slen:
if offset + size > slen:
size = slen - offset
buf = hello_str[offset:offset+size]
else:
buf = b''
return buf
def main():
usage="""
Userspace hello example
""" + Fuse.fusage
server = HelloFS(version="%prog " + fuse.__version__,
usage=usage,
dash_s_do='setsingle')
server.parse(errex=1)
server.main()
if __name__ == '__main__':
main()
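# Illustrative usage from a shell (the mount point path is an example):
#   mkdir /tmp/hellofs
#   python hello.py /tmp/hellofs
#   cat /tmp/hellofs/hello      # prints "Hello World!"
#   fusermount -u /tmp/hellofs  # unmount when done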
|
clumsy/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertMethodToPropertyIntentionTest/emptyReturn.py
|
83
|
class A():
def mo<caret>o(self):
return
|
CivicTechTO/open-cabinet
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/staticfiles/templatetags/__init__.py
|
12133432
| |
delinhabit/django
|
refs/heads/master
|
tests/view_tests/templatetags/__init__.py
|
12133432
| |
haystack/eyebrowse-server
|
refs/heads/master
|
notifications/management/__init__.py
|
12133432
| |
enigmampc/catalyst
|
refs/heads/master
|
catalyst/examples/mean_reversion_simple_custom_fees.py
|
1
|
# For this example, we're going to write a simple mean reversion script.
# When the RSI indicates the market is oversold, we're going to buy; when
# it indicates the market is overbought, we're going to sell.
import os
import tempfile
import time
import numpy as np
import pandas as pd
import talib
from logbook import Logger
from catalyst import run_algorithm
from catalyst.api import symbol, record, order_target_percent, get_open_orders
from catalyst.exchange.utils.stats_utils import extract_transactions
# We give a name to the algorithm which Catalyst will use to persist its state.
# In this example, Catalyst will create the `.catalyst/data/live_algos`
# directory. If we stop and start the algorithm, Catalyst will resume its
# state using the files included in the folder.
from catalyst.utils.paths import ensure_directory
NAMESPACE = 'mean_reversion_simple'
log = Logger(NAMESPACE)
# To run an algorithm in Catalyst, you need two functions: initialize and
# handle_data.
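# A minimal sketch of those two functions looks like this (the 'price'
# field is illustrative; the real versions below add the strategy logic):
#
#   def initialize(context):
#       context.market = symbol('eth_btc')
#
#   def handle_data(context, data):
#       price = data.current(context.market, 'price')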
def initialize(context):
# This initialize function sets any data or variables that you'll use in
# your algorithm. For instance, you'll want to define the trading pair (or
# trading pairs) you want to backtest. You'll also want to define any
# parameters or values you're going to use.
    # In our example, we're looking at Ether priced in Bitcoin.
context.market = symbol('eth_btc')
context.base_price = None
context.current_day = None
context.RSI_OVERSOLD = 50
context.RSI_OVERBOUGHT = 60
context.CANDLE_SIZE = '5T'
context.start_time = time.time()
context.set_commission(maker=0.001, taker=0.002)
# context.set_slippage(slippage=0.001)
def handle_data(context, data):
# This handle_data function is where the real work is done. Our data is
# minute-level tick data, and each minute is called a frame. This function
# runs on each frame of the data.
# We flag the first period of each day.
    # Since cryptocurrencies trade 24/7, the `before_trading_starts` handler
    # would only execute once. This method works with minute and daily
    # frequencies.
today = data.current_dt.floor('1D')
if today != context.current_day:
context.traded_today = False
context.current_day = today
    # We request the trailing close prices of the security defined above, in
    # the context.market variable. For this example, we pull 50 bars at the
    # 5-minute bar size set by context.CANDLE_SIZE.
    # The frequency attribute determines the bar size. We use this convention
# for the frequency alias:
# http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
prices = data.history(
context.market,
fields='close',
bar_count=50,
frequency=context.CANDLE_SIZE
)
    # Ta-lib calculates various technical indicators based on price and
    # volume arrays.
    # In this example, we are computing the 14-period RSI (Relative Strength
    # Index) from the close prices fetched above.
rsi = talib.RSI(prices.values, timeperiod=14)
    # We need a variable for the current price of the security to compare to
    # the average. Since we are requesting two fields, data.current()
    # returns a Series indexed by the requested fields.
current = data.current(context.market, fields=['close', 'volume'])
price = current['close']
# If base_price is not set, we use the current value. This is the
# price at the first bar which we reference to calculate price_change.
if context.base_price is None:
context.base_price = price
price_change = (price - context.base_price) / context.base_price
cash = context.portfolio.cash
# Now that we've collected all current data for this frame, we use
# the record() method to save it. This data will be available as
# a parameter of the analyze() function for further analysis.
record(
volume=current['volume'],
price=price,
price_change=price_change,
rsi=rsi[-1],
cash=cash
)
# We are trying to avoid over-trading by limiting our trades to
# one per day.
if context.traded_today:
return
# TODO: retest with open orders
    # Since we are using limit orders, some orders may not execute immediately;
    # we wait until all orders are executed before considering more trades.
orders = get_open_orders(context.market)
if len(orders) > 0:
log.info('exiting because orders are open: {}'.format(orders))
return
# Exit if we cannot trade
if not data.can_trade(context.market):
return
# Another powerful built-in feature of the Catalyst backtester is the
# portfolio object. The portfolio object tracks your positions, cash,
# cost basis of specific holdings, and more. In this line, we calculate
# how long or short our position is at this minute.
pos_amount = context.portfolio.positions[context.market].amount
if rsi[-1] <= context.RSI_OVERSOLD and pos_amount == 0:
log.info(
'{}: buying - price: {}, rsi: {}'.format(
data.current_dt, price, rsi[-1]
)
)
        # Set a limit price slightly above the current price so the buy
        # order is likely to fill.
limit_price = price * 1.005
order_target_percent(
context.market, 1, limit_price=limit_price
)
context.traded_today = True
elif rsi[-1] >= context.RSI_OVERBOUGHT and pos_amount > 0:
log.info(
'{}: selling - price: {}, rsi: {}'.format(
data.current_dt, price, rsi[-1]
)
)
limit_price = price * 0.995
order_target_percent(
context.market, 0, limit_price=limit_price
)
context.traded_today = True
def analyze(context=None, perf=None):
end = time.time()
log.info('elapsed time: {}'.format(end - context.start_time))
import matplotlib.pyplot as plt
# The quote currency of the algo exchange
quote_currency = list(context.exchanges.values())[0].quote_currency.upper()
# Plot the portfolio value over time.
ax1 = plt.subplot(611)
perf.loc[:, 'portfolio_value'].plot(ax=ax1)
ax1.set_ylabel('Portfolio\nValue\n({})'.format(quote_currency))
# Plot the price increase or decrease over time.
ax2 = plt.subplot(612, sharex=ax1)
perf.loc[:, 'price'].plot(ax=ax2, label='Price')
ax2.set_ylabel('{asset}\n({quote})'.format(
asset=context.market.symbol, quote=quote_currency
))
transaction_df = extract_transactions(perf)
if not transaction_df.empty:
buy_df = transaction_df[transaction_df['amount'] > 0]
sell_df = transaction_df[transaction_df['amount'] < 0]
ax2.scatter(
buy_df.index.to_pydatetime(),
perf.loc[buy_df.index.floor('1 min'), 'price'],
marker='^',
s=100,
c='green',
label=''
)
ax2.scatter(
sell_df.index.to_pydatetime(),
perf.loc[sell_df.index.floor('1 min'), 'price'],
marker='v',
s=100,
c='red',
label=''
)
ax4 = plt.subplot(613, sharex=ax1)
perf.loc[:, 'cash'].plot(
ax=ax4, label='Quote Currency ({})'.format(quote_currency)
)
ax4.set_ylabel('Cash\n({})'.format(quote_currency))
perf['algorithm'] = perf.loc[:, 'algorithm_period_return']
ax5 = plt.subplot(614, sharex=ax1)
perf.loc[:, ['algorithm', 'price_change']].plot(ax=ax5)
ax5.set_ylabel('Percent\nChange')
ax6 = plt.subplot(615, sharex=ax1)
perf.loc[:, 'rsi'].plot(ax=ax6, label='RSI')
ax6.set_ylabel('RSI')
ax6.axhline(context.RSI_OVERBOUGHT, color='darkgoldenrod')
ax6.axhline(context.RSI_OVERSOLD, color='darkgoldenrod')
if not transaction_df.empty:
ax6.scatter(
buy_df.index.to_pydatetime(),
perf.loc[buy_df.index.floor('1 min'), 'rsi'],
marker='^',
s=100,
c='green',
label=''
)
ax6.scatter(
sell_df.index.to_pydatetime(),
perf.loc[sell_df.index.floor('1 min'), 'rsi'],
marker='v',
s=100,
c='red',
label=''
)
plt.legend(loc=3)
start, end = ax6.get_ylim()
ax6.yaxis.set_ticks(np.arange(0, end, end / 5))
# Show the plot.
plt.gcf().set_size_inches(18, 8)
plt.show()
if __name__ == '__main__':
# The execution mode: backtest or live
live = False
if live:
run_algorithm(
capital_base=0.025,
initialize=initialize,
handle_data=handle_data,
analyze=analyze,
exchange_name='poloniex',
live=True,
algo_namespace=NAMESPACE,
quote_currency='btc',
live_graph=False,
simulate_orders=False,
stats_output=None,
)
else:
folder = os.path.join(
tempfile.gettempdir(), 'catalyst', NAMESPACE
)
ensure_directory(folder)
timestr = time.strftime('%Y%m%d-%H%M%S')
out = os.path.join(folder, '{}.p'.format(timestr))
# catalyst run -f catalyst/examples/mean_reversion_simple.py \
# -x bitfinex -s 2017-10-1 -e 2017-11-10 -c usdt -n mean-reversion \
# --data-frequency minute --capital-base 10000
run_algorithm(
capital_base=0.1,
data_frequency='minute',
initialize=initialize,
handle_data=handle_data,
analyze=analyze,
exchange_name='bitfinex',
algo_namespace=NAMESPACE,
quote_currency='eth',
start=pd.to_datetime('2017-10-01', utc=True),
end=pd.to_datetime('2017-11-10', utc=True),
output=out
)
log.info('saved perf stats: {}'.format(out))
|
mikewiebe-ansible/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/ovirt.py
|
14
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import inspect
import os
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.common._collections_compat import Mapping
try:
from enum import Enum # enum is a ovirtsdk4 requirement
import ovirtsdk4 as sdk
import ovirtsdk4.version as sdk_version
import ovirtsdk4.types as otypes
HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.3.0')
except ImportError:
HAS_SDK = False
BYTES_MAP = {
'kib': 2**10,
'mib': 2**20,
'gib': 2**30,
'tib': 2**40,
'pib': 2**50,
}
def check_sdk(module):
if not HAS_SDK:
module.fail_json(
msg='ovirtsdk4 version 4.3.0 or higher is required for this module'
)
def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
"""
Convert SDK Struct type into dictionary.
"""
res = {}
def resolve_href(value):
# Fetch nested values of struct:
try:
value = connection.follow_link(value)
except sdk.Error:
value = None
nested_obj = dict(
(attr, convert_value(getattr(value, attr)))
for attr in attributes if getattr(value, attr, None) is not None
)
nested_obj['id'] = getattr(value, 'id', None)
nested_obj['href'] = getattr(value, 'href', None)
return nested_obj
    def remove_underscore(val):
        while val.startswith('_'):
            val = val[1:]
        return val
def convert_value(value):
nested = False
if isinstance(value, sdk.Struct):
if not fetch_nested or not value.href:
return get_dict_of_struct(value)
return resolve_href(value)
elif isinstance(value, Enum) or isinstance(value, datetime):
return str(value)
elif isinstance(value, list) or isinstance(value, sdk.List):
if isinstance(value, sdk.List) and fetch_nested and value.href:
try:
value = connection.follow_link(value)
nested = True
except sdk.Error:
value = []
ret = []
for i in value:
if isinstance(i, sdk.Struct):
if not nested and fetch_nested and i.href:
ret.append(resolve_href(i))
elif not nested:
ret.append(get_dict_of_struct(i))
else:
nested_obj = dict(
(attr, convert_value(getattr(i, attr)))
for attr in attributes if getattr(i, attr, None)
)
nested_obj['id'] = getattr(i, 'id', None)
ret.append(nested_obj)
elif isinstance(i, Enum):
ret.append(str(i))
else:
ret.append(i)
return ret
else:
return value
if struct is not None:
for key, value in struct.__dict__.items():
if value is None:
continue
key = remove_underscore(key)
res[key] = convert_value(value)
return res
def engine_version(connection):
"""
Return string representation of oVirt engine version.
"""
engine_api = connection.system_service().get()
engine_version = engine_api.product_info.version
return '%s.%s' % (engine_version.major, engine_version.minor)
def create_connection(auth):
"""
Create a connection to Python SDK, from task `auth` parameter.
    If the user doesn't have an SSO token, the `auth` dictionary must contain:
    url, username, password
    If the user has an SSO token, the `auth` dictionary must contain:
    url, token
    The `ca_file` parameter is mandatory when the user wants a secure connection;
    for an insecure connection, it's mandatory to send insecure=True.
:param auth: dictionary which contains needed values for connection creation
:return: Python SDK connection
"""
url = auth.get('url')
if url is None and auth.get('hostname') is not None:
url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname'))
return sdk.Connection(
url=url,
username=auth.get('username'),
password=auth.get('password'),
ca_file=auth.get('ca_file', None),
insecure=auth.get('insecure', False),
token=auth.get('token', None),
kerberos=auth.get('kerberos', None),
headers=auth.get('headers', None),
)
def convert_to_bytes(param):
"""
    This method converts units to bytes, following the IEC standard.
:param param: value to be converted
"""
if param is None:
return None
# Get rid of whitespaces:
param = ''.join(param.split())
# Convert to bytes:
if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
elif param.isdigit():
return int(param) * 2**10
else:
raise ValueError(
"Unsupported value(IEC supported): '{value}'".format(value=param)
)
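# Illustrative conversions (derived from BYTES_MAP above):
#   convert_to_bytes('1 GiB')   -> 1073741824
#   convert_to_bytes('512 MiB') -> 536870912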
def follow_link(connection, link):
"""
This method returns the entity of the element which link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: entity which link points to
"""
if link:
return connection.follow_link(link)
else:
return None
def get_link_name(connection, link):
"""
This method returns the name of the element which link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: name of the entity, which link points to
"""
if link:
return connection.follow_link(link).name
else:
return None
def equal(param1, param2, ignore_case=False):
"""
    Compare two parameters and return whether they are equal.
    This function doesn't run the comparison if the first parameter is None.
    With this approach we skip the comparison when the user doesn't specify
    the parameter in their task.
:param param1: user inputted parameter
:param param2: value of entity parameter
:return: True if parameters are equal or first parameter is None, otherwise False
"""
if param1 is not None:
if ignore_case:
return param1.lower() == param2.lower()
return param1 == param2
return True
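# Illustrative behaviour:
#   equal(None, 'x')                  -> True  (parameter not specified)
#   equal('A', 'a')                   -> False
#   equal('A', 'a', ignore_case=True) -> True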
def search_by_attributes(service, list_params=None, **kwargs):
"""
Search for the entity by attributes. Nested entities don't support search
via REST, so in case using search for nested entity we return all entities
and filter them by specified attributes.
"""
list_params = list_params or {}
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
            # There must be double quotes around the values, because it's possible to create some oVirt resources with a space in the name.
search=' and '.join('{0}="{1}"'.format(k, v) for k, v in kwargs.items()),
**list_params
)
else:
res = [
e for e in service.list(**list_params) if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def search_by_name(service, name, **kwargs):
"""
Search for the entity by its name. Nested entities don't support search
via REST, so in case using search for nested entity we return all entities
and filter them by name.
:param service: service of the entity
:param name: name of the entity
:return: Entity object returned by Python SDK
"""
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
            # There must be double quotes around the name, because it's possible to create some oVirt resources with a space in the name.
search='name="{name}"'.format(name=name)
)
else:
res = [e for e in service.list() if e.name == name]
if kwargs:
res = [
e for e in service.list() if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def get_entity(service, get_params=None):
"""
Ignore SDK Error in case of getting an entity from service.
"""
entity = None
try:
if get_params is not None:
entity = service.get(**get_params)
else:
entity = service.get()
except sdk.Error:
# We can get here 404, we should ignore it, in case
# of removing entity for example.
pass
return entity
def get_id_by_name(service, name, raise_error=True, ignore_case=False):
"""
    Search an entity ID by its name.
"""
entity = search_by_name(service, name)
if entity is not None:
return entity.id
if raise_error:
raise Exception("Entity '%s' was not found." % name)
def wait(
service,
condition,
fail_condition=lambda e: False,
timeout=180,
wait=True,
poll_interval=3,
):
"""
Wait until entity fulfill expected condition.
:param service: service of the entity
:param condition: condition to be fulfilled
:param fail_condition: if this condition is true, raise Exception
:param timeout: max time to wait in seconds
:param wait: if True wait for condition, if False don't wait
:param poll_interval: Number of seconds we should wait until next condition check
"""
# Wait until the desired state of the entity:
if wait:
start = time.time()
while time.time() < start + timeout:
# Exit if the condition of entity is valid:
entity = get_entity(service)
if condition(entity):
return
elif fail_condition(entity):
raise Exception("Error while waiting on result state of the entity.")
# Sleep for `poll_interval` seconds if none of the conditions apply:
time.sleep(float(poll_interval))
raise Exception("Timeout exceed while waiting on result state of the entity.")
def __get_auth_dict():
OVIRT_URL = os.environ.get('OVIRT_URL')
OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME')
OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
OVIRT_INSECURE = OVIRT_CAFILE is None
env_vars = None
if OVIRT_URL is None and OVIRT_HOSTNAME is not None:
OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME)
if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
env_vars = {
'url': OVIRT_URL,
'username': OVIRT_USERNAME,
'password': OVIRT_PASSWORD,
'insecure': OVIRT_INSECURE,
'token': OVIRT_TOKEN,
'ca_file': OVIRT_CAFILE,
}
if env_vars is not None:
auth = dict(default=env_vars, type='dict')
else:
auth = dict(required=True, type='dict')
return auth
def ovirt_info_full_argument_spec(**kwargs):
"""
Extend parameters of info module with parameters which are common to all
oVirt info modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
# Left for third-party module compatibility
def ovirt_facts_full_argument_spec(**kwargs):
"""
This is deprecated. Please use ovirt_info_full_argument_spec instead!
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
return ovirt_info_full_argument_spec(**kwargs)
def ovirt_full_argument_spec(**kwargs):
"""
Extend parameters of module with parameters which are common to all oVirt modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
timeout=dict(default=180, type='int'),
wait=dict(default=True, type='bool'),
poll_interval=dict(default=3, type='int'),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
def check_params(module):
"""
Most modules must have either `name` or `id` specified.
"""
if module.params.get('name') is None and module.params.get('id') is None:
module.fail_json(msg='"name" or "id" is required')
def engine_supported(connection, version):
return LooseVersion(engine_version(connection)) >= LooseVersion(version)
def check_support(version, connection, module, params):
"""
Check if parameters used by user are supported by oVirt Python SDK
and oVirt engine.
"""
api_version = LooseVersion(engine_version(connection))
version = LooseVersion(version)
for param in params:
if module.params.get(param) is not None:
return LooseVersion(sdk_version.VERSION) >= version and api_version >= version
return True
class BaseModule(object):
"""
    This is the base class for oVirt modules. oVirt modules should inherit
    this class and override its methods to customize the specific needs of
    the module. The only abstract method of this class is `build_entity`,
    which must be implemented in the child class.
"""
__metaclass__ = ABCMeta
def __init__(self, connection, module, service, changed=False):
self._connection = connection
self._module = module
self._service = service
self._changed = changed
self._diff = {'after': dict(), 'before': dict()}
@property
def changed(self):
return self._changed
@changed.setter
def changed(self, changed):
if not self._changed:
self._changed = changed
@abstractmethod
def build_entity(self):
"""
This method should return oVirt Python SDK type, which we want to
create or update, initialized by values passed by Ansible module.
For example if we want to create VM, we will return following:
types.Vm(name=self._module.params['vm_name'])
:return: Specific instance of sdk.Struct.
"""
pass
def param(self, name, default=None):
"""
        Return a module parameter specified by its name.
"""
return self._module.params.get(name, default)
def update_check(self, entity):
"""
        This method checks whether the entity values are the same as the
        values passed to the Ansible module. By default we don't compare
        any values.
        :param entity: Entity we want to compare with Ansible module values.
        :return: True if values are the same, so we don't need to update the entity.
"""
return True
def pre_create(self, entity):
"""
This method is called right before entity is created.
:param entity: Entity to be created or updated.
"""
pass
def post_create(self, entity):
"""
This method is called right after entity is created.
:param entity: Entity which was created.
"""
pass
def post_update(self, entity):
"""
This method is called right after entity is updated.
:param entity: Entity which was updated.
"""
pass
def diff_update(self, after, update):
for k, v in update.items():
if isinstance(v, Mapping):
after[k] = self.diff_update(after.get(k, dict()), v)
else:
after[k] = update[k]
return after
def create(
self,
entity=None,
result_state=None,
fail_condition=lambda e: False,
search_params=None,
update_params=None,
_wait=None,
force_create=False,
**kwargs
):
"""
        Method which is called when state of the entity is 'present'. If the
        user doesn't provide the `entity` parameter, the entity is searched
        for using the `search_params` parameter. If the entity is found, it's
        updated; whether the entity should be updated is checked by the
        `update_check` method. The corresponding updated entity is built by
        the `build_entity` method.
        A function executed after the entity is created can optionally be
        specified via the `post_create` hook. A function executed after the
        entity is updated can optionally be specified via the `post_update` hook.
:param entity: Entity we want to update, if exists.
        :param result_state: State which the entity should have in order to finish the task.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param search_params: Dictionary of parameters to be used for search.
:param update_params: The params which should be passed to update method.
:param kwargs: Additional parameters passed when creating entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None and not force_create:
entity = self.search_entity(search_params)
self.pre_create(entity)
if entity:
# Entity exists, so update it:
entity_service = self._service.service(entity.id)
if not self.update_check(entity):
new_entity = self.build_entity()
if not self._module.check_mode:
update_params = update_params or {}
updated_entity = entity_service.update(
new_entity,
**update_params
)
self.post_update(entity)
                # Update diffs only if the user specified the --diff parameter,
                # so we don't needlessly overload the API:
if self._module._diff:
before = get_dict_of_struct(
entity,
self._connection,
fetch_nested=True,
attributes=['name'],
)
after = before.copy()
self.diff_update(after, get_dict_of_struct(new_entity))
self._diff['before'] = before
self._diff['after'] = after
self.changed = True
else:
            # Entity doesn't exist, so create it:
if not self._module.check_mode:
entity = self._service.add(
self.build_entity(),
**kwargs
)
self.post_create(entity)
self.changed = True
if not self._module.check_mode:
# Wait for the entity to be created and to be in the defined state:
entity_service = self._service.service(entity.id)
def state_condition(entity):
return entity
if result_state:
def state_condition(entity):
return entity and entity.status == result_state
wait(
service=entity_service,
condition=state_condition,
fail_condition=fail_condition,
wait=_wait if _wait is not None else self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': getattr(entity, 'id', None),
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def pre_remove(self, entity):
"""
This method is called right before entity is removed.
:param entity: Entity which we want to remove.
"""
pass
def entity_name(self, entity):
return "{e_type} '{e_name}'".format(
e_type=type(entity).__name__.lower(),
e_name=getattr(entity, 'name', None),
)
def remove(self, entity=None, search_params=None, **kwargs):
"""
        Method which is called when state of the entity is 'absent'. If the
        user doesn't provide the `entity` parameter, the entity is searched
        for using the `search_params` parameter. If the entity is found, it's
        removed.
        A function executed before the removal can optionally be provided by
        overriding the `pre_remove` method.
:param entity: Entity we want to remove.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed when removing entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
if entity is None:
return {
'changed': self.changed,
'msg': "Entity wasn't found."
}
self.pre_remove(entity)
entity_service = self._service.service(entity.id)
if not self._module.check_mode:
entity_service.remove(**kwargs)
wait(
service=entity_service,
condition=lambda entity: not entity,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
self.changed = True
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
}
def action(
self,
action,
entity=None,
action_condition=lambda e: e,
wait_condition=lambda e: e,
fail_condition=lambda e: False,
pre_action=lambda e: e,
post_action=lambda e: None,
search_params=None,
**kwargs
):
"""
This method is executed when we want to change the state of some oVirt
entity. The action to be executed on oVirt service is specified by
`action` parameter. Whether the action should be executed can be
specified by passing `action_condition` parameter. State which the
entity should be in after execution of the action can be specified
by `wait_condition` parameter.
        A function executed before the action on the entity can optionally be
        specified via the `pre_action` parameter. A function executed after
        the action on the entity can optionally be specified via the
        `post_action` parameter.
:param action: Action which should be executed by service on entity.
:param entity: Entity we want to run action on.
:param action_condition: Function which is executed when checking if action should be executed.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param wait_condition: Function which is executed when waiting on result state.
:param pre_action: Function which is executed before running the action.
:param post_action: Function which is executed after running the action.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed to action.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
entity = pre_action(entity)
if entity is None:
self._module.fail_json(
msg="Entity not found, can't run action '{0}'.".format(
action
)
)
entity_service = self._service.service(entity.id)
entity = entity_service.get()
if action_condition(entity):
if not self._module.check_mode:
getattr(entity_service, action)(**kwargs)
self.changed = True
post_action(entity)
wait(
service=self._service.service(entity.id),
condition=wait_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def wait_for_import(self, condition=lambda e: True):
if self._module.params['wait']:
start = time.time()
timeout = self._module.params['timeout']
poll_interval = self._module.params['poll_interval']
while time.time() < start + timeout:
entity = self.search_entity()
if entity and condition(entity):
return entity
time.sleep(poll_interval)
def search_entity(self, search_params=None, list_params=None):
"""
        Always try to search by `id` first; if no ID is specified, check
        whether the user constructed a special search in `search_params`;
        if not, search by `name`.
"""
entity = None
if 'id' in self._module.params and self._module.params['id'] is not None:
entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
elif search_params is not None:
entity = search_by_attributes(self._service, list_params=list_params, **search_params)
elif self._module.params.get('name') is not None:
entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])
return entity
def _get_major(self, full_version):
if full_version is None or full_version == "":
return None
if isinstance(full_version, otypes.Version):
return int(full_version.major)
return int(full_version.split('.')[0])
def _get_minor(self, full_version):
if full_version is None or full_version == "":
return None
if isinstance(full_version, otypes.Version):
return int(full_version.minor)
return int(full_version.split('.')[1])
def _sdk4_error_maybe():
"""
Allow for ovirtsdk4 not being installed.
"""
if HAS_SDK:
return sdk.Error
return type(None)
class OvirtRetry(CloudRetry):
base_class = _sdk4_error_maybe()
@staticmethod
def status_code_from_exception(error):
return error.code
@staticmethod
def found(response_code, catch_extra_error_codes=None):
# This is a list of error codes to retry.
retry_on = [
# HTTP status: Conflict
409,
]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
return response_code in retry_on
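# Illustrative usage as a decorator (the backoff parameters shown are
# assumptions about CloudRetry's interface):
#   @OvirtRetry.backoff(tries=3, delay=2)
#   def remove_entity(service):
#       service.remove()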
|
bhargav/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_check_build.py
|
312
|
"""
Smoke Test the check_build module
"""
# Author: G Varoquaux
# Licence: BSD 3 clause
from sklearn.__check_build import raise_build_error
from sklearn.utils.testing import assert_raises
def test_raise_build_error():
assert_raises(ImportError, raise_build_error, ImportError())
|
neuroidss/nupic.vision
|
refs/heads/master
|
image_encoders.py
|
1
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
These routines convert images to bit vectors that can be used as input to
the spatial pooler.
"""
import numpy
from PIL import Image
def imageToVector(image):
'''
Returns a bit vector representation (list of ints) of a PIL image.
'''
# Convert the image to black and white
    image = image.convert('1', dither=Image.NONE)
    # Pull out the data, turn that into a list, then a numpy array,
    # then convert from 0-255 space to binary with a threshold.
    # Finally, cast the values into a type the C++ code likes.
vector = (numpy.array(list(image.getdata())) < 100).astype('uint32')
return vector
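# Illustrative usage (the file name is hypothetical):
#   image = Image.open('digit.png')
#   vector = imageToVector(image)  # numpy uint32 array of 0s and 1s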
def imagesToVectors(images):
vectors = [imageToVector(image) for image in images]
return vectors
|
showell/zulip
|
refs/heads/master
|
zerver/migrations/0086_realm_alter_default_org_type.py
|
7
|
# Generated by Django 1.11.2 on 2017-06-26 21:56
from django.db import migrations, models
CORPORATE = 1
class Migration(migrations.Migration):
dependencies = [
('zerver', '0085_fix_bots_with_none_bot_type'),
]
operations = [
migrations.AlterField(
model_name='realm',
name='org_type',
field=models.PositiveSmallIntegerField(default=CORPORATE),
),
]
|
sgerhart/ansible
|
refs/heads/maintenance_policy_module
|
lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py
|
25
|
#!/usr/bin/python
#
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqlfirewallrule
version_added: "2.8"
short_description: Manage MySQL firewall rule instance.
description:
- Create, update and delete instance of MySQL firewall rule.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the MySQL firewall rule.
required: True
start_ip_address:
description:
- The start IP address of the MySQL firewall rule. Must be IPv4 format.
end_ip_address:
description:
- The end IP address of the MySQL firewall rule. Must be IPv4 format.
state:
description:
- Assert the state of the MySQL firewall rule. Use 'present' to create or update a rule and 'absent' to ensure it is not present.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) MySQL firewall rule
azure_rm_mysqlfirewallrule:
resource_group: TestGroup
server_name: testserver
name: rule1
start_ip_address: 10.0.0.17
end_ip_address: 10.0.0.20
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testserver/firewallRules/rule1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMFirewallRules(AzureRMModuleBase):
"""Configuration class for an Azure RM MySQL firewall rule resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
start_ip_address=dict(
type='str'
),
end_ip_address=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.server_name = None
self.name = None
self.start_ip_address = None
self.end_ip_address = None
self.results = dict(changed=False)
self.state = None
self.to_do = Actions.NoAction
super(AzureRMFirewallRules, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = None
response = None
resource_group = self.get_resource_group(self.resource_group)
old_response = self.get_firewallrule()
if not old_response:
self.log("MySQL firewall rule instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("MySQL firewall rule instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if MySQL firewall rule instance has to be deleted or may be updated")
if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
self.to_do = Actions.Update
if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the MySQL firewall rule instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_firewallrule()
if not old_response:
self.results['changed'] = True
else:
                self.results['changed'] = (old_response != response)
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("MySQL firewall rule instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_firewallrule()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_firewallrule():
time.sleep(20)
else:
self.log("MySQL firewall rule instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
def create_update_firewallrule(self):
'''
Creates or updates MySQL firewall rule with the specified configuration.
:return: deserialized MySQL firewall rule instance state dictionary
'''
self.log("Creating / Updating the MySQL firewall rule instance {0}".format(self.name))
try:
response = self.mysql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name,
start_ip_address=self.start_ip_address,
end_ip_address=self.end_ip_address)
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the MySQL firewall rule instance.')
self.fail("Error creating the MySQL firewall rule instance: {0}".format(str(exc)))
return response.as_dict()
def delete_firewallrule(self):
'''
Deletes specified MySQL firewall rule instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the MySQL firewall rule instance {0}".format(self.name))
try:
response = self.mysql_client.firewall_rules.delete(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the MySQL firewall rule instance.')
self.fail("Error deleting the MySQL firewall rule instance: {0}".format(str(e)))
return True
def get_firewallrule(self):
'''
Gets the properties of the specified MySQL firewall rule.
:return: deserialized MySQL firewall rule instance state dictionary
'''
self.log("Checking if the MySQL firewall rule instance {0} is present".format(self.name))
found = False
try:
response = self.mysql_client.firewall_rules.get(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("MySQL firewall rule instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the MySQL firewall rule instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMFirewallRules()
if __name__ == '__main__':
main()
|
timothydmorton/bokeh
|
refs/heads/master
|
examples/compat/ggplot/xkcd_density.py
|
34
|
from ggplot import aes, diamonds, geom_density, ggplot
import matplotlib.pyplot as plt
from bokeh import mpl
from bokeh.plotting import output_file, show
g = ggplot(diamonds, aes(x='price', color='cut')) + geom_density()
g.draw()
plt.title("xkcd-ggplot-mpl based plot in Bokeh.")
output_file("xkcd_density.html")
show(mpl.to_bokeh(xkcd=True))
|
yograterol/django
|
refs/heads/master
|
django/contrib/postgres/validators.py
|
458
|
import copy
from django.core.exceptions import ValidationError
from django.core.validators import (
MaxLengthValidator, MaxValueValidator, MinLengthValidator,
MinValueValidator,
)
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
class ArrayMaxLengthValidator(MaxLengthValidator):
message = ungettext_lazy(
'List contains %(show_value)d item, it should contain no more than %(limit_value)d.',
'List contains %(show_value)d items, it should contain no more than %(limit_value)d.',
'limit_value')
class ArrayMinLengthValidator(MinLengthValidator):
message = ungettext_lazy(
'List contains %(show_value)d item, it should contain no fewer than %(limit_value)d.',
'List contains %(show_value)d items, it should contain no fewer than %(limit_value)d.',
'limit_value')
@deconstructible
class KeysValidator(object):
"""A validator designed for HStore to require/restrict keys."""
messages = {
'missing_keys': _('Some keys were missing: %(keys)s'),
'extra_keys': _('Some unknown keys were provided: %(keys)s'),
}
strict = False
def __init__(self, keys, strict=False, messages=None):
self.keys = set(keys)
self.strict = strict
if messages is not None:
self.messages = copy.copy(self.messages)
self.messages.update(messages)
def __call__(self, value):
keys = set(value.keys())
missing_keys = self.keys - keys
if missing_keys:
raise ValidationError(self.messages['missing_keys'],
code='missing_keys',
params={'keys': ', '.join(missing_keys)},
)
if self.strict:
extra_keys = keys - self.keys
if extra_keys:
raise ValidationError(self.messages['extra_keys'],
code='extra_keys',
params={'keys': ', '.join(extra_keys)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__)
and (self.keys == other.keys)
and (self.messages == other.messages)
and (self.strict == other.strict)
)
def __ne__(self, other):
return not (self == other)
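# Illustrative usage (the key names are hypothetical):
#   validator = KeysValidator(keys=['street', 'city'], strict=True)
#   validator({'street': 'Main St', 'city': 'Springfield'})  # passes
#   validator({'street': 'Main St'})  # raises ValidationError: missing 'city'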
class RangeMaxValueValidator(MaxValueValidator):
compare = lambda self, a, b: a.upper > b
message = _('Ensure that this range is completely less than or equal to %(limit_value)s.')
class RangeMinValueValidator(MinValueValidator):
compare = lambda self, a, b: a.lower < b
message = _('Ensure that this range is completely greater than or equal to %(limit_value)s.')
|
sasasaftic/owatam
|
refs/heads/master
|
analysis/models.py
|
1
|
from django.contrib.auth.models import User
from django.db import models
class WebPage(models.Model):
user = models.ForeignKey(User)
main_domain = models.CharField(max_length=20)
page_id = models.CharField(max_length=20)
selected = models.BooleanField(default=False)
def __unicode__(self):
return self.main_domain
class Visitor(models.Model):
cookie_id = models.CharField(max_length=40, null=True)
webpage = models.ForeignKey(WebPage)
last_visit = models.DateTimeField(null=True)
last_visited_page = models.CharField(max_length=255, null=True)
last_page_visit = models.DateTimeField(null=True)
last_visited_element = models.CharField(max_length=255, null=True)
last_element_visit = models.DateTimeField(null=True)
def __unicode__(self):
return self.cookie_id
class Visit(models.Model):
webpage = models.ForeignKey(WebPage)
ip = models.CharField(max_length=30)
location = models.CharField(max_length=20)
page = models.CharField(max_length=255)
call_type = models.CharField(max_length=20)
title = models.CharField(max_length=20)
element = models.CharField(max_length=20)
date = models.DateTimeField()
visitor = models.ForeignKey(Visitor)
def __unicode__(self):
return self.page
class VisitAnalyzed(models.Model):
webpage = models.ForeignKey(WebPage)
date = models.DateField(null=True)
average_time_active = models.IntegerField(default=0)
num_of_samples_active = models.IntegerField(default=0)
average_time_all = models.IntegerField(default=0)
num_of_samples_all = models.IntegerField(default=0)
unique_visitors = models.IntegerField(default=0)
all_visitors = models.IntegerField(default=0)
class Location(models.Model):
webpage = models.ForeignKey(WebPage)
country = models.CharField(max_length=200, null=True)
city = models.CharField(max_length=200, null=True)
num_of_visits = models.IntegerField(default=1)
continent = models.CharField(max_length=200, null=True)
time_zone = models.CharField(max_length=200, null=True)
class PageAnalyzed(models.Model):
webpage = models.ForeignKey(WebPage)
average_time = models.IntegerField(default=0)
page = models.CharField(max_length=255)
num_of_samples = models.IntegerField(default=1)
class ElementAnalyzed(models.Model):
webpage = models.ForeignKey(WebPage)
page = models.CharField(max_length=255)
average_time = models.IntegerField(default=0)
element = models.CharField(max_length=255)
num_of_samples = models.IntegerField(default=1)
class UserSettings(models.Model):
user = models.ForeignKey(User)
active_time = models.IntegerField()
new_visit_time = models.IntegerField()
class AboutDevice(models.Model):
webpage = models.ForeignKey(WebPage)
mobile = models.IntegerField(default=0)
tablet = models.IntegerField(default=0)
touch_capable = models.IntegerField(default=0)
pc = models.IntegerField(default=0)
bot = models.IntegerField(default=0)
class Browser(models.Model):
webpage = models.ForeignKey(WebPage)
browser = models.CharField(max_length=200) # Family + version_string
num_of_browsers = models.IntegerField(default=1)
class Device(models.Model):
webpage = models.ForeignKey(WebPage)
device = models.CharField(max_length=200) # device.family
num_of_devices = models.IntegerField(default=1)
class Os(models.Model):
webpage = models.ForeignKey(WebPage)
os = models.CharField(max_length=200) # Os + os.family
num_of_oss = models.IntegerField(default=1)
|
DCSaunders/tensorflow
|
refs/heads/master
|
tensorflow/examples/android/jni/__init__.py
|
12133432
| |
MinchinWeb/topydo
|
refs/heads/stable
|
test/__init__.py
|
12133432
| |
fivejjs/bayespy
|
refs/heads/master
|
bayespy/utils/tests/__init__.py
|
12133432
| |
akhilari7/pa-dude
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/auth/management/commands/__init__.py
|
12133432
| |
JiYou/apprtc
|
refs/heads/master
|
src/app_engine/compute_page.py
|
24
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""Compute page for handling tasks related to compute engine."""
import logging
import apiauth
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import taskqueue
# Page actions
# Get the status of an instance.
ACTION_STATUS = 'status'
# Start the compute instance if it is not already started. When an
# instance is in the TERMINATED state, it will be started. If the
# instance is already RUNNING do nothing. If the instance is in an
# intermediate state--PROVISIONING, STAGING, STOPPING--the task is
# requeued.
ACTION_START = 'start'
# Stop the compute instance if it is RUNNING. Then queue a task to start it. Do
# nothing if the instance is not RUNNING.
ACTION_RESTART = 'restart'
# Constants for the Compute Engine API
COMPUTE_STATUS = 'status'
COMPUTE_STATUS_TERMINATED = 'TERMINATED'
COMPUTE_STATUS_RUNNING = 'RUNNING'
# Seconds between API retries.
START_TASK_MIN_WAIT_S = 3
COMPUTE_API_URL = 'https://www.googleapis.com/auth/compute'
def enqueue_start_task(instance, zone):
taskqueue.add(url='/compute/%s/%s/%s' % (ACTION_START, instance, zone),
countdown=START_TASK_MIN_WAIT_S)
def enqueue_restart_task(instance, zone):
taskqueue.add(url='/compute/%s/%s/%s' % (ACTION_RESTART, instance, zone),
countdown=START_TASK_MIN_WAIT_S)
class ComputePage(webapp2.RequestHandler):
"""Page to handle requests against GCE."""
def __init__(self, request, response):
    # Call initialize rather than the parent constructor function. See:
# https://webapp-improved.appspot.com/guide/handlers.html#overriding-init
self.initialize(request, response)
self.compute_service = self._build_compute_service()
if self.compute_service is None:
logging.warning('Unable to create Compute service object.')
def _build_compute_service(self):
return apiauth.build(scope=COMPUTE_API_URL,
service_name='compute',
version='v1')
def _maybe_restart_instance(self, instance, zone):
"""Implementation for restart action.
Args:
instance: Name of the instance to restart.
zone: Name of the zone the instance belongs to.
"""
if self.compute_service is None:
logging.warning('Compute service unavailable.')
return
status = self._compute_status(instance, zone)
logging.info('GCE VM \'%s (%s)\' status: \'%s\'.',
instance, zone, status)
# Do nothing if the status is not RUNNING to avoid race. This will cover
# most of the cases.
if status == COMPUTE_STATUS_RUNNING:
logging.info('Stopping GCE VM: %s (%s)', instance, zone)
self.compute_service.instances().stop(
project=app_identity.get_application_id(),
instance=instance,
zone=zone).execute()
enqueue_start_task(instance, zone)
def _maybe_start_instance(self, instance, zone):
"""Implementation for start action.
Args:
instance: Name of the instance to start.
zone: Name of the zone the instance belongs to.
"""
if self.compute_service is None:
logging.warning('Unable to start Compute instance, service unavailable.')
return
status = self._compute_status(instance, zone)
logging.info('GCE VM \'%s (%s)\' status: \'%s\'.',
instance, zone, status)
if status == COMPUTE_STATUS_TERMINATED:
logging.info('Starting GCE VM: %s (%s)', instance, zone)
self.compute_service.instances().start(
project=app_identity.get_application_id(),
instance=instance,
zone=zone).execute()
if status != COMPUTE_STATUS_RUNNING:
# If in an intermediate state: PROVISIONING, STAGING, STOPPING, requeue
# the task to check back later. If in TERMINATED state, also requeue the
# task since the start attempt may fail and we should retry.
enqueue_start_task(instance, zone)
def _compute_status(self, instance, zone):
"""Return the status of the compute instance."""
if self.compute_service is None:
      logging.warning('Service unavailable: unable to get status of GCE VM: %s (%s)',
                      instance, zone)
return
info = self.compute_service.instances().get(
project=app_identity.get_application_id(),
instance=instance,
zone=zone).execute()
return info[COMPUTE_STATUS]
def get(self, action, instance, zone):
if action == ACTION_STATUS:
self.response.write(self._compute_status(instance, zone))
def post(self, action, instance, zone):
if action == ACTION_START:
self._maybe_start_instance(instance, zone)
elif action == ACTION_RESTART:
self._maybe_restart_instance(instance, zone)
|
slevenhagen/odoo
|
refs/heads/8.0
|
openerp/report/render/rml2pdf/utils.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2003, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import copy
import locale
import logging
import re
import reportlab
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.misc import ustr
_logger = logging.getLogger(__name__)
_regex = re.compile(r'\[\[(.+?)\]\]')
def str2xml(s):
return (s or '').replace('&', '&').replace('<', '<').replace('>', '>')
def xml2str(s):
return (s or '').replace('&','&').replace('<','<').replace('>','>')
def _child_get(node, self=None, tagname=None):
for n in node:
if self and self.localcontext and n.get('rml_loop'):
for ctx in eval(n.get('rml_loop'),{}, self.localcontext):
self.localcontext.update(ctx)
if (tagname is None) or (n.tag==tagname):
if n.get('rml_except', False):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr)
yield n2
except GeneratorExit:
yield n
except Exception, e:
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
yield n
else:
yield n
continue
if self and self.localcontext and n.get('rml_except'):
try:
eval(n.get('rml_except'), {}, self.localcontext)
except GeneratorExit:
continue
except Exception, e:
_logger.warning('rml_except: "%s"', n.get('rml_except',''), exc_info=True)
continue
if self and self.localcontext and n.get('rml_tag'):
try:
(tag,attr) = eval(n.get('rml_tag'),{}, self.localcontext)
n2 = copy.deepcopy(n)
n2.tag = tag
n2.attrib.update(attr or {})
yield n2
tagname = ''
except GeneratorExit:
pass
except Exception, e:
_logger.warning('rml_tag: "%s"', n.get('rml_tag',''), exc_info=True)
pass
if (tagname is None) or (n.tag==tagname):
yield n
def _process_text(self, txt):
"""Translate ``txt`` according to the language in the local context,
replace dynamic ``[[expr]]`` with their real value, then escape
the result for XML.
:param str txt: original text to translate (must NOT be XML-escaped)
:return: translated text, with dynamic expressions evaluated and
with special XML characters escaped (``&,<,>``).
"""
if not self.localcontext:
return str2xml(txt)
if not txt:
return ''
result = ''
sps = _regex.split(txt)
while sps:
# This is a simple text to translate
to_translate = tools.ustr(sps.pop(0))
result += tools.ustr(self.localcontext.get('translate', lambda x:x)(to_translate))
if sps:
txt = None
try:
expr = sps.pop(0)
txt = eval(expr, self.localcontext)
if txt and isinstance(txt, basestring):
txt = tools.ustr(txt)
except Exception:
_logger.error("Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.", expr, self.localcontext)
if isinstance(txt, basestring):
result += txt
elif txt and (txt is not None) and (txt is not False):
result += ustr(txt)
return str2xml(result)
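# Illustrative behaviour (hypothetical context): with a localcontext of
# {'name': 'World', 'translate': lambda s: s}, the template string
# 'Hello [[ name ]] & friends' renders as 'Hello World &amp; friends' --
# [[...]] expressions are evaluated, the literal parts are translated,
# and the combined result is XML-escaped by str2xml().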
def text_get(node):
return ''.join([ustr(n.text) for n in node])
units = [
(re.compile('^(-?[0-9\.]+)\s*in$'), reportlab.lib.units.inch),
(re.compile('^(-?[0-9\.]+)\s*cm$'), reportlab.lib.units.cm),
(re.compile('^(-?[0-9\.]+)\s*mm$'), reportlab.lib.units.mm),
(re.compile('^(-?[0-9\.]+)\s*$'), 1)
]
def unit_get(size):
global units
if size:
if size.find('.') == -1:
decimal_point = '.'
try:
decimal_point = locale.nl_langinfo(locale.RADIXCHAR)
except Exception:
decimal_point = locale.localeconv()['decimal_point']
size = size.replace(decimal_point, '.')
for unit in units:
res = unit[0].search(size, 0)
if res:
return unit[1]*float(res.group(1))
return False
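# Illustrative conversions (assuming a locale whose radix character is '.'):
#   unit_get('2cm')   -> 2 * reportlab.lib.units.cm  (~56.7 points)
#   unit_get('10 mm') -> 10 * reportlab.lib.units.mm
#   unit_get('12')    -> 12.0 (dimensionless sizes are taken as points)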
def tuple_int_get(node, attr_name, default=None):
if not node.get(attr_name):
return default
return map(int, node.get(attr_name).split(','))
def bool_get(value):
return (str(value)=="1") or (value.lower()=='yes')
def attr_get(node, attrs, dict=None):
if dict is None:
dict = {}
res = {}
for name in attrs:
if node.get(name):
res[name] = unit_get(node.get(name))
for key in dict:
if node.get(key):
if dict[key]=='str':
res[key] = tools.ustr(node.get(key))
elif dict[key]=='bool':
res[key] = bool_get(node.get(key))
elif dict[key]=='int':
res[key] = int(node.get(key))
elif dict[key]=='unit':
res[key] = unit_get(node.get(key))
elif dict[key] == 'float' :
res[key] = float(node.get(key))
return res
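# Hypothetical usage sketch: for an RML node such as
#   <image x="1cm" y="2cm" width="50" name="logo"/>
# attr_get(node, ['x', 'y', 'width'], {'name': 'str'}) would return
# {'x': <1cm in points>, 'y': <2cm in points>, 'width': 50.0, 'name': u'logo'}.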
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cjahangir/geodash-new
|
refs/heads/master
|
geonode/documents/forms.py
|
2
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
import os
import re
import autocomplete_light
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.forms import HiddenInput, TextInput
from modeltranslation.forms import TranslationModelForm
from geonode.documents.models import Document
from geonode.maps.models import Map
from geonode.layers.models import Layer
from geonode.groups.models import GroupProfile
autocomplete_light.autodiscover() # flake8: noqa
from geonode.base.forms import ResourceBaseForm
class DocumentForm(ResourceBaseForm):
resource = forms.ChoiceField(label='Link to')
def __init__(self, *args, **kwargs):
super(DocumentForm, self).__init__(*args, **kwargs)
rbases = list(Layer.objects.all())
rbases += list(Map.objects.all())
rbases.sort(key=lambda x: x.title)
rbases_choices = []
rbases_choices.append(['no_link', '---------'])
for obj in rbases:
type_id = ContentType.objects.get_for_model(obj.__class__).id
obj_id = obj.id
form_value = "type:%s-id:%s" % (type_id, obj_id)
display_text = '%s (%s)' % (obj.title, obj.polymorphic_ctype.model)
rbases_choices.append([form_value, display_text])
self.fields['resource'].choices = rbases_choices
if self.instance.content_type:
self.fields['resource'].initial = 'type:%s-id:%s' % (
self.instance.content_type.id, self.instance.object_id)
def save(self, *args, **kwargs):
contenttype_id = None
contenttype = None
object_id = None
resource = self.cleaned_data['resource']
if resource != 'no_link':
matches = re.match("type:(\d+)-id:(\d+)", resource).groups()
contenttype_id = matches[0]
object_id = matches[1]
contenttype = ContentType.objects.get(id=contenttype_id)
self.cleaned_data['content_type'] = contenttype_id
self.cleaned_data['object_id'] = object_id
self.instance.object_id = object_id
self.instance.content_type = contenttype
return super(DocumentForm, self).save(*args, **kwargs)
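    # Note: the 'resource' value is serialized as, e.g., the hypothetical
    # string 'type:4-id:17' (ContentType id and object id of the linked
    # Layer or Map), which save() parses back with the regex above.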
class Meta(ResourceBaseForm.Meta):
model = Document
exclude = ResourceBaseForm.Meta.exclude + (
'content_type',
'object_id',
'doc_file',
'extension',
'doc_type',
'doc_url',
'status',
'group',
'last_auditor')
class DocumentDescriptionForm(forms.Form):
    title = forms.CharField(max_length=300)
    abstract = forms.CharField(max_length=1000, widget=forms.Textarea, required=False)
    keywords = forms.CharField(max_length=500, required=False)
class DocumentReplaceForm(forms.ModelForm):
"""
The form used to replace a document.
"""
class Meta:
model = Document
fields = ['doc_file', 'doc_url']
def clean(self):
"""
Ensures the doc_file or the doc_url field is populated.
"""
cleaned_data = super(DocumentReplaceForm, self).clean()
doc_file = self.cleaned_data.get('doc_file')
doc_url = self.cleaned_data.get('doc_url')
if not doc_file and not doc_url:
raise forms.ValidationError(_("Document must be a file or url."))
if doc_file and doc_url:
raise forms.ValidationError(
_("A document cannot have both a file and a url."))
return cleaned_data
def clean_doc_file(self):
"""
Ensures the doc_file is valid.
"""
doc_file = self.cleaned_data.get('doc_file')
if doc_file and not os.path.splitext(
doc_file.name)[1].lower()[
1:] in settings.ALLOWED_DOCUMENT_TYPES:
raise forms.ValidationError(_("This file type is not allowed"))
return doc_file
class DocumentCreateForm(TranslationModelForm):
"""
The document upload form.
"""
permissions = forms.CharField(
widget=HiddenInput(
attrs={
'name': 'permissions',
'id': 'permissions'}),
required=True)
resource = forms.CharField(
required=False,
label=_("Link to"),
widget=TextInput(
attrs={
'name': 'title__contains',
'id': 'resource'}))
class Meta:
model = Document
fields = ['title', 'doc_file', 'doc_url']
widgets = {
'name': HiddenInput(attrs={'cols': 80, 'rows': 20}),
}
def clean_permissions(self):
"""
Ensures the JSON field is JSON.
"""
permissions = self.cleaned_data['permissions']
try:
return json.loads(permissions)
except ValueError:
raise forms.ValidationError(_("Permissions must be valid JSON."))
def clean(self):
"""
Ensures the doc_file or the doc_url field is populated.
"""
cleaned_data = super(DocumentCreateForm, self).clean()
doc_file = self.cleaned_data.get('doc_file')
doc_url = self.cleaned_data.get('doc_url')
if not doc_file and not doc_url:
raise forms.ValidationError(_("Document must be a file or url."))
if doc_file and doc_url:
raise forms.ValidationError(
_("A document cannot have both a file and a url."))
return cleaned_data
def clean_doc_file(self):
"""
Ensures the doc_file is valid.
"""
doc_file = self.cleaned_data.get('doc_file')
if doc_file and not os.path.splitext(
doc_file.name)[1].lower()[
1:] in settings.ALLOWED_DOCUMENT_TYPES:
raise forms.ValidationError(_("This file type is not allowed"))
return doc_file
|
AnimationInVR/avango
|
refs/heads/master
|
examples/examples_common/matrix_converter.py
|
3
|
import avango.gua
import avango.osg
def convert_osg_to_gua(osg_mat):
gua_mat = avango.gua.make_identity_mat()
for row in range(0, 4):
for col in range(0, 4):
gua_mat.set_element(col, row, osg_mat.get_element(row, col))
return gua_mat
def convert_gua_to_osg(gua_mat):
    osg_mat = avango.osg.make_identity_mat()
for row in range(0, 4):
for col in range(0, 4):
osg_mat.set_element(col, row, gua_mat.get_element(row, col))
return osg_mat
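# Minimal usage sketch (assuming avango.osg offers make_trans_mat, as
# avango.gua does):
#   m_osg = avango.osg.make_trans_mat(1.0, 2.0, 3.0)
#   m_gua = convert_osg_to_gua(m_osg)
#   m_osg2 = convert_gua_to_osg(m_gua)  # transposing twice restores m_osg
# Both converters transpose because guacamole and OSG store matrices in
# opposite row-/column-major order.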
|
harmy/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/json_tests/test_decode.py
|
49
|
import decimal
from io import StringIO
from collections import OrderedDict
from test.json_tests import PyTest, CTest
class TestDecode:
def test_decimal(self):
rval = self.loads('1.1', parse_float=decimal.Decimal)
self.assertTrue(isinstance(rval, decimal.Decimal))
self.assertEqual(rval, decimal.Decimal('1.1'))
def test_float(self):
rval = self.loads('1', parse_int=float)
self.assertTrue(isinstance(rval, float))
self.assertEqual(rval, 1.0)
def test_empty_objects(self):
self.assertEqual(self.loads('{}'), {})
self.assertEqual(self.loads('[]'), [])
self.assertEqual(self.loads('""'), "")
def test_object_pairs_hook(self):
s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4),
("qrt", 5), ("pad", 6), ("hoy", 7)]
self.assertEqual(self.loads(s), eval(s))
self.assertEqual(self.loads(s, object_pairs_hook = lambda x: x), p)
self.assertEqual(self.json.load(StringIO(s),
object_pairs_hook=lambda x: x), p)
od = self.loads(s, object_pairs_hook = OrderedDict)
self.assertEqual(od, OrderedDict(p))
self.assertEqual(type(od), OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(self.loads(s, object_pairs_hook = OrderedDict,
object_hook = lambda x: None),
OrderedDict(p))
def test_decoder_optimizations(self):
# Several optimizations were made that skip over calls to
# the whitespace regex, so this test is designed to try and
# exercise the uncommon cases. The array cases are already covered.
rval = self.loads('{ "key" : "value" , "k":"v" }')
self.assertEqual(rval, {"key":"value", "k":"v"})
def check_keys_reuse(self, source, loads):
rval = loads(source)
(a, b), (c, d) = sorted(rval[0]), sorted(rval[1])
self.assertIs(a, c)
self.assertIs(b, d)
def test_keys_reuse(self):
s = '[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]'
self.check_keys_reuse(s, self.loads)
self.check_keys_reuse(s, self.json.decoder.JSONDecoder().decode)
class TestPyDecode(TestDecode, PyTest): pass
class TestCDecode(TestDecode, CTest): pass
|
jni/networkx
|
refs/heads/master
|
networkx/algorithms/shortest_paths/tests/test_astar.py
|
76
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
from random import random, choice
class TestAStar:
def setUp(self):
self.XG=nx.DiGraph()
self.XG.add_edges_from([('s','u',{'weight':10}),
('s','x',{'weight':5}),
('u','v',{'weight':1}),
('u','x',{'weight':2}),
('v','y',{'weight':1}),
('x','u',{'weight':3}),
('x','v',{'weight':5}),
('x','y',{'weight':2}),
('y','s',{'weight':7}),
('y','v',{'weight':6})])
def test_random_graph(self):
def dist(a, b):
(x1, y1) = a
(x2, y2) = b
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
G = nx.Graph()
points = [(random(), random()) for _ in range(100)]
# Build a path from points[0] to points[-1] to be sure it exists
for p1, p2 in zip(points[:-1], points[1:]):
G.add_edge(p1, p2, weight=dist(p1, p2))
# Add other random edges
for _ in range(100):
p1, p2 = choice(points), choice(points)
G.add_edge(p1, p2, weight=dist(p1, p2))
path = nx.astar_path(G, points[0], points[-1], dist)
assert path == nx.dijkstra_path(G, points[0], points[-1])
def test_astar_directed(self):
assert nx.astar_path(self.XG,'s','v')==['s', 'x', 'u', 'v']
assert nx.astar_path_length(self.XG,'s','v')==9
def test_astar_multigraph(self):
G=nx.MultiDiGraph(self.XG)
assert_raises((TypeError,nx.NetworkXError),
nx.astar_path, [G,'s','v'])
assert_raises((TypeError,nx.NetworkXError),
nx.astar_path_length, [G,'s','v'])
def test_astar_undirected(self):
GG=self.XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight']=2
GG['y']['v']['weight'] = 2
assert_equal(nx.astar_path(GG,'s','v'),['s', 'x', 'u', 'v'])
assert_equal(nx.astar_path_length(GG,'s','v'),8)
def test_astar_directed2(self):
XG2=nx.DiGraph()
XG2.add_edges_from([[1,4,{'weight':1}],
[4,5,{'weight':1}],
[5,6,{'weight':1}],
[6,3,{'weight':1}],
[1,3,{'weight':50}],
[1,2,{'weight':100}],
[2,3,{'weight':100}]])
assert nx.astar_path(XG2,1,3)==[1, 4, 5, 6, 3]
def test_astar_undirected2(self):
XG3=nx.Graph()
XG3.add_edges_from([ [0,1,{'weight':2}],
[1,2,{'weight':12}],
[2,3,{'weight':1}],
[3,4,{'weight':5}],
[4,5,{'weight':1}],
[5,0,{'weight':10}] ])
assert nx.astar_path(XG3,0,3)==[0, 1, 2, 3]
assert nx.astar_path_length(XG3,0,3)==15
def test_astar_undirected3(self):
XG4=nx.Graph()
XG4.add_edges_from([ [0,1,{'weight':2}],
[1,2,{'weight':2}],
[2,3,{'weight':1}],
[3,4,{'weight':1}],
[4,5,{'weight':1}],
[5,6,{'weight':1}],
[6,7,{'weight':1}],
[7,0,{'weight':1}] ])
assert nx.astar_path(XG4,0,2)==[0, 1, 2]
assert nx.astar_path_length(XG4,0,2)==4
# >>> MXG4=NX.MultiGraph(XG4)
# >>> MXG4.add_edge(0,1,3)
# >>> NX.dijkstra_path(MXG4,0,2)
# [0, 1, 2]
def test_astar_w1(self):
G=nx.DiGraph()
G.add_edges_from([('s','u'), ('s','x'), ('u','v'), ('u','x'),
('v','y'), ('x','u'), ('x','w'), ('w', 'v'), ('x','y'),
('y','s'), ('y','v')])
assert nx.astar_path(G,'s','v')==['s', 'u', 'v']
assert nx.astar_path_length(G,'s','v')== 2
@raises(nx.NetworkXNoPath)
def test_astar_nopath(self):
p = nx.astar_path(self.XG,'s','moon')
def test_cycle(self):
C=nx.cycle_graph(7)
assert nx.astar_path(C,0,3)==[0, 1, 2, 3]
assert nx.dijkstra_path(C,0,4)==[0, 6, 5, 4]
def test_orderable(self):
class UnorderableClass: pass
node_1 = UnorderableClass()
node_2 = UnorderableClass()
node_3 = UnorderableClass()
node_4 = UnorderableClass()
G = nx.Graph()
G.add_edge(node_1, node_2)
G.add_edge(node_1, node_3)
G.add_edge(node_2, node_4)
G.add_edge(node_3, node_4)
path=nx.algorithms.shortest_paths.astar.astar_path(G, node_1, node_4)
|
csblab/md_scripts
|
refs/heads/master
|
openmm/amberff/equilibrate_NPT.py
|
1
|
#!/usr/bin/env python
"""
Runs a simulation under NPT conditions.
Outputs a portable state (.xml) file with positions and velocities,
to allow restarting and/or continuation.
.2019. joaor@stanford.edu
"""
from __future__ import print_function, division
import argparse
import logging
import math
import os
import random
import re
import sys
import numpy as np
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as units
import _utils
import _restraints
# Format logger
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
##
# Parse user input and options
ap_fmt = argparse.ArgumentDefaultsHelpFormatter
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=ap_fmt)
# Mandatory
ap.add_argument('structure', help='Input coordinate file (.cif)')
# Options
ap.add_argument('--output', type=str, default=None,
help='Root name for output files. Default is input file name.')
ap.add_argument('--forcefield', type=str, default='amber14-all.xml',
help='Force field to build the system with (XML format).')
ap.add_argument('--solvent', type=str, default='amber14/tip3p.xml',
help='Solvent model to use in minimization (XML format).')
ap.add_argument('--xyz-frequency', dest='xyz_freq', type=int, default=5000,
help='Frequency (number of steps) to write coordinates.')
ap.add_argument('--log-frequency', dest='log_freq', type=int, default=5000,
help='Frequency (number of steps) to log run parameters.')
ap.add_argument('--platform', type=str, default=None,
choices=('OpenCL', 'CUDA', 'CPU', 'Reference'),
help='Platform to run calculations on.')
ap.add_argument('--state', type=str,
help='Checkpoint/XML file to read positions/velocities from.')
ap.add_argument('--seed', type=int, default=917,
help='Seed number for random number generator(s).')
ap.add_argument('--temperature', default=310, type=float,
help='Target temperature, in Kelvin.')
ap.add_argument('--pressure', default=1.0, type=float,
help='Target pressure, in bar.')
ap.add_argument('--barostat', default='isotropic',
choices=('isotropic', 'membrane'),
help='Type of barostat.')
ap.add_argument('--runtime', default=5, type=float,
help='Simulation length in nanoseconds. Default 5.')
ap.add_argument('--continuation', action='store_true',
help='Reads elapsed run time from checkpoint/state files.')
ap.add_argument('--restraint-heavy-atom', action='store_true', default=False,
help='Apply position restraints to non-solvent heavy atoms')
ap.add_argument('--restraint-lipids', action='store_true', default=False,
help='Apply position restraints to lipid head groups')
ap.add_argument('--restraint-heavy-atom-k', default=500, type=int,
help='Force constant for heavy atom restraints.')
ap.add_argument('--restraint-lipids-k', default=500, type=int,
help='Force constant for lipid restraints.')
ap.add_argument('--hmr', action='store_true', default=False,
help='Use Hydrogen Mass Repartitioning.')
cmd = ap.parse_args()
logging.info('Started')
# Set random seed for reproducibility
random.seed(cmd.seed)
# Figure out platform
platform, plat_properties = _utils.get_platform(cmd.platform)
logging.info('Simulation Details:')
logging.info(f' random seed : {cmd.seed}')
logging.info(f' structure : {cmd.structure}')
logging.info(f' force field : {cmd.forcefield}')
logging.info(f' solvent model: {cmd.solvent}')
logging.info(f' temperature : {cmd.temperature} K')
logging.info(f' barostat : {cmd.barostat}')
logging.info(f' pressure : {cmd.pressure} bar')
logging.info(f' runtime : {cmd.runtime} ns')
logging.info(f' heavy-atom restraints : {cmd.restraint_heavy_atom}')
if cmd.restraint_heavy_atom:
logging.info(f' K = {cmd.restraint_heavy_atom_k} kJ/mol/nm^2')
logging.info(f' lipid restraints : {cmd.restraint_lipids}')
if cmd.restraint_lipids:
logging.info(f' K = {cmd.restraint_lipids_k} kJ/mol/nm^2')
logging.info(f' HMR : {cmd.hmr}')
# Make rootname for output files
basename = os.path.basename(cmd.structure)
fname, fext = os.path.splitext(basename)
if cmd.output is None:
    rootname = fname + '_EqNPT'
else:
rootname = cmd.output
# Read in structure data and setup OpenMM system
structure = app.PDBxFile(cmd.structure)
# Remove dummy atoms (mass 0) just in case
model = app.Modeller(structure.topology, structure.positions)
dummy_idx = [a for a in model.topology.atoms() if a.element is None]
n_dummies = len(dummy_idx)
if n_dummies:
logging.info(f'Removing {n_dummies} dummy atoms from input')
model.delete(dummy_idx)
structure.topology = model.topology
structure.positions = model.positions
forcefield = app.ForceField(cmd.forcefield, cmd.solvent)
md_temp = cmd.temperature * units.kelvin
md_step = 2.0*units.femtosecond
md_fric = 1.0/units.picosecond
md_nbct = 1.0*units.nanometer
md_hamu = None
md_cstr = app.HBonds
if cmd.hmr: # adapt for HMR if necessary
md_step *= 2.5 # make 5 fs
md_hamu = 4*units.amu
md_cstr = app.AllBonds
# Build system & integrator
logging.info('Setting up system and integrator')
system = forcefield.createSystem(structure.topology, nonbondedMethod=app.PME,
nonbondedCutoff=md_nbct,
constraints=md_cstr,
hydrogenMass=md_hamu,
ewaldErrorTolerance=0.0005,
rigidWater=True)
# Setup pressure
md_pres = cmd.pressure * units.bar
if cmd.barostat == 'isotropic':
b = mm.MonteCarloBarostat(md_pres, md_temp, 25)
elif cmd.barostat == 'membrane':
surface_tension = 0*units.bar*units.nanometer # amber lipids = tensionless
b = mm.MonteCarloMembraneBarostat(md_pres, surface_tension,
md_temp,
mm.MonteCarloMembraneBarostat.XYIsotropic,
mm.MonteCarloMembraneBarostat.ZFree,
25)
system.addForce(b)
# Setup integrator and temperature coupling
integrator = mm.LangevinIntegrator(md_temp, md_fric, md_step)
integrator.setRandomNumberSeed(cmd.seed)
integrator.setConstraintTolerance(0.00001)
# Restrain heavy atoms
if cmd.restraint_heavy_atom:
# force = _restraints.make_heavy_atom_restraints(structure,
# cmd.restraint_heavy_atom_k)
force = _restraints.make_heavy_atom_restraints_v2(system, structure,
cmd.restraint_heavy_atom_k)
system.addForce(force)
# Restrain lipid headgroups in Z
if cmd.restraint_lipids:
# force = _restraints.make_lipid_restraints(structure,
# cmd.restraint_lipids_k)
force = _restraints.make_lipid_restraints_v2(system, structure,
cmd.restraint_lipids_k)
system.addForce(force)
# Setup simulation
simulation = app.Simulation(structure.topology, system, integrator,
platform, plat_properties)
simulation.context.setPositions(structure.positions)
simulation.context.setVelocitiesToTemperature(md_temp)
# Load checkpoint/state file
if cmd.state:
if cmd.state.endswith('.xml'): # is XML state file
logging.info(f'Loading XML state file: {cmd.state}')
simulation.loadState(cmd.state)
        logging.info('  resetting simulation time')
simulation.context.setTime(0.0) # resets simulation time
cmd.runtime = cmd.runtime * units.nanosecond
elif cmd.state.endswith('.cpt'): # is binary checkpoint
logging.info(f'Loading binary checkpoint file: {cmd.state}')
simulation.loadCheckpoint(cmd.state)
if cmd.continuation:
# Adjust remaining running time
run_time = simulation.context.getState().getTime()
run_time_val = run_time.value_in_unit(units.nanosecond)
logging.info(f' {run_time_val:8.2f}/{cmd.runtime:8.2f} ns completed')
expected_t = cmd.runtime * units.nanosecond
cmd.runtime = (expected_t - run_time).in_units_of(units.nanosecond)
else: # restart from 0
simulation.context.setTime(0.0)
cmd.runtime = cmd.runtime * units.nanosecond
else:
raise Exception(f'State file format not recognized: {cmd.state}')
else:
cmd.runtime = cmd.runtime * units.nanosecond
# Assert we actually have to run something.
if cmd.runtime <= 0.00001 * units.nanosecond:
logging.info('Equilibration completed. Apparently. Maybe ask for more?')
logging.info('Finished')
sys.exit(0)
# Setup writer/logger frequencies
# Default: 0.01 ns
if cmd.hmr:
# Time step is 5 fs vs 2fs
cmd.xyz_freq = int(cmd.xyz_freq // 2.5)
cmd.log_freq = int(cmd.log_freq // 2.5)
# Calculate total simulation length in steps
n_steps = int(math.ceil(cmd.runtime / md_step.in_units_of(units.nanoseconds)))
# n_steps is dimensionless (ns/ns)
# Setup Reporters
dcd_fname = _utils.make_fname_serial(rootname + '.dcd')
cpt_fname = _utils.make_fname_serial(rootname + '.cpt')
log_fname = _utils.make_fname_serial(rootname + '.log')
dcd = app.DCDReporter(dcd_fname, cmd.xyz_freq)
cpt = app.CheckpointReporter(cpt_fname, cmd.xyz_freq)
state = app.StateDataReporter(log_fname, cmd.log_freq,
step=True,
time=True,
potentialEnergy=True,
kineticEnergy=True,
temperature=True,
progress=True,
remainingTime=True,
volume=True,
totalSteps=n_steps,
speed=True,
separator='\t')
simulation.reporters.append(dcd)
simulation.reporters.append(cpt)
simulation.reporters.append(state)
logging.info(f'Writing coordinates to \'{dcd_fname}\'')
logging.info(f'Writing checkpoint file to \'{cpt_fname}\'')
logging.info(f'Writing simulation log to \'{log_fname}\'')
# Run simulation
simulation.step(n_steps)
# Write state file (without restraining forces)
xml_fname = _utils.make_fname_serial(rootname + '.xml')
logging.info(f'Writing state file to \'{xml_fname}\'')
system = simulation.system
n_rest_forces = sum([cmd.restraint_heavy_atom, cmd.restraint_lipids])
while n_rest_forces:
system.removeForce(system.getNumForces() - 1)
n_rest_forces -= 1
# Reinitialize context. Keep velocities, positions.
state = simulation.context.getState(getPositions=True, getVelocities=True)
vx, vy, vz = state.getPeriodicBoxVectors()
xyz, vel = state.getPositions(), state.getVelocities()
simulation.context.reinitialize(preserveState=False)
simulation.context.setPositions(xyz)
simulation.context.setVelocities(vel)
simulation.context.setPeriodicBoxVectors(vx, vy, vz)
simulation.saveState(xml_fname)
# Write last frame as mmCIF
cif_fname = _utils.make_fname_serial(rootname + '.cif')
logging.info(f'Writing final structure to \'{cif_fname}\'')
with open(cif_fname, 'w') as handle:
app.PDBxFile.writeFile(structure.topology, xyz, handle, keepIds=True)
# Write system without dummy atoms
# Easier to redo system object
# and set positions/velocities manually.
model = app.Modeller(structure.topology, structure.positions)
dummy = [c for c in model.topology.chains() if c.id.startswith('DUM')]
model.delete(dummy) # delete entire chains
n_ini_atoms = model.topology.getNumAtoms()
logging.info('Writing system without dummy (restraint) atoms')
system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME,
nonbondedCutoff=md_nbct,
constraints=md_cstr,
hydrogenMass=md_hamu,
ewaldErrorTolerance=0.0005,
rigidWater=True)
integrator = mm.LangevinIntegrator(md_temp, md_fric, md_step)
simulation = app.Simulation(model.topology, system, integrator)
simulation.context.setPositions(xyz[:n_ini_atoms])
simulation.context.setVelocities(vel[:n_ini_atoms])
simulation.context.setPeriodicBoxVectors(vx, vy, vz)
xml_fname = _utils.make_fname(rootname + '_noDUM' + '.xml')
logging.info(f'Writing dummy-less state to \'{xml_fname}\'')
simulation.saveState(xml_fname)
# Write last frame as mmCIF
cif_fname = _utils.make_fname(rootname + '_noDUM' + '.cif')
logging.info(f'Writing dummy-less structure to \'{cif_fname}\'')
with open(cif_fname, 'w') as handle:
app.PDBxFile.writeFile(model.topology, xyz[:n_ini_atoms], handle, keepIds=True)
logging.info('Finished')
|
nvoron23/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/utils/unittest/runner.py
|
571
|
"""Running tests"""
import sys
import time
import unittest
from django.utils.unittest import result
try:
from django.utils.unittest.signals import registerResult
except ImportError:
def registerResult(_):
pass
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped %r" % (reason,))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
def stopTestRun(self):
super(TextTestResult, self).stopTestRun()
self.printErrors()
class TextTestRunner(unittest.TextTestRunner):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
result.failfast = self.failfast
result.buffer = self.buffer
registerResult(result)
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
else:
result.printErrors()
stopTime = time.time()
timeTaken = stopTime - startTime
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
expectedFails, unexpectedSuccesses, skipped = results
except AttributeError:
pass
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
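# Typical usage sketch (mirrors the stdlib TextTestRunner):
#   runner = TextTestRunner(verbosity=2)
#   runner.run(unittest.TestLoader().loadTestsFromTestCase(SomeTestCase))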
|
StefanRijnhart/account-financial-reporting
|
refs/heads/8.0
|
__unported__/account_financial_report_webkit_xls/wizard/general_ledger_wizard.py
|
37
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2013 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
# import logging
# _logger = logging.getLogger(__name__)
class general_ledger_webkit_wizard(orm.TransientModel):
_inherit = 'general.ledger.webkit'
def xls_export(self, cr, uid, ids, context=None):
return self.check_report(cr, uid, ids, context=context)
def _print_report(self, cr, uid, ids, data, context=None):
context = context or {}
if context.get('xls_export'):
# we update form with display account value
data = self.pre_print_report(cr, uid, ids, data, context=context)
return {'type': 'ir.actions.report.xml',
'report_name': 'account.account_report_general_ledger_xls',
'datas': data}
else:
return super(general_ledger_webkit_wizard, self)._print_report(
cr, uid, ids, data, context=context)
|
venumech/cookiecutter
|
refs/heads/master
|
docs/__init__.py
|
12133432
| |
ebar0n/django
|
refs/heads/master
|
django/views/decorators/__init__.py
|
12133432
| |
vinc456/coala
|
refs/heads/master
|
tests/parsing/GlobTestDir/SubDir1/File11.py
|
12133432
| |
sglumac/pyislands
|
refs/heads/master
|
tests/__init__.py
|
1
|
import tests.permutation
|
ohmini/thaifoodapi
|
refs/heads/master
|
lib/django/contrib/flatpages/urls.py
|
667
|
from django.conf.urls import url
from django.contrib.flatpages import views
urlpatterns = [
url(r'^(?P<url>.*)$', views.flatpage, name='django.contrib.flatpages.views.flatpage'),
]
|
SanPen/GridCal
|
refs/heads/master
|
src/research/PTDF/ACPTDF_research.py
|
1
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve, inv
from matplotlib import pyplot as plt
from GridCal.Engine import *
def Jacobian(Ybus, V, Ibus, pq, pvpq):
"""
Computes the system Jacobian matrix in polar coordinates
Args:
Ybus: Admittance matrix
V: Array of nodal voltages
Ibus: Array of nodal current injections
pq: Array with the indices of the PQ buses
pvpq: Array with the indices of the PV and PQ buses
Returns:
The system Jacobian matrix
"""
I = Ybus * V - Ibus
Vdiag = sp.diags(V)
Idiag = sp.diags(I)
Ediag = sp.diags(V / np.abs(V))
dS_dVm = Vdiag * np.conj(Ybus * Ediag) + np.conj(Idiag) * Ediag
dS_dVa = 1.0j * Vdiag * np.conj(Idiag - Ybus * Vdiag)
J = sp.vstack([sp.hstack([dS_dVa[np.ix_(pvpq, pvpq)].real, dS_dVm[np.ix_(pvpq, pq)].real]),
sp.hstack([dS_dVa[np.ix_(pq, pvpq)].imag, dS_dVm[np.ix_(pq, pq)].imag])], format="csc")
return J
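# The Jacobian above has the standard Newton-Raphson block structure:
#     J = [ dP/dVa[pvpq, pvpq]   dP/dVm[pvpq, pq] ]
#         [ dQ/dVa[pq,   pvpq]   dQ/dVm[pq,   pq] ]
# so its dimension is (npv + 2*npq) x (npv + 2*npq).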
def compute_acptdf(Ybus, Yf, Yt, Cf, V, Ibus, pq, pv, distribute_slack):
"""
Compute the AC-PTDF
:param Ybus: admittance matrix
:param Yf: Admittance matrix of the buses "from"
:param Yt: Admittance matrix of the buses "to"
:param Cf: Connectivity branch - bus "from"
:param V: voltages array
:param Ibus: array of currents
:param pq: array of pq node indices
:param pv: array of pv node indices
:return: AC-PTDF matrix (branches, buses)
"""
n = len(V)
pvpq = np.r_[pv, pq]
npq = len(pq)
# compute the Jacobian
J = Jacobian(Ybus, V, Ibus, pq, pvpq)
if distribute_slack:
dP = np.ones((n, n)) * (-1 / (n - 1))
for i in range(n):
dP[i, i] = 1.0
else:
dP = np.eye(n, n)
    # compose the compatible array (the Q increments are considered zero)
dQ = np.zeros((npq, n))
# dQ = np.eye(n, n)[pq, :]
dS = np.r_[dP[pvpq, :], dQ]
# solve the voltage increments
dx = spsolve(J, dS)
# compute branch derivatives
If = Yf * V
E = V / np.abs(V)
Vdiag = sp.diags(V)
Vdiag_conj = sp.diags(np.conj(V))
Ediag = sp.diags(E)
Ediag_conj = sp.diags(np.conj(E))
If_diag_conj = sp.diags(np.conj(If))
Yf_conj = Yf.copy()
Yf_conj.data = np.conj(Yf_conj.data)
Yt_conj = Yt.copy()
Yt_conj.data = np.conj(Yt_conj.data)
dSf_dVa = 1j * (If_diag_conj * Cf * Vdiag - sp.diags(Cf * V) * Yf_conj * Vdiag_conj)
dSf_dVm = If_diag_conj * Cf * Ediag - sp.diags(Cf * V) * Yf_conj * Ediag_conj
# compose the final AC-PTDF
dPf_dVa = dSf_dVa.real[:, pvpq]
dPf_dVm = dSf_dVm.real[:, pq]
PTDF = sp.hstack((dPf_dVa, dPf_dVm)) * dx
return PTDF
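# PTDF[m, b] approximates the change of active power flow on branch m per
# unit of additional injection at bus b; the columns span all buses because
# dx was solved against the full dP matrix (identity or distributed slack).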
def make_lodf(circuit: SnapshotCircuit, PTDF, correct_values=True):
"""
:param circuit:
:param PTDF: PTDF matrix in numpy array form
:return:
"""
nl = circuit.nbr
# compute the connectivity matrix
Cft = circuit.C_branch_bus_f - circuit.C_branch_bus_t
H = PTDF * Cft.T
# old code
# h = sp.diags(H.diagonal())
# LODF = H / (np.ones((nl, nl)) - h * np.ones(nl))
# divide each row of H by the vector 1 - H.diagonal
# LODF = H / (1 - H.diagonal())
# replace possible nan and inf
# LODF[LODF == -np.inf] = 0
# LODF[LODF == np.inf] = 0
# LODF = np.nan_to_num(LODF)
# this loop avoids the divisions by zero
# in those cases the LODF column should be zero
LODF = np.zeros((nl, nl))
div = 1 - H.diagonal()
for j in range(H.shape[1]):
if div[j] != 0:
LODF[:, j] = H[:, j] / div[j]
# replace the diagonal elements by -1
# old code
# LODF = LODF - sp.diags(LODF.diagonal()) - sp.eye(nl, nl), replaced by:
for i in range(nl):
LODF[i, i] = - 1.0
if correct_values:
i1, j1 = np.where(LODF > 1)
for i, j in zip(i1, j1):
LODF[i, j] = 1
i2, j2 = np.where(LODF < -1)
for i, j in zip(i2, j2):
LODF[i, j] = -1
return LODF
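# Interpretation: LODF[m, c] is the fraction of the pre-contingency flow of
# branch c that shifts onto branch m when c is outaged, so the
# post-contingency flow is flow_m' = flow_m + LODF[m, c] * flow_c
# (this is exactly how check_lodf() uses it below).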
def get_branch_time_series(circuit: TimeCircuit, PTDF):
"""
:param grid:
:return:
"""
# option 2: call the power directly
P = circuit.Sbus.real
Pbr = np.dot(PTDF, P).T * circuit.Sbase
return Pbr
def multiple_failure_old(flows, LODF, beta, delta, alpha):
"""
:param flows: array of all the pre-contingency flows
:param LODF: Line Outage Distribution Factors Matrix
:param beta: index of the first failed line
:param delta: index of the second failed line
:param alpha: index of the line where you want to see the effects
:return: post contingency flow in the line alpha
"""
# multiple contingency matrix
M = np.ones((2, 2))
M[0, 1] = -LODF[beta, delta]
M[1, 0] = -LODF[delta, beta]
# normal flows of the lines beta and delta
F = flows[[beta, delta]]
# contingency flows after failing the ines beta and delta
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines beta and delta
L = LODF[alpha, :][[beta, delta]]
dFf_alpha = np.dot(L, Ff)
return F[alpha] + dFf_alpha
def multiple_failure(flows, LODF, failed_idx):
"""
From the paper:
Multiple Element Contingency Screening
IEEE TRANSACTIONS ON POWER SYSTEMS, VOL. 26, NO. 3, AUGUST 2011
C. Matthew Davis and Thomas J. Overbye
:param flows: array of all the pre-contingency flows (the base flows)
:param LODF: Line Outage Distribution Factors Matrix
:param failed_idx: indices of the failed lines
:return: all post contingency flows
"""
# multiple contingency matrix
M = -LODF[np.ix_(failed_idx, failed_idx)]
for i in range(len(failed_idx)):
M[i, i] = 1.0
# normal flows of the failed lines indicated by failed_idx
F = flows[failed_idx]
# Affected flows after failing the lines indicated by failed_idx
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines indicated by failed_idx
L = LODF[:, failed_idx]
dFf_alpha = np.dot(L, Ff)
# return the final contingency flow as the base flow plus the contingency flow delta
return flows + dFf_alpha
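# Hypothetical usage sketch: given base flows and a LODF matrix,
#   flows_post = multiple_failure(flows, LODF, failed_idx=[1, 5])
# returns the flow on every branch after branches 1 and 5 trip together
# (the same call appears in the __main__ section below).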
def get_n_minus_1_flows(circuit: MultiCircuit):
opt = PowerFlowOptions()
branches = circuit.get_branches()
m = circuit.get_branch_number()
Pmat = np.zeros((m, m)) # monitored, contingency
for c, branch in enumerate(branches):
if branch.active:
branch.active = False
pf = PowerFlowDriver(circuit, opt)
pf.run()
Pmat[:, c] = pf.results.Sbranch.real
branch.active = True
return Pmat
def check_lodf(grid: MultiCircuit):
flows_n1_nr = get_n_minus_1_flows(grid)
# assume 1 island
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0]
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
Ibus=circuit.Ibus,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=True)
LODF = make_lodf(circuit, PTDF)
Pbus = circuit.get_injections(False).real
flows_n = np.dot(PTDF, Pbus)
nl = circuit.nbr
flows_n1 = np.zeros((nl, nl))
for c in range(nl): # branch that fails (contingency)
# for m in range(nl): # branch to monitor
# flows_n1[m, c] = flows_n[m] + LODF[m, c] * flows_n[c]
flows_n1[:, c] = flows_n[:] + LODF[:, c] * flows_n[c]
return flows_n, flows_n1_nr, flows_n1
def test_ptdf(grid):
"""
Sigma-distances test
:param grid:
:return:
"""
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
inputs = islands[0] # pick the first island
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=inputs.Ybus,
Yf=inputs.Yf,
Yt=inputs.Yt,
Cf=inputs.C_branch_bus_f,
V=pf_driver.results.voltage,
Ibus=inputs.Ibus,
pq=inputs.pq,
pv=inputs.pv,
distribute_slack=False)
print('PTDF:')
print(PTDF)
if __name__ == '__main__':
from GridCal.Engine import FileOpen
import pandas as pd
np.set_printoptions(threshold=sys.maxsize, linewidth=200000000)
# np.set_printoptions(linewidth=2000, suppress=True)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/lynn5buspv.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
# fname = 'helm_data1.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14 PQ only.gridcal'
# fname = 'IEEE 14 PQ only full.gridcal'
fname = '/home/santi/Descargas/matpower-fubm-master/data/case5.m'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case30.m'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus.gridcal'
grid_ = FileOpen(fname).open()
test_ptdf(grid_)
name = os.path.splitext(fname.split(os.sep)[-1])[0]
method = 'ACPTDF (Jacobian, V=Vpf)'
nc_ = compile_snapshot_circuit(grid_)
islands_ = split_into_islands(nc_)
circuit_ = islands_[0]
H_ = compute_acptdf(Ybus=circuit_.Ybus,
Yf=circuit_.Yf,
Yt=circuit_.Yt,
Cf=circuit_.C_branch_bus_f,
V=circuit_.Vbus,
Ibus=circuit_.Ibus,
pq=circuit_.pq,
pv=circuit_.pv,
distribute_slack=False)
LODF_ = make_lodf(circuit_, H_)
if H_.shape[0] < 50:
print('PTDF:\n', H_)
print('LODF:\n', LODF_)
flows_n_, flows_n1_nr_, flows_n1_ = check_lodf(grid_)
# in the case of the grid PGOC_6bus
flows_multiple = multiple_failure(flows=flows_n_,
LODF=LODF_,
failed_idx=[1, 5]) # failed lines 2 and 6
Pn1_nr_df = pd.DataFrame(data=flows_n1_nr_, index=nc_.branch_names, columns=nc_.branch_names)
flows_n1_df = pd.DataFrame(data=flows_n1_, index=nc_.branch_names, columns=nc_.branch_names)
# plot N-1
fig = plt.figure(figsize=(12, 8))
title = 'N-1 with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
Pn1_nr_df.plot(ax=ax1, legend=False)
flows_n1_df.plot(ax=ax2, legend=False)
diff = Pn1_nr_df - flows_n1_df
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson N-1 flows')
ax2.set_title('PTDF N-1 flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
# ------------------------------------------------------------------------------------------------------------------
# Perform real time series
# ------------------------------------------------------------------------------------------------------------------
if grid_.time_profile is not None:
grid_.ensure_profiles_exist()
nc_ts = compile_time_circuit(grid_)
islands_ts = split_time_circuit_into_islands(nc_ts)
circuit_ts = islands_ts[0]
pf_options = PowerFlowOptions()
ts_driver = TimeSeries(grid=grid_, options=pf_options)
ts_driver.run()
Pbr_nr = ts_driver.results.Sbranch.real
df_Pbr_nr = pd.DataFrame(data=Pbr_nr, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# Compute the PTDF based flows
Pbr_ptdf = get_branch_time_series(circuit=circuit_ts, PTDF=H_)
df_Pbr_ptdf = pd.DataFrame(data=Pbr_ptdf, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# plot
fig = plt.figure(figsize=(12, 8))
title = 'Flows with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
df_Pbr_nr.plot(ax=ax1, legend=False)
df_Pbr_ptdf.plot(ax=ax2, legend=False)
diff = df_Pbr_nr - df_Pbr_ptdf
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson flows')
ax2.set_title('PTDF flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
plt.show()
|
linino/kernel_3.3.8
|
refs/heads/master
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
barykaed/Pelican-Test
|
refs/heads/gh-pages
|
activate/Lib/encodings/johab.py
|
816
|
#
# johab.py: Python Unicode Codec for JOHAB
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('johab')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='johab',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
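# Illustrative round-trip (registration is normally handled by the codecs
# machinery, so this is only a sketch):
#   info = getregentry()
#   data, _ = info.encode('\uac00')  # HANGUL SYLLABLE GA
#   text, _ = info.decode(data)      # -> '\uac00'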
|
okolisny/integration_tests
|
refs/heads/master
|
cfme/utils/ansible.py
|
1
|
import tempfile
from os import listdir, mkdir, makedirs, path
from shutil import copy, copyfile, rmtree
from subprocess import check_output, CalledProcessError, STDOUT
import sys
from fauxfactory import gen_alphanumeric
from cfme.utils import conf
from cfme.utils.providers import providers_data
from git import Repo
from yaml import load, dump
local_git_repo = "manageiq_ansible_module"
yml_path = path.join(path.dirname(__file__), local_git_repo)
yml_templates_path = path.join(path.dirname(__file__), 'ansible_conf')
basic_script = "basic_script.yml"
yml = ".yml"
random_token = str(gen_alphanumeric(906))
random_miq_user = str(gen_alphanumeric(8))
pulled_repo_library_path = path.join(local_git_repo, 'library')
remote_git_repo_url = "git://github.com/dkorn/manageiq-ansible-module.git"
def create_tmp_directory():
global lib_path
lib_path = tempfile.mkdtemp()
lib_sub_path = 'ansible_conf'
lib_sub_path_library = path.join(lib_sub_path, 'library')
    makedirs(path.join(lib_path, lib_sub_path_library))
global library_path_to_copy_to
global basic_yml_path
library_path_to_copy_to = path.join(lib_path, lib_sub_path_library)
basic_yml_path = path.join(lib_path, lib_sub_path)
def fetch_miq_ansible_module():
if path.isdir(local_git_repo):
rmtree(local_git_repo)
mkdir(local_git_repo)
if path.isdir(library_path_to_copy_to):
rmtree(library_path_to_copy_to)
mkdir(library_path_to_copy_to)
Repo.clone_from(remote_git_repo_url, local_git_repo)
src_files = listdir(pulled_repo_library_path)
for file_name in src_files:
full_file_name = path.join(pulled_repo_library_path, file_name)
if path.isfile(full_file_name):
copy(full_file_name, library_path_to_copy_to)
rmtree(local_git_repo)
def get_values_for_providers_test(provider):
return {
'name': provider.name,
'state': 'present',
'miq_url': config_formatter(),
'miq_username': conf.credentials['default'].username,
'miq_password': conf.credentials['default'].password,
'provider_api_hostname': providers_data[provider.name]['endpoints']['default'].hostname,
'provider_api_port': providers_data[provider.name]['endpoints']['default'].api_port,
'provider_api_auth_token': providers_data[provider.name]['endpoints']['default'].token,
'monitoring_hostname': providers_data[provider.name]['endpoints']['hawkular'].hostname,
'monitoring_port': providers_data[provider.name]['endpoints']['hawkular'].api_port
}
def get_values_for_users_test():
return {
'fullname': 'MIQUser',
'name': 'MIQU',
'password': 'smartvm',
'state': 'present',
'miq_url': config_formatter(),
'miq_username': conf.credentials['default'].username,
'miq_password': conf.credentials['default'].password,
}
def get_values_for_custom_attributes_test(provider):
return {
'entity_type': 'provider',
'entity_name': conf.cfme_data.get('management_systems', {})
[provider.key].get('name', []),
'miq_url': config_formatter(),
'miq_username': conf.credentials['default'].username,
'miq_password': conf.credentials['default'].password,
}
def get_values_for_tags_test(provider):
return {
'resource': 'provider',
'resource_name': provider.name,
'miq_url': config_formatter(),
'miq_username': conf.credentials['default'].username,
'miq_password': conf.credentials['default'].password,
}
def get_values_from_conf(provider, script_type):
if script_type == 'providers':
return get_values_for_providers_test(provider)
if script_type == 'users':
return get_values_for_users_test()
if script_type == 'custom_attributes':
return get_values_for_custom_attributes_test(provider)
if script_type == 'tags':
return get_values_for_tags_test(provider)
# TODO Avoid reading files every time
def read_yml(script, value):
    with open(path.join(yml_path, script + yml), 'r') as f:
doc = load(f)
return doc[0]['tasks'][0]['manageiq_provider'][value]
def get_yml_value(script, value):
with open(path.join(basic_yml_path, script) + yml, 'r') as f:
doc = load(f)
return doc[0]['tasks'][0]['manageiq_provider'][value]
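# A minimal caching sketch for the TODO above -- a module-level dict keyed
# by file path avoids re-parsing unchanged YAML files. The cache name and
# helper are illustrative additions, not part of the original module:
_yml_doc_cache = {}
def _load_yml_cached(file_path):
    # parse each YAML file at most once per process
    if file_path not in _yml_doc_cache:
        with open(file_path, 'r') as f:
            _yml_doc_cache[file_path] = load(f)
    return _yml_doc_cache[file_path]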
def setup_basic_script(provider, script_type):
script_path_source = path.join(yml_templates_path, script_type + "_" + basic_script)
script_path = path.join(basic_yml_path, script_type + "_" + basic_script)
copyfile(script_path_source, script_path)
    with open(script_path, 'r') as f:
doc = load(f)
values_dict = get_values_from_conf(provider, script_type)
for key in values_dict:
if script_type == 'providers':
doc[0]['tasks'][0]['manageiq_provider'][key] = values_dict[key]
elif script_type == 'users':
doc[0]['tasks'][0]['manageiq_user'][key] = values_dict[key]
elif script_type == 'custom_attributes':
doc[0]['tasks'][0]['manageiq_custom_attributes'][key] = values_dict[key]
elif script_type == 'tags':
doc[0]['tasks'][0]['manageiq_tag_assignment'][key] = values_dict[key]
with open(script_path, 'w') as f:
f.write(dump(doc))
def open_yml(script, script_type):
copyfile((path.join(basic_yml_path, script_type + "_" + basic_script)),
path.join(basic_yml_path, script + yml))
    with open(path.join(basic_yml_path, script + yml), 'r') as f:
return load(f)
def write_yml(script, doc):
with open(path.join(basic_yml_path, script + yml), 'w') as f:
f.write(dump(doc))
def setup_ansible_script(provider, script, script_type=None, values_to_update=None):
# This function prepares the ansible scripts to work with the correct
# appliance configs that will be received from Jenkins
setup_basic_script(provider, script_type)
doc = open_yml(script, script_type)
if script == 'add_provider':
write_yml(script, doc)
if script == 'add_provider_ssl':
doc[0]['tasks'][0]['manageiq_provider']['provider_verify_ssl'] = 'True'
write_yml(script, doc)
elif script == 'update_provider':
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_provider'][key] = values_to_update[key]
write_yml(script, doc)
elif script == 'remove_provider':
doc[0]['tasks'][0]['manageiq_provider']['state'] = 'absent'
write_yml(script, doc)
elif script == 'remove_non_existing_provider':
doc[0]['tasks'][0]['manageiq_provider']['state'] = 'absent'
doc[0]['tasks'][0]['manageiq_provider']['name'] = random_miq_user
write_yml(script, doc)
elif script == 'remove_provider_bad_user':
doc[0]['tasks'][0]['manageiq_provider']['miq_username'] = random_miq_user
write_yml(script, doc)
elif script == 'add_provider_bad_token':
doc[0]['tasks'][0]['manageiq_provider']['provider_api_auth_token'] = random_token
write_yml(script, doc)
elif script == 'add_provider_bad_user':
doc[0]['tasks'][0]['manageiq_provider']['miq_username'] = random_miq_user
write_yml(script, doc)
elif script == 'update_non_existing_provider':
doc[0]['tasks'][0]['manageiq_provider']['provider_api_hostname'] = random_miq_user
write_yml(script, doc)
elif script == 'update_provider_bad_user':
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_provider'][key] = values_to_update[key]
doc[0]['tasks'][0]['manageiq_provider']['miq_username'] = random_miq_user
write_yml(script, doc)
elif script == 'create_user':
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_user'][key] = values_to_update[key]
write_yml(script, doc)
elif script == 'update_user':
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_user'][key] = values_to_update[key]
write_yml(script, doc)
elif script == 'create_user_bad_user_name':
doc[0]['tasks'][0]['manageiq_user']['miq_username'] = random_miq_user
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_user'][key] = values_to_update[key]
write_yml(script, doc)
elif script == 'delete_user':
doc[0]['tasks'][0]['manageiq_user']['name'] = values_to_update
doc[0]['tasks'][0]['manageiq_user']['state'] = 'absent'
write_yml(script, doc)
elif script == 'add_custom_attributes':
count = 0
while count < len(values_to_update):
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_custom_attributes']['custom_attributes'][count] = key
count += 1
write_yml(script, doc)
elif script == 'add_custom_attributes_bad_user':
doc[0]['tasks'][0]['manageiq_custom_attributes']['miq_username'] = str(random_miq_user)
write_yml(script, doc)
elif script == 'remove_custom_attributes':
count = 0
doc[0]['tasks'][0]['manageiq_custom_attributes']['state'] = 'absent'
while count < len(values_to_update):
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_custom_attributes']['custom_attributes'][count] = key
count += 1
write_yml(script, doc)
elif script == 'add_tags':
count = 0
while count < len(values_to_update):
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_tag_assignment']['tags'][count]['category'] = \
values_to_update[count]['category']
doc[0]['tasks'][0]['manageiq_tag_assignment']['tags'][count]['name'] = \
values_to_update[count]['name']
count += 1
doc[0]['tasks'][0]['manageiq_tag_assignment']['state'] = 'present'
write_yml(script, doc)
elif script == 'remove_tags':
count = 0
while count < len(values_to_update):
for key in values_to_update:
doc[0]['tasks'][0]['manageiq_tag_assignment']['tags'][count]['category'] = \
values_to_update[count]['category']
doc[0]['tasks'][0]['manageiq_tag_assignment']['tags'][count]['name'] = \
values_to_update[count]['name']
count += 1
doc[0]['tasks'][0]['manageiq_tag_assignment']['state'] = 'absent'
write_yml(script, doc)
def run_ansible(script):
ansible_playbook_cmd = "ansible-playbook -e ansible_python_interpreter="
interpreter_path = sys.executable
script_path = path.join(basic_yml_path, script + ".yml")
cmd = '{}{} {}'.format(ansible_playbook_cmd, interpreter_path, script_path)
return run_cmd(cmd)
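# For reference, the assembled command has this shape (the temp dir is
# whatever create_tmp_directory() produced):
#   ansible-playbook -e ansible_python_interpreter=<sys.executable> \
#       <lib_path>/ansible_conf/<script>.yml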
def run_cmd(cmd):
try:
response = check_output(cmd, shell=True, stderr=STDOUT)
except CalledProcessError as exc:
print("Status : FAIL", exc.returncode, exc.output)
return exc.output
    else:
        print("Output: \n{}\n".format(response))
        return response
# TODO For further usage with reply statuses test. Not being used at the moment
def reply_status(reply):
ok_status = reply['stats']['localhost']['ok']
changed_status = reply['stats']['localhost']['changed']
failures_status = reply['stats']['localhost']['failures']
skipped_status = reply['stats']['localhost']['skipped']
message_status = reply['plays'][0]['tasks'][2]['hosts']['localhost']['result']['msg']
if not ok_status == '0':
ok_status = 'OK'
else:
ok_status = 'Failed'
if changed_status:
return 'Changed', message_status, ok_status
elif skipped_status:
return 'Skipped', message_status, ok_status
elif failures_status:
return 'Failed', message_status, ok_status
else:
return 'No Change', message_status, ok_status
def config_formatter():
    base_url = conf.env.get("base_url", "")
    if "https://" in base_url:
        return base_url
    else:
        return "https://" + base_url
def remove_tmp_files():
rmtree(lib_path, ignore_errors=True)
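# A typical flow through these helpers (illustrative; the provider object
# and script names are assumptions, not defined in this module):
#
#   create_tmp_directory()
#   fetch_miq_ansible_module()
#   setup_ansible_script(provider, 'add_provider', script_type='providers')
#   run_ansible('add_provider')
#   remove_tmp_files()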
|
Acidburn0zzz/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/accept_alert/__init__.py
|
12133432
| |
adelton/django
|
refs/heads/master
|
tests/model_validation/__init__.py
|
12133432
| |
Gussy/mavlink
|
refs/heads/master
|
pymavlink/generator/lib/__init__.py
|
12133432
| |
binoculars/osf.io
|
refs/heads/develop
|
tests/framework_tests/__init__.py
|
12133432
| |
lindamar/ecclesi
|
refs/heads/master
|
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/__init__.py
|
12133432
| |
group-policy/rally
|
refs/heads/master
|
rally/plugins/openstack/scenarios/fuel/utils.py
|
1
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import time
import six
from rally.common.i18n import _
from rally import osclients
from rally.plugins.openstack import scenario
from rally.task import atomic
class FuelEnvManager(object):
def __init__(self, client):
self.client = client
def get(self, env_id):
try:
return self.client.get_by_id(env_id)
except BaseException:
return None
def list(self):
"""List Fuel environments."""
try:
return self.client.get_all()
except SystemExit:
raise RuntimeError(_("Can't list envitonments. "
"Please check server availability."))
def create(self, name, release_id=1,
network_provider="neutron",
deployment_mode="ha_compact",
net_segment_type="vlan"):
try:
env = self.client.create(name, release_id, network_provider,
deployment_mode, net_segment_type)
except SystemExit:
raise RuntimeError(_("Something went wrong while creating an "
"environment. This can happen when "
"environment with name %s already exists.")
% name)
if env:
return env
raise RuntimeError(_("Environment was not created or was "
"created but not returned by server."))
def delete(self, env_id, retries=5, retry_pause=0.5):
env = self.get(env_id)
retry_number = 0
while env:
if retry_number > retries:
raise RuntimeError(_("Can't delete environment "
"id: %s ") % env_id)
try:
self.client.delete_by_id(env_id)
except BaseException:
time.sleep(retry_pause)
env = self.get(env_id)
retry_number += 1
class FuelClient(object):
"""Thin facade over `fuelclient.get_client'."""
def __init__(self, version, server_address, server_port, username,
password):
# NOTE(amaretskiy): For now, there are only 2 ways how to
# configure fuelclient connection:
# * configuration file - this is not convenient to create
# separate file for each benchmark
# * env variables - this approach is preferable
os.environ["SERVER_ADDRESS"] = server_address
os.environ["LISTEN_PORT"] = str(server_port)
os.environ["KEYSTONE_USER"] = username
os.environ["KEYSTONE_PASS"] = password
import fuelclient
FuelClient.fuelclient_module = fuelclient
get_client = fuelclient.get_client
self.environment = FuelEnvManager(get_client(
"environment", version=version))
self.node = get_client("node", version=version)
self.task = get_client("task", version=version)
@osclients.configure("fuel", default_version="v1")
class Fuel(osclients.OSClient):
"""FuelClient factory for osclients.Clients."""
def create_client(self, *args, **kwargs):
auth_url = six.moves.urllib.parse.urlparse(self.credential.auth_url)
return FuelClient(version=self.choose_version(),
server_address=auth_url.hostname,
server_port=8000,
username=self.credential.username,
password=self.credential.password)
class FuelScenario(scenario.OpenStackScenario):
"""Base class for Fuel scenarios."""
@atomic.action_timer("fuel.list_environments")
def _list_environments(self):
return self.admin_clients("fuel").environment.list()
@atomic.action_timer("fuel.create_environment")
def _create_environment(self, release_id=1,
network_provider="neutron",
deployment_mode="ha_compact",
net_segment_type="vlan"):
name = self.generate_random_name()
env = self.admin_clients("fuel").environment.create(
name, release_id, network_provider, deployment_mode,
net_segment_type)
return env["id"]
@atomic.action_timer("fuel.delete_environment")
def _delete_environment(self, env_id, retries=5):
self.admin_clients("fuel").environment.delete(env_id, retries)
@atomic.action_timer("fuel.add_node")
def _add_node(self, env_id, node_ids, node_roles=None):
"""Add node to environment
:param env_id environment id
:param node_ids list of node ids
:param node_roles list of roles
"""
node_roles = node_roles or ["compute"]
try:
self.admin_clients("fuel").environment.client.add_nodes(
env_id, node_ids, node_roles)
except BaseException as e:
raise RuntimeError(
"Unable to add node(s) to environment. Fuel client exited "
"with error %s" % e)
@atomic.action_timer("fuel.delete_node")
def _remove_node(self, env_id, node_id):
env = FuelClient.fuelclient_module.objects.environment.Environment(
env_id)
try:
env.unassign([node_id])
except BaseException as e:
            raise RuntimeError(
                "Unable to remove node(s) from environment. Fuel client "
                "exited with error %s" % e)
@atomic.action_timer("fuel.list_nodes")
def _list_node_ids(self, env_id=None):
result = self.admin_clients("fuel").node.get_all(
environment_id=env_id)
return [x["id"] for x in result]
def _node_is_assigned(self, node_id):
try:
node = self.admin_clients("fuel").node.get_by_id(node_id)
return bool(node["cluster"])
except BaseException as e:
            raise RuntimeError(
                "Unable to fetch node info. Fuel client exited "
                "with error %s" % e)
def _get_free_node_id(self):
node_ids = self._list_node_ids()
random.shuffle(node_ids)
for node_id in node_ids:
if not self._node_is_assigned(node_id):
return node_id
else:
raise RuntimeError("Can not found free node.")
|
toddeye/home-assistant
|
refs/heads/dev
|
homeassistant/components/tellduslive.py
|
2
|
"""
homeassistant.components.tellduslive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tellduslive Component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tellduslive/
"""
from datetime import timedelta
import logging
from homeassistant.loader import get_component
from homeassistant import bootstrap
from homeassistant.util import Throttle
from homeassistant.helpers import validate_config
from homeassistant.const import (
EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE, ATTR_DISCOVERED)
DOMAIN = "tellduslive"
REQUIREMENTS = ['tellive-py==0.5.2']
_LOGGER = logging.getLogger(__name__)
DISCOVER_SENSORS = "tellduslive.sensors"
DISCOVER_SWITCHES = "tellduslive.switches"
DISCOVERY_TYPES = {"sensor": DISCOVER_SENSORS,
"switch": DISCOVER_SWITCHES}
CONF_PUBLIC_KEY = "public_key"
CONF_PRIVATE_KEY = "private_key"
CONF_TOKEN = "token"
CONF_TOKEN_SECRET = "token_secret"
MIN_TIME_BETWEEN_SWITCH_UPDATES = timedelta(minutes=1)
MIN_TIME_BETWEEN_SENSOR_UPDATES = timedelta(minutes=5)
NETWORK = None
@Throttle(MIN_TIME_BETWEEN_SWITCH_UPDATES)
def request_switches():
""" make request to online service """
_LOGGER.debug("Updating switches from Telldus Live")
switches = NETWORK.request("devices/list")["device"]
# filter out any group of switches
switches = {switch["id"]: switch for switch in switches
if switch["type"] == "device"}
return switches
@Throttle(MIN_TIME_BETWEEN_SENSOR_UPDATES)
def request_sensors():
""" make request to online service """
_LOGGER.debug("Updating sensors from Telldus Live")
units = NETWORK.request("sensors/list")["sensor"]
# one unit can contain many sensors
sensors = {unit["id"]+sensor["name"]: dict(unit, data=sensor)
for unit in units
for sensor in unit["data"]}
return sensors
class TelldusLiveData(object):
""" Gets the latest data and update the states. """
def __init__(self, hass, config):
public_key = config[DOMAIN].get(CONF_PUBLIC_KEY)
private_key = config[DOMAIN].get(CONF_PRIVATE_KEY)
token = config[DOMAIN].get(CONF_TOKEN)
token_secret = config[DOMAIN].get(CONF_TOKEN_SECRET)
from tellive.client import LiveClient
self._switches = {}
self._sensors = {}
self._hass = hass
self._config = config
self._client = LiveClient(public_key=public_key,
private_key=private_key,
access_token=token,
access_secret=token_secret)
def validate_session(self):
""" Make a dummy request to see if the session is valid """
try:
return 'email' in self.request("user/profile")
except RuntimeError:
return False
def discover(self):
""" Update states, will trigger discover """
self.update_sensors()
self.update_switches()
def _discover(self, found_devices, component_name):
""" Send discovery event if component not yet discovered """
if not len(found_devices):
return
_LOGGER.info("discovered %d new %s devices",
len(found_devices), component_name)
component = get_component(component_name)
bootstrap.setup_component(self._hass,
component.DOMAIN,
self._config)
discovery_type = DISCOVERY_TYPES[component_name]
self._hass.bus.fire(EVENT_PLATFORM_DISCOVERED,
{ATTR_SERVICE: discovery_type,
ATTR_DISCOVERED: found_devices})
def request(self, what, **params):
""" Sends a request to the tellstick live API """
from tellive.live import const
supported_methods = const.TELLSTICK_TURNON \
| const.TELLSTICK_TURNOFF \
| const.TELLSTICK_TOGGLE \
# Tellstick device methods not yet supported
# | const.TELLSTICK_BELL \
# | const.TELLSTICK_DIM \
# | const.TELLSTICK_LEARN \
# | const.TELLSTICK_EXECUTE \
# | const.TELLSTICK_UP \
# | const.TELLSTICK_DOWN \
# | const.TELLSTICK_STOP
default_params = {'supportedMethods': supported_methods,
"includeValues": 1,
"includeScale": 1,
"includeIgnored": 0}
params.update(default_params)
        # room for improvement: the tellive library doesn't seem to
        # re-use sessions; instead it opens a new session for each
        # request. This needs to be fixed.
response = self._client.request(what, params)
_LOGGER.debug("got response %s", response)
return response
def update_devices(self,
local_devices,
remote_devices,
component_name):
""" update local device list and discover new devices """
if remote_devices is None:
return local_devices
remote_ids = remote_devices.keys()
local_ids = local_devices.keys()
added_devices = list(remote_ids - local_ids)
self._discover(added_devices,
component_name)
removed_devices = list(local_ids - remote_ids)
remote_devices.update({id: dict(local_devices[id], offline=True)
for id in removed_devices})
return remote_devices
def update_sensors(self):
""" update local list of sensors """
try:
self._sensors = self.update_devices(self._sensors,
request_sensors(),
"sensor")
except OSError:
_LOGGER.warning("could not update sensors")
def update_switches(self):
""" update local list of switches """
try:
self._switches = self.update_devices(self._switches,
request_switches(),
"switch")
except OSError:
_LOGGER.warning("could not update switches")
def _check_request(self, what, **params):
""" Make request, check result if successful """
response = self.request(what, **params)
return response and response.get('status') == 'success'
def get_switch(self, switch_id):
""" return switch representation """
return self._switches[switch_id]
def get_sensor(self, sensor_id):
""" return sensor representation """
return self._sensors[sensor_id]
def turn_switch_on(self, switch_id):
""" Turn switch off. """
if self._check_request("device/turnOn", id=switch_id):
from tellive.live import const
self.get_switch(switch_id)["state"] = const.TELLSTICK_TURNON
def turn_switch_off(self, switch_id):
""" Turn switch on. """
if self._check_request("device/turnOff", id=switch_id):
from tellive.live import const
self.get_switch(switch_id)["state"] = const.TELLSTICK_TURNOFF
def setup(hass, config):
""" Setup the Telldus Live component. """
    # fixme: acquire app key and provide authentication
# using username + password
if not validate_config(config,
{DOMAIN: [CONF_PUBLIC_KEY,
CONF_PRIVATE_KEY,
CONF_TOKEN,
CONF_TOKEN_SECRET]},
_LOGGER):
_LOGGER.error(
"Configuration Error: "
"Please make sure you have configured your keys "
"that can be aquired from https://api.telldus.com/keys/index")
return False
global NETWORK
NETWORK = TelldusLiveData(hass, config)
if not NETWORK.validate_session():
_LOGGER.error(
"Authentication Error: "
"Please make sure you have configured your keys "
"that can be aquired from https://api.telldus.com/keys/index")
return False
NETWORK.discover()
return True
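# A minimal configuration sketch for configuration.yaml (key names are the
# CONF_* constants above; values are placeholders, not working credentials):
#
#   tellduslive:
#     public_key: ABCDE
#     private_key: FGHIJ
#     token: KLMNO
#     token_secret: PQRST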
|
ecolell/pfamserver
|
refs/heads/master
|
pfamserver/models/__init__.py
|
1
|
from .pfam_a import PfamA
from .pdb import Pdb
from .pdb_pfam_a_reg import PdbPfamAReg
from .pfam_a_reg_full_significant import PfamARegFullSignificant
from .pfamseq import Pfamseq
from .uniprot import Uniprot
from .uniprot_reg_full import UniprotRegFull
from .pfam_a_pfamseq import PfamAPfamseq
|
timvandermeij/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/_pytest/runner.py
|
173
|
""" basic collect and runtest protocol implementations """
import bdb
import sys
from time import time
import py
import pytest
from _pytest._code.code import TerminalRepr, ExceptionInfo
def pytest_namespace():
return {
'fail' : fail,
'skip' : skip,
'importorskip' : importorskip,
'exit' : exit,
}
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption('--durations',
action="store", type=int, default=None, metavar="N",
help="show N slowest setup/test durations (N=0 for all)."),
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, 'duration'):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" %
(rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
class NodeInfo:
def __init__(self, location):
self.location = location
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(
nodeid=item.nodeid, location=item.location,
)
runtestprotocol(item, nextitem=nextitem)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log,
nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
def pytest_runtest_setup(item):
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del tb # Get rid of it in this namespace
raise
def pytest_runtest_teardown(item, nextitem):
item.session._setupstate.teardown_exact(item, nextitem)
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail") or
call.excinfo.errisinstance(skip.Exception) or
call.excinfo.errisinstance(bdb.BdbQuit))
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo:
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
self.result = func()
except KeyboardInterrupt:
self.stop = time()
raise
except:
self.excinfo = ExceptionInfo()
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d['version_info'][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d['id'], d['sysplatform'], ver, d['executable'])
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, 'node'):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
when = call.when
duration = call.stop-call.start
keywords = dict([(x,1) for x in item.keywords])
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(pytest.skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(excinfo,
style=item.config.option.tbstyle)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" %(key, rwhen), content))
return TestReport(item.nodeid, item.location,
keywords, outcome, longrepr, when,
sections, duration)
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(self, nodeid, location, keywords, outcome,
longrepr, when, sections=(), duration=0, **extra):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
        #: list of (secname, data) extra information which needs to
        #: be marshallable
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid, self.when, self.outcome)
class TeardownErrorReport(BaseReport):
outcome = "failed"
when = "teardown"
def __init__(self, longrepr, **extra):
self.longrepr = longrepr
self.sections = []
self.__dict__.update(extra)
def pytest_make_collect_report(collector):
call = CallInfo(collector._memocollect, "memocollect")
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
rep = CollectReport(collector.nodeid, outcome, longrepr,
getattr(call, 'result', None))
rep.call = call # see collect_one_node
return rep
class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result,
sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid, len(self.result), self.outcome)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert py.builtin.callable(finalizer)
#assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
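    # A usage sketch (the finalizer body and colitem are illustrative):
    #   setupstate.addfinalizer(lambda: cleanup_resource(), colitem=item)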
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except Exception:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
py.builtin._reraise(*exc)
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert colitem is None or colitem in self.stack \
or isinstance(colitem, tuple)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
while self.stack:
if self.stack == needed_collectors[:len(self.stack)]:
break
self._pop_and_teardown()
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, '_prepare_exc'):
py.builtin._reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack):]:
self.stack.append(col)
try:
col.setup()
except Exception:
col._prepare_exc = sys.exc_info()
raise
def collect_one_node(collector):
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep
# =============================================================
# Test OutcomeExceptions and helpers for creating them.
class OutcomeException(Exception):
""" OutcomeException and its subclass instances indicate and
contain info about test and collection outcomes.
"""
def __init__(self, msg=None, pytrace=True):
Exception.__init__(self, msg)
self.msg = msg
self.pytrace = pytrace
def __repr__(self):
if self.msg:
val = self.msg
if isinstance(val, bytes):
val = py._builtin._totext(val, errors='replace')
return val
return "<%s instance>" %(self.__class__.__name__,)
__str__ = __repr__
class Skipped(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = 'builtins'
class Failed(OutcomeException):
""" raised from an explicit call to pytest.fail() """
__module__ = 'builtins'
class Exit(KeyboardInterrupt):
""" raised for immediate program exits (no tracebacks/summaries)"""
def __init__(self, msg="unknown reason"):
self.msg = msg
KeyboardInterrupt.__init__(self, msg)
# exposed helper methods
def exit(msg):
""" exit testing process as if KeyboardInterrupt was triggered. """
__tracebackhide__ = True
raise Exit(msg)
exit.Exception = Exit
def skip(msg=""):
""" skip an executing test with the given message. Note: it's usually
better to use the pytest.mark.skipif marker to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies. See the pytest_skipping plugin for details.
"""
__tracebackhide__ = True
raise Skipped(msg=msg)
skip.Exception = Skipped
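# e.g., imperatively inside a test:
#   pytest.skip("unsupported configuration")
# or, as the docstring recommends, declaratively via the marker:
#   @pytest.mark.skipif(sys.platform == "win32", reason="posix-only test")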
def fail(msg="", pytrace=True):
""" explicitly fail an currently-executing test with the given Message.
:arg pytrace: if false the msg represents the full failure information
and no python traceback will be reported.
"""
__tracebackhide__ = True
raise Failed(msg=msg, pytrace=pytrace)
fail.Exception = Failed
def importorskip(modname, minversion=None):
""" return imported module if it has at least "minversion" as its
        __version__ attribute. If no minversion is specified, a skip
        is only triggered if the module can not be imported.
"""
__tracebackhide__ = True
compile(modname, '', 'eval') # to catch syntaxerrors
try:
__import__(modname)
except ImportError:
skip("could not import %r" %(modname,))
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, '__version__', None)
if minversion is not None:
try:
from pkg_resources import parse_version as pv
except ImportError:
skip("we have a required version for %r but can not import "
"no pkg_resources to parse version strings." %(modname,))
if verattr is None or pv(verattr) < pv(minversion):
skip("module %r has __version__ %r, required is: %r" %(
modname, verattr, minversion))
return mod
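# A usage sketch (module name and version are illustrative):
#   docutils = pytest.importorskip("docutils", minversion="0.3")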
|
infobloxopen/neutron
|
refs/heads/master
|
neutron/plugins/ml2/drivers/arista/config.py
|
21
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
# Arista ML2 Mechanism driver specific configuration knobs.
#
# Following are user configurable options for Arista ML2 Mechanism
# driver. The eapi_username, eapi_password, and eapi_host are
# required options. Region Name must be the same that is used by
# Keystone service. This option is available to support multiple
# OpenStack/Neutron controllers.
ARISTA_DRIVER_OPTS = [
cfg.StrOpt('eapi_username',
default='',
               help=_('Username for Arista EOS. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail.')),
cfg.StrOpt('eapi_password',
default='',
secret=True, # do not expose value in the logs
               help=_('Password for Arista EOS. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail.')),
cfg.StrOpt('eapi_host',
default='',
               help=_('Arista EOS IP address. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail.')),
cfg.BoolOpt('use_fqdn',
default=True,
help=_('Defines if hostnames are sent to Arista EOS as FQDNs '
'("node1.domain.com") or as short names ("node1"). '
'This is optional. If not set, a value of "True" '
'is assumed.')),
cfg.IntOpt('sync_interval',
default=180,
help=_('Sync interval in seconds between Neutron plugin and '
'EOS. This interval defines how often the '
'synchronization is performed. This is an optional '
'field. If not set, a value of 180 seconds is '
'assumed.')),
cfg.StrOpt('region_name',
default='RegionOne',
help=_('Defines Region Name that is assigned to this OpenStack '
'Controller. This is useful when multiple '
'OpenStack/Neutron controllers are managing the same '
'Arista HW clusters. Note that this name must match '
'with the region name registered (or known) to keystone '
                      'service. Authentication with Keystone is performed by '
'EOS. This is optional. If not set, a value of '
'"RegionOne" is assumed.'))
]
""" Arista L3 Service Plugin specific configuration knobs.
Following are user configurable options for Arista L3 plugin
driver. The eapi_username, eapi_password, and eapi_host are
required options.
"""
ARISTA_L3_PLUGIN = [
cfg.StrOpt('primary_l3_host_username',
default='',
               help=_('Username for Arista EOS. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail')),
cfg.StrOpt('primary_l3_host_password',
default='',
secret=True, # do not expose value in the logs
               help=_('Password for Arista EOS. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail')),
cfg.StrOpt('primary_l3_host',
default='',
               help=_('Arista EOS IP address. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail')),
cfg.StrOpt('secondary_l3_host',
default='',
help=_('Arista EOS IP address for second Switch MLAGed with '
                      'the first one. This is an optional field; however, if '
'mlag_config flag is set, then this is required. '
'If not set, all communications to Arista EOS '
'will fail')),
cfg.BoolOpt('mlag_config',
default=False,
                help=_('This flag is used to indicate if Arista Switches are '
'configured in MLAG mode. If yes, all L3 config '
'is pushed to both the switches automatically. '
'If this flag is set to True, ensure to specify IP '
'addresses of both switches. '
'This is optional. If not set, a value of "False" '
'is assumed.')),
cfg.BoolOpt('use_vrf',
default=False,
help=_('A "True" value for this flag indicates to create a '
'router in VRF. If not set, all routers are created '
'in default VRF. '
'This is optional. If not set, a value of "False" '
'is assumed.')),
cfg.IntOpt('l3_sync_interval',
default=180,
help=_('Sync interval in seconds between L3 Service plugin '
'and EOS. This interval defines how often the '
'synchronization is performed. This is an optional '
'field. If not set, a value of 180 seconds is assumed'))
]
cfg.CONF.register_opts(ARISTA_L3_PLUGIN, "l3_arista")
cfg.CONF.register_opts(ARISTA_DRIVER_OPTS, "ml2_arista")
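# A minimal sketch of the corresponding configuration file sections (the
# section names match the register_opts() calls above; values are
# placeholders):
#
#   [ml2_arista]
#   eapi_host = 192.0.2.1
#   eapi_username = admin
#   eapi_password = secret
#   region_name = RegionOne
#
#   [l3_arista]
#   primary_l3_host = 192.0.2.2
#   primary_l3_host_username = admin
#   primary_l3_host_password = secret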
|
googleapis/python-service-usage
|
refs/heads/master
|
google/cloud/service_usage_v1/types/serviceusage.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.service_usage_v1.types import resources
__protobuf__ = proto.module(
package="google.api.serviceusage.v1",
manifest={
"EnableServiceRequest",
"EnableServiceResponse",
"DisableServiceRequest",
"DisableServiceResponse",
"GetServiceRequest",
"ListServicesRequest",
"ListServicesResponse",
"BatchEnableServicesRequest",
"BatchEnableServicesResponse",
"BatchGetServicesRequest",
"BatchGetServicesResponse",
},
)
class EnableServiceRequest(proto.Message):
r"""Request message for the ``EnableService`` method.
Attributes:
name (str):
Name of the consumer and service to enable the service on.
The ``EnableService`` and ``DisableService`` methods
currently only support projects.
Enabling a service requires that the service is public or is
shared with the user enabling the service.
An example name would be:
``projects/123/services/serviceusage.googleapis.com`` where
``123`` is the project number.
"""
name = proto.Field(proto.STRING, number=1,)
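# A minimal construction sketch; the resource name reuses the example
# project number from the docstring:
#
#   request = EnableServiceRequest(
#       name="projects/123/services/serviceusage.googleapis.com")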
class EnableServiceResponse(proto.Message):
r"""Response message for the ``EnableService`` method. This response
message is assigned to the ``response`` field of the returned
Operation when that operation is done.
Attributes:
service (google.cloud.service_usage_v1.types.Service):
The new state of the service after enabling.
"""
service = proto.Field(proto.MESSAGE, number=1, message=resources.Service,)
class DisableServiceRequest(proto.Message):
r"""Request message for the ``DisableService`` method.
Attributes:
name (str):
Name of the consumer and service to disable the service on.
The enable and disable methods currently only support
projects.
An example name would be:
``projects/123/services/serviceusage.googleapis.com`` where
``123`` is the project number.
disable_dependent_services (bool):
Indicates if services that are enabled and
which depend on this service should also be
disabled. If not set, an error will be generated
if any enabled services depend on the service to
be disabled. When set, the service, and any
enabled services that depend on it, will be
disabled together.
check_if_service_has_usage (google.cloud.service_usage_v1.types.DisableServiceRequest.CheckIfServiceHasUsage):
Defines the behavior for checking service
usage when disabling a service.
"""
class CheckIfServiceHasUsage(proto.Enum):
r"""Enum to determine if service usage should be checked when
disabling a service.
"""
CHECK_IF_SERVICE_HAS_USAGE_UNSPECIFIED = 0
SKIP = 1
CHECK = 2
name = proto.Field(proto.STRING, number=1,)
disable_dependent_services = proto.Field(proto.BOOL, number=2,)
check_if_service_has_usage = proto.Field(
proto.ENUM, number=3, enum=CheckIfServiceHasUsage,
)
class DisableServiceResponse(proto.Message):
r"""Response message for the ``DisableService`` method. This response
message is assigned to the ``response`` field of the returned
Operation when that operation is done.
Attributes:
service (google.cloud.service_usage_v1.types.Service):
The new state of the service after disabling.
"""
service = proto.Field(proto.MESSAGE, number=1, message=resources.Service,)
class GetServiceRequest(proto.Message):
r"""Request message for the ``GetService`` method.
Attributes:
name (str):
Name of the consumer and service to get the
``ConsumerState`` for.
An example name would be:
``projects/123/services/serviceusage.googleapis.com`` where
``123`` is the project number.
"""
name = proto.Field(proto.STRING, number=1,)
class ListServicesRequest(proto.Message):
r"""Request message for the ``ListServices`` method.
Attributes:
parent (str):
Parent to search for services on.
An example name would be: ``projects/123`` where ``123`` is
the project number.
page_size (int):
Requested size of the next page of data.
Requested page size cannot exceed 200.
If not set, the default page size is 50.
page_token (str):
Token identifying which result to start with,
which is returned by a previous list call.
filter (str):
Only list services that conform to the given filter. The
allowed filter strings are ``state:ENABLED`` and
``state:DISABLED``.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
filter = proto.Field(proto.STRING, number=4,)
class ListServicesResponse(proto.Message):
r"""Response message for the ``ListServices`` method.
Attributes:
services (Sequence[google.cloud.service_usage_v1.types.Service]):
The available services for the requested
project.
next_page_token (str):
Token that can be passed to ``ListServices`` to resume a
paginated query.
"""
@property
def raw_page(self):
return self
services = proto.RepeatedField(proto.MESSAGE, number=1, message=resources.Service,)
next_page_token = proto.Field(proto.STRING, number=2,)
class BatchEnableServicesRequest(proto.Message):
r"""Request message for the ``BatchEnableServices`` method.
Attributes:
parent (str):
Parent to enable services on.
An example name would be: ``projects/123`` where ``123`` is
the project number.
The ``BatchEnableServices`` method currently only supports
projects.
service_ids (Sequence[str]):
The identifiers of the services to enable on
the project.
A valid identifier would be:
serviceusage.googleapis.com
Enabling services requires that each service is
public or is shared with the user enabling the
service.
A single request can enable a maximum of 20
services at a time. If more than 20 services are
specified, the request will fail, and no state
changes will occur.
"""
parent = proto.Field(proto.STRING, number=1,)
service_ids = proto.RepeatedField(proto.STRING, number=2,)
class BatchEnableServicesResponse(proto.Message):
r"""Response message for the ``BatchEnableServices`` method. This
response message is assigned to the ``response`` field of the
returned Operation when that operation is done.
Attributes:
services (Sequence[google.cloud.service_usage_v1.types.Service]):
The new state of the services after enabling.
failures (Sequence[google.cloud.service_usage_v1.types.BatchEnableServicesResponse.EnableFailure]):
If allow_partial_success is true, and one or more services
could not be enabled, this field contains the details about
each failure.
"""
class EnableFailure(proto.Message):
r"""Provides error messages for the failing services.
Attributes:
service_id (str):
The service id of a service that could not be
enabled.
error_message (str):
An error message describing why the service
could not be enabled.
"""
service_id = proto.Field(proto.STRING, number=1,)
error_message = proto.Field(proto.STRING, number=2,)
services = proto.RepeatedField(proto.MESSAGE, number=1, message=resources.Service,)
failures = proto.RepeatedField(proto.MESSAGE, number=2, message=EnableFailure,)
class BatchGetServicesRequest(proto.Message):
r"""Request message for the ``BatchGetServices`` method.
Attributes:
parent (str):
Parent to retrieve services from. If this is set, the parent
of all of the services specified in ``names`` must match
this field. An example name would be: ``projects/123`` where
``123`` is the project number. The ``BatchGetServices``
method currently only supports projects.
names (Sequence[str]):
Names of the services to retrieve.
An example name would be:
``projects/123/services/serviceusage.googleapis.com`` where
``123`` is the project number. A single request can get a
maximum of 30 services at a time.
"""
parent = proto.Field(proto.STRING, number=1,)
names = proto.RepeatedField(proto.STRING, number=2,)
class BatchGetServicesResponse(proto.Message):
r"""Response message for the ``BatchGetServices`` method.
Attributes:
services (Sequence[google.cloud.service_usage_v1.types.Service]):
The requested Service states.
"""
services = proto.RepeatedField(proto.MESSAGE, number=1, message=resources.Service,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
tudorian/eden
|
refs/heads/master
|
modules/s3_update_check.py
|
12
|
# -*- coding: utf-8 -*-
import os
import sys
try:
from gluon import current
except ImportError:
print >> sys.stderr, """
The installed version of Web2py is too old -- it does not define current.
Please upgrade Web2py to a more recent version.
"""
# Version of 000_config.py
# Increment this if the user should update their running instance
VERSION = 1
#def update_check(environment, template="default"):
def update_check(settings):
"""
Check whether the dependencies are sufficient to run Eden
@ToDo: Load deployment_settings so that we can configure the update_check
- need to rework so that 000_config.py is parsed 1st
@param settings: the deployment_settings
"""
# Get Web2py environment into our globals.
#globals().update(**environment)
request = current.request
# Fatal errors
errors = []
# Non-fatal warnings
warnings = []
# -------------------------------------------------------------------------
# Check Python libraries
# Get mandatory global dependencies
app_path = request.folder
template = settings.get_template()
location = settings.get_template_location()
gr_path = os.path.join(app_path, "requirements.txt")
tr_path = os.path.join(app_path, location, "templates", template, "requirements.txt")
or_path = os.path.join(app_path, "optional_requirements.txt")
tor_path = os.path.join(app_path, location, "templates", template, "optional_requirements.txt")
global_dep = parse_requirements(gr_path)
template_dep = parse_requirements(tr_path)
optional_dep = parse_requirements(or_path)
template_optional_dep = parse_requirements(tor_path)
# remove optional dependencies which are already accounted for in template dependencies
unique = set(optional_dep.keys()).difference(set(template_dep.keys()))
for dependency in optional_dep.keys():
if dependency not in unique:
del optional_dep[dependency]
# override optional dependency messages from template
unique = set(optional_dep.keys()).difference(set(template_optional_dep.keys()))
for dependency in optional_dep.keys():
if dependency not in unique:
del optional_dep[dependency]
errors, warnings = s3_check_python_lib(global_dep, template_dep, template_optional_dep, optional_dep)
# @ToDo: Move these to Template
# for now this is done in s3db.climate_first_run()
if settings.has_module("climate"):
if settings.get_database_type() != "postgres":
errors.append("Climate unresolved dependency: PostgreSQL required")
try:
import rpy2
except ImportError:
errors.append("Climate unresolved dependency: RPy2 required")
try:
from Scientific.IO import NetCDF
except ImportError:
warnings.append("Climate unresolved dependency: NetCDF required if you want to import readings")
try:
from scipy import stats
except ImportError:
warnings.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map")
# -------------------------------------------------------------------------
# Check Web2Py version
#
# Currently, the minimum usable Web2py is determined by whether the
# Scheduler is available
web2py_minimum_version = "Version 2.4.7-stable+timestamp.2013.05.27.11.49.44"
# Offset of datetime in return value of parse_version.
datetime_index = 4
web2py_version_ok = True
try:
from gluon.fileutils import parse_version
except ImportError:
web2py_version_ok = False
if web2py_version_ok:
try:
web2py_minimum_parsed = parse_version(web2py_minimum_version)
web2py_minimum_datetime = web2py_minimum_parsed[datetime_index]
web2py_installed_version = request.global_settings.web2py_version
if isinstance(web2py_installed_version, str):
# Post 2.4.2, request.global_settings.web2py_version is unparsed
web2py_installed_parsed = parse_version(web2py_installed_version)
web2py_installed_datetime = web2py_installed_parsed[datetime_index]
else:
# 2.4.2 & earlier style
web2py_installed_datetime = web2py_installed_version[datetime_index]
web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
except:
# Will get AttributeError if Web2py's parse_version is too old for
# its current version format, which changed in 2.3.2.
web2py_version_ok = False
if not web2py_version_ok:
warnings.append(
"The installed version of Web2py is too old to support the current version of Sahana Eden."
"\nPlease upgrade Web2py to at least version: %s" % \
web2py_minimum_version)
# -------------------------------------------------------------------------
# Create required directories if needed
databases_dir = os.path.join(app_path, "databases")
try:
os.stat(databases_dir)
except OSError:
# not found, create it
os.mkdir(databases_dir)
# -------------------------------------------------------------------------
# Copy in Templates
# - 000_config.py (machine-specific settings)
# - rest are run in-place
#
template_folder = os.path.join(app_path, "modules", "templates")
template_files = {
# source : destination
"000_config.py" : os.path.join("models", "000_config.py"),
}
copied_from_template = []
for t in template_files:
src_path = os.path.join(template_folder, t)
dst_path = os.path.join(app_path, template_files[t])
try:
os.stat(dst_path)
except OSError:
# Not found, copy from template
if t == "000_config.py":
input = open(src_path)
output = open(dst_path, "w")
for line in input:
if "akeytochange" in line:
# Generate a random hmac_key to secure the passwords in case
# the database is compromised
import uuid
hmac_key = uuid.uuid4()
line = 'settings.auth.hmac_key = "%s"' % hmac_key
output.write(line)
output.close()
input.close()
else:
import shutil
shutil.copy(src_path, dst_path)
copied_from_template.append(template_files[t])
# @ToDo: WebSetup
# http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/WebSetup
#if not os.path.exists("%s/applications/websetup" % os.getcwd()):
# # @ToDo: Check Permissions
# # Copy files into this folder (@ToDo: Pythonise)
# cp -r private/websetup "%s/applications" % os.getcwd()
# Launch WebSetup
#redirect(URL(a="websetup", c="default", f="index",
# vars=dict(appname=request.application,
# firstTime="True")))
else:
# Found the file in the destination
# Check if it has been edited
import re
edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
edited_matcher = re.compile(edited_pattern).match
has_edited = False
with open(dst_path) as f:
for line in f:
edited_result = edited_matcher(line)
if edited_result:
has_edited = True
edited = edited_result.group(1)
break
if has_edited and (edited != "True"):
errors.append("Please edit %s before starting the system." % t)
# Check if it's up to date (i.e. a critical update requirement)
version_pattern = r"VERSION =\s*([0-9]+)"
version_matcher = re.compile(version_pattern).match
has_version = False
with open(dst_path) as f:
for line in f:
version_result = version_matcher(line)
if version_result:
has_version = True
version = version_result.group(1)
break
if not has_version:
error = "Your %s is using settings from the old templates system. Please switch to the new templates system: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates" % t
errors.append(error)
elif int(version) != VERSION:
error = "Your %s is using settings from template version %s. Please update with new settings from template version %s before starting the system." % \
(t, version, VERSION)
errors.append(error)
if copied_from_template:
errors.append(
"The following files were copied from templates and should be edited: %s" %
", ".join(copied_from_template))
return {"error_messages": errors, "warning_messages": warnings}
# -------------------------------------------------------------------------
def parse_requirements(filepath):
"""
"""
output = {}
try:
with open(filepath) as filehandle:
dependencies = filehandle.read().splitlines()
msg = ""
for dependency in dependencies:
                if not dependency:
                    # skip blank lines
                    continue
                if dependency[0] == "#":
                    # either a normal comment or custom message
                    if dependency[:9] == "# Warning" or dependency[:8] == "# Error:":
msg = dependency.split(":", 1)[1]
else:
import re
# Check if the module name is different from the package name
if "#" in dependency:
dep = dependency.split("#", 1)[1]
output[dep] = msg
else:
pattern = re.compile(r'([A-Za-z0-9_-]+)')
try:
dep = pattern.match(dependency).group(1)
output[dep] = msg
except AttributeError:
# Invalid dependency syntax
pass
msg = ""
except IOError:
# No override for Template
pass
return output
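
# Example (illustrative): parse_requirements() expects files of this shape,
# where a "# Warning:"/"# Error:" line sets the message for the dependency
# on the following line, and "package#module" maps a package name to the
# module name that gets imported:
#
#   # Warning: PIL is needed for barcode generation
#   Pillow#PIL
#   # Error: xlrd is needed for spreadsheet import
#   xlrd
#
# which would yield:
#   {"PIL": " PIL is needed for barcode generation",
#    "xlrd": " xlrd is needed for spreadsheet import"}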
# -------------------------------------------------------------------------
def s3_check_python_lib(global_mandatory, template_mandatory, template_optional, global_optional):
"""
        Check for mandatory and optional Python libraries, returning error
        and warning messages for any that cannot be imported
"""
errors = []
warnings = []
    checks = ((global_mandatory, errors,
               "S3 unresolved dependency: %s required for Sahana to run"),
              (template_mandatory, errors,
               "Unresolved template dependency: %s required"),
              (template_optional, warnings,
               "Unresolved optional dependency: %s required"),
              (global_optional, warnings,
               "Unresolved optional dependency: %s required"),
              )
    for dependencies, messages, default in checks:
        for dependency, msg in dependencies.iteritems():
            try:
                if "from" in dependency:
                    exec dependency
                else:
                    exec "import %s" % dependency
            except ImportError:
                if msg:
                    messages.append(msg)
                else:
                    messages.append(default % dependency)
return errors, warnings
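
# Usage sketch (illustrative; the requirements file locations below are
# assumptions, not part of this module):
#
#   global_deps = parse_requirements(os.path.join(app_path, "requirements.txt"))
#   template_deps = parse_requirements(os.path.join(template_folder, "requirements.txt"))
#   errors, warnings = s3_check_python_lib(global_deps, template_deps, {}, {})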
# END =========================================================================
|
makerbot/conveyor
|
refs/heads/master
|
src/main/python/conveyor/jsonrpc.py
|
1
|
# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
# conveyor/src/main/python/conveyor/jsonrpc.py
#
# conveyor - Printing dispatch engine for 3D objects and their friends.
# Copyright © 2012 Matthew W. Samsonoff <matthew.samsonoff@makerbot.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, print_function, unicode_literals)
import StringIO
import codecs
import errno
import json
import logging
import inspect
import io
import os
import sys
import threading
import conveyor.event
import conveyor.json
import conveyor.log
import conveyor.stoppable
import conveyor.task
def install(jsonrpc, obj):
for name, value in inspect.getmembers(obj):
if inspect.ismethod(value) and getattr(value, '_jsonrpc', False):
exported_name = getattr(value, '_jsonrpc_name', None)
if None is exported_name:
exported_name = name
jsonrpc.addmethod(exported_name, value)
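
def jsonrpc_export(name=None):
    # Illustrative sketch, not conveyor's actual API: install() above only
    # requires that exported methods carry a `_jsonrpc` attribute (and,
    # optionally, a `_jsonrpc_name` override), so a decorator along these
    # lines could be used to mark them.
    def decorator(func):
        func._jsonrpc = True
        if None is not name:
            func._jsonrpc_name = name
        return func
    return decorator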
class JsonRpcException(Exception):
def __init__(self, code, message, data):
Exception.__init__(self, code, message)
self.code = code
self.message = message
self.data = data
class JsonRpc(conveyor.stoppable.StoppableInterface):
""" JsonRpc handles a json stream, to gaurentee the output file pointer
gets entire valid JSON blocks of data to process, by buffering up data
into complete blocks and only passing on entirer JSON blocks
"""
def __init__(self, infp, outfp):
"""
@param infp input file pointer must have .read() and .stop()
@param outfp output file pointer. must have .write()
"""
self._condition = threading.Condition()
self._idcounter = 0
self._infp = infp # contract: .read(), .stop(), .close()
self._jsonreader = conveyor.json.JsonReader(
self._jsonreadercallback, False)
self._log = conveyor.log.getlogger(self)
self._methods = {}
        self._methodsinfo = {}
self._outfp = outfp # contract: .write(str), .close()
self._stopped = False
self._tasks = {}
reader_class = codecs.getreader('UTF-8')
self._infp_reader = reader_class(self._infp)
writer_class = codecs.getwriter('UTF-8')
self._outfp_writer = writer_class(self._outfp)
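        # Wiring sketch (illustrative): infp/outfp are typically the two ends
        # of a socket or pipe wrapped in file-like objects, e.g. roughly:
        #
        #   import socket
        #   a, b = socket.socketpair()
        #   jsonrpc = JsonRpc(a.makefile('rb'), a.makefile('wb'))
        #
        # with the caveat that the real infp must also provide .stop() and
        # .close(), per the contracts noted above.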
#
# Common part
#
def _jsonreadercallback(self, indata):
self._log.debug('indata=%r', indata)
try:
parsed = json.loads(indata)
except ValueError:
response = self._parseerror()
else:
if isinstance(parsed, dict):
response = self._handleobject(parsed)
elif isinstance(parsed, list):
response = self._handlearray(parsed)
else:
response = self._invalidrequest(None)
self._log.debug('response=%r', response)
if None is not response:
outdata = conveyor.json.dumps(response)
self._send(outdata)
def _handleobject(self, parsed):
if not isinstance(parsed, dict):
response = self._invalidrequest(None)
else:
id = parsed.get('id')
if self._isrequest(parsed):
response = self._handlerequest(parsed, id)
elif self._isresponse(parsed):
response = None
self._handleresponse(parsed, id)
else:
response = self._invalidrequest(id)
return response
def _handlearray(self, parsed):
if 0 == len(parsed):
response = self._invalidrequest(None)
else:
response = []
for subparsed in parsed:
subresponse = self._handleobject(subparsed)
if None is not subresponse:
response.append(subresponse)
if 0 == len(response):
response = None
return response
def _isrequest(self, parsed):
result = (
'jsonrpc' in parsed
and '2.0' == parsed['jsonrpc']
and 'method' in parsed
and isinstance(parsed['method'], basestring))
return result
def _isresponse(self, parsed):
result = (self._issuccessresponse(parsed)
or self._iserrorresponse(parsed))
return result
def _issuccessresponse(self, parsed):
result = (
'jsonrpc' in parsed and '2.0' == parsed['jsonrpc']
and 'result' in parsed)
return result
def _iserrorresponse(self, parsed):
result = (
'jsonrpc' in parsed and '2.0' == parsed['jsonrpc']
and 'error' in parsed)
return result
def _successresponse(self, id, result):
response = {'jsonrpc': '2.0', 'result': result, 'id': id}
return response
def _errorresponse(self, id, code, message, data=None):
error = {'code': code, 'message': message}
if None is not data:
error['data'] = data
response = {'jsonrpc': '2.0', 'error': error, 'id': id}
return response
def _parseerror(self):
response = self._errorresponse(None, -32700, 'parse error')
return response
def _invalidrequest(self, id):
response = self._errorresponse(id, -32600, 'invalid request')
return response
def _methodnotfound(self, id):
response = self._errorresponse(id, -32601, 'method not found')
return response
def _invalidparams(self, id):
response = self._errorresponse(id, -32602, 'invalid params')
return response
def _send(self, data):
self._log.debug('data=%r', data)
self._outfp_writer.write(data)
def run(self):
""" This loop will run until self._stopped is set true."""
self._log.debug('starting')
while True:
with self._condition:
stopped = self._stopped
if self._stopped:
break
else:
data = self._infp_reader.read()
if 0 == len(data):
break
else:
self._jsonreader.feed(data)
self._jsonreader.feedeof()
self._log.debug('ending')
self.close()
def stop(self):
""" required as a stoppable object. """
with self._condition:
self._stopped = True
self._infp.stop()
def close(self):
try:
self._infp_reader.close()
except:
self._log.debug('handled exception', exc_info=True)
try:
self._outfp_writer.close()
except:
self._log.debug('handled exception', exc_info=True)
#
# Client part
#
def _handleresponse(self, response, id):
self._log.debug('response=%r, id=%r', response, id)
task = self._tasks.pop(id, None)
if None is task:
self._log.debug('ignoring response for unknown id: %r', id)
elif self._iserrorresponse(response):
error = response['error']
task.fail(error)
elif self._issuccessresponse(response):
result = response['result']
task.end(result)
else:
raise ValueError(response)
def notify(self, method, params):
self._log.debug('method=%r, params=%r', method, params)
request = {'jsonrpc': '2.0', 'method': method, 'params': params}
data = conveyor.json.dumps(request)
self._send(data)
def request(self, method, params):
""" Builds a jsonrpc request task.
@param method: json rpc method to run as a task
@param params: params for method
@return a Task object with methods setup properly
"""
with self._condition:
id = self._idcounter
self._idcounter += 1
self._log.debug('method=%r, params=%r, id=%r', method, params, id)
def runningevent(task):
request = {
'jsonrpc': '2.0', 'method': method, 'params': params, 'id': id}
data = conveyor.json.dumps(request)
self._send(data)
        def stoppedevent(task):
            if id in self._tasks:
                del self._tasks[id]
            else:
                self._log.debug('stoppedevent fired for unknown id=%r', id)
task = conveyor.task.Task()
task.runningevent.attach(runningevent)
task.stoppedevent.attach(stoppedevent)
self._tasks[id] = task
return task
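
    # Usage sketch for request() (illustrative): the returned task only sends
    # the request once it is started, and run() must be consuming the input
    # stream for the response to ever resolve the task, e.g.:
    #
    #   task = jsonrpc.request('hello', {'name': 'world'})
    #   task.stoppedevent.attach(lambda t: handle(t.result))
    #   task.start()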
#
# Server part
#
def _handlerequest(self, request, id):
self._log.debug('request=%r, id=%r', request, id)
method = request['method']
if method in self._methods:
func = self._methods[method]
if 'params' not in request:
response = self._invokemethod(id, func, (), {})
else:
params = request['params']
if isinstance(params, dict):
response = self._invokemethod(id, func, (), params)
elif isinstance(params, list):
response = self._invokemethod(id, func, params, {})
else:
response = self._invalidparams(id)
else:
response = self._methodnotfound(id)
return response
def _fixkwargs(self, kwargs):
kwargs1 = {}
for k, v in kwargs.items():
k = str(k)
kwargs1[k] = v
return kwargs1
def _invokemethod(self, id, func, args, kwargs):
self._log.debug(
'id=%r, func=%r, args=%r, kwargs=%r', id, func, args, kwargs)
response = None
kwargs = self._fixkwargs(kwargs)
try:
result = func(*args, **kwargs)
except TypeError as e:
self._log.warning('handled exception', exc_info=True)
if None is not id:
response = self._invalidparams(id)
except JsonRpcException as e:
self._log.warning('handled exception', exc_info=True)
if None is not id:
response = self._errorresponse(id, e.code, e.message, e.data)
except Exception as e:
self._log.warning('uncaught exception', exc_info=True)
if None is not id:
e = sys.exc_info()[1]
data = {'name': e.__class__.__name__, 'args': e.args}
response = self._errorresponse(
id, -32000, 'uncaught exception', data)
else:
if not isinstance(result, conveyor.task.Task):
if None is not id:
response = self._successresponse(id, result)
else:
task = result
def stoppedcallback(task):
if conveyor.task.TaskConclusion.ENDED == task.conclusion:
response = self._successresponse(id, task.result)
elif conveyor.task.TaskConclusion.FAILED == task.conclusion:
response = self._errorresponse(id, -32001, 'task failed', task.failure)
elif conveyor.task.TaskConclusion.CANCELED == task.conclusion:
response = self._errorresponse(id, -32002, 'task canceled', None)
else:
raise ValueError(task.conclusion)
outdata = conveyor.json.dumps(response)
self._send(outdata)
task.stoppedevent.attach(stoppedcallback)
task.start()
self._log.debug('response=%r', response)
return response
def addmethod(self, method, func):
self._log.debug('method=%r, func=%r', method, func)
self._methods[method] = func
def getmethods(self):
return self._methods
|
ferrisvienna/Disco_Defense
|
refs/heads/master
|
discodefense.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Disco Defense
Open source game by Ferris(FerrisofVienna) Bartak
and Paolo "Broccolimaniac" Perfahl
using python3 and pygame
"""
#the next line is only needed for python2.x and not necessary for python3.x
from __future__ import print_function, division
import random
import pygame
import time as t
class Game(object):
LIVES = 20
FORCE_OF_GRAVITY = 3
ACTORSPEEDMAX = 20
ACTORSPEEDMIN = 10
DISCTHROWERRANGE = 150
DISCMAXSPEED = 100
SPAWNRATE = 0.005
SECURITYSPAWNRATE = 0.005
SPAWNRATE2 = 0.005
XP = 0
COINS = 50
ACTOR_REGEN = 0.5
ACTOR_ATKDMG = 10
ACTOR_DEF = 5
ACTOR_SPEED = 3
ACTOR_KB = 10
ACTOR_LVL = 1
#rebalance
def __init__(self):
Monster.images.append(pygame.image.load("data/discodudel.png")) # 0 normal1
Monster.images[0].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/discodudel4.png")) # 1 normal2
Monster.images[1].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/discodudel.png")) # 2 normal3
Monster.images[2].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/discodudel2.png")) # 3 fight1
Monster.images[3].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/discodudel3.png")) # 4 fight2
Monster.images[4].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/discodudel2.png")) # 5 fight3
Monster.images[5].set_colorkey((255,0,182))
Monster.images[0].convert_alpha()
Monster.images[1].convert_alpha()
Monster.images[2].convert_alpha()
Monster.images[3].convert_alpha()
Monster.images[4].convert_alpha()
Monster.images[5].convert_alpha()
Monster.images.append(pygame.image.load("data/rockdudel.png")) # 6 normal1
Monster.images[6].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/rockdudel1.png")) # 7 normal2
Monster.images[7].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/rockdudel.png")) # 8 normal3
Monster.images[8].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/rockdudel1.png")) # 9 fight1
Monster.images[9].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/rockdudel.png")) # 10 fitht2
Monster.images[10].set_colorkey((255,0,182))
Monster.images.append(pygame.image.load("data/rockdudel1.png")) # 11 figth3
Monster.images[11].set_colorkey((255,0,182))
Monster.images[6].convert_alpha()
Monster.images[7].convert_alpha()
Monster.images[8].convert_alpha()
Monster.images[9].convert_alpha()
Monster.images[10].convert_alpha()
Monster.images[11].convert_alpha()
Security.images.append(pygame.image.load("data/securityw1.png")) # 0
Security.images[0].set_colorkey((255,0,182))
Security.images.append(pygame.image.load("data/securityw2.png")) # 1
Security.images[1].set_colorkey((255,0,182))
Security.images.append(pygame.image.load("data/securityw1.png")) # 2
Security.images[2].set_colorkey((255,0,182))
Security.images.append(pygame.image.load("data/securityw2.png")) # 3
Security.images[3].set_colorkey((255,0,182))
Security.images.append(pygame.image.load("data/securityw1.png")) # 4
Security.images[4].set_colorkey((255,0,182))
Security.images.append(pygame.image.load("data/securityw2.png")) # 5
Security.images[5].set_colorkey((255,0,182))
Security.images.append(pygame.image.load("data/securityw2.png")) # 5
Security.images[6].set_colorkey((255,0,182))
Security.images.append(pygame.image.load("data/securitywa1.png")) #6
Security.images[7].set_colorkey((255,0,182))
Security.images.append(pygame.image.load("data/securitywa2.png")) #7
Security.images[8].set_colorkey((255,0,182))
Security.images[0].convert_alpha()
Security.images[1].convert_alpha()
Security.images[2].convert_alpha()
Security.images[3].convert_alpha()
Security.images[4].convert_alpha()
Security.images[5].convert_alpha()
Security.images[6].convert_alpha()
        Security.images[7].convert_alpha()
        Security.images[8].convert_alpha()
self.h= [pygame.image.load("data/h0.png"),
pygame.image.load("data/h1.png"),
pygame.image.load("data/h2.png"),
pygame.image.load("data/h3.png"),
pygame.image.load("data/h4.png"),
pygame.image.load("data/h5.png")]
self.h[0].set_colorkey((255,0,182))
self.h[1].set_colorkey((255,0,182))
self.h[2].set_colorkey((255,0,182))
self.h[3].set_colorkey((255,0,182))
self.h[4].set_colorkey((255,0,182))
self.h[5].set_colorkey((255,0,182))
self.p= pygame.image.load("data/p.png")
self.p.set_colorkey((255,0,182))
        self.e = pygame.image.load("data/protect.png")
        self.e.set_colorkey((255,0,182))
self.i= [pygame.image.load("data/i0.png"),
pygame.image.load("data/i1.png"),
pygame.image.load("data/i2.png"),
pygame.image.load("data/i3.png"),
pygame.image.load("data/i4.png"),
pygame.image.load("data/i5.png")]
self.i[1].set_colorkey((255,0,182))
self.i[2].set_colorkey((255,0,182))
self.i[3].set_colorkey((255,0,182))
self.i[4].set_colorkey((255,0,182))
self.i[5].set_colorkey((255,0,182))
self.i[0].set_colorkey((255,0,182))
self.d= [pygame.image.load("data/d0.png"),
pygame.image.load("data/d1.png"),
pygame.image.load("data/d2.png"),
pygame.image.load("data/d3.png"),
pygame.image.load("data/d4.png"),
pygame.image.load("data/d5.png")]
self.g= [pygame.image.load("data/g0.png"),
pygame.image.load("data/g1.png"),
pygame.image.load("data/g2.png"),
pygame.image.load("data/g3.png"),
pygame.image.load("data/g4.png"),
pygame.image.load("data/g5.png")]
self.v= [pygame.image.load("data/discodiscgunf.png"),
pygame.image.load("data/discodiscgunl.png"),
pygame.image.load("data/discodiscgunb.png"),
pygame.image.load("data/discodiscgunr.png"),
pygame.image.load("data/discodiscgunr.png"),
pygame.image.load("data/discodiscgunr.png")]
self.k= [pygame.image.load("data/konfettif.png"),
pygame.image.load("data/konfettir.png"),
pygame.image.load("data/konfettib.png"),
pygame.image.load("data/konfettil.png"),
pygame.image.load("data/konfettil.png"),
pygame.image.load("data/konfettil.png")]
self.w= [pygame.image.load("data/discogunf.png"),
pygame.image.load("data/discogunr.png"),
pygame.image.load("data/discogunb.png"),
pygame.image.load("data/discogunl.png"),
pygame.image.load("data/discogunl.png"),
pygame.image.load("data/discogunl.png")]
self.w[1].set_colorkey((255,0,182))
self.w[2].set_colorkey((255,0,182))
self.w[3].set_colorkey((255,0,182))
self.w[4].set_colorkey((255,0,182))
self.w[5].set_colorkey((255,0,182))
self.w[0].set_colorkey((255,0,182))
self.anim=0
self.o= [pygame.image.load("data/discoball.png"),
pygame.image.load("data/discoball2.png")]
self.o[0].set_colorkey((255,0,182))
self.o[1].set_colorkey((255,0,182))
self.level=["ppppppppppppppppppppp",
"ppppppppppppppppppppp",
"dddddddddddddddddddde",
"dddddddddddddddddddde",
"dddddddddddddddddddde",
"dddddddddddddddddddde",
"dddddddddddddddddddde",
"dddddddddddddddddddde"]
anim = 0
self.legende={"h":self.h[anim],#towertop
"p":self.p,#nothing
"i":self.i[anim],#dirt
"g":self.g[anim],#lava
"d":self.d[anim], #grass
"v":self.v[anim], #discodiscgun
"w":self.w[anim], #discogun
"k":self.k[anim], #konfettigun
"e":self.e, #end of world
"o":self.o[anim] #discoball
}
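        # Illustrative note: each character in a self.level row maps to one
        # 50x50 tile image via self.legende, e.g. a row of "p" renders a strip
        # of empty tiles and "ddd...e" renders grass ending in the end-of-world
        # tile that monsters escape through.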
#def update(self,seconds):
#neededcoins = self.ACTOR_LVL * 20 +100
class Fragment(pygame.sprite.Sprite):
"""a fragment of an exploding Bird"""
gravity = True # fragments fall down ?
def __init__(self, pos):
pygame.sprite.Sprite.__init__(self, self.groups)
self.pos = [0.0,0.0]
self.pos[0] = pos[0]
self.pos[1] = pos[1]
self.image = pygame.Surface((10,10))
self.image.set_colorkey((0,0,0)) # black transparent
pygame.draw.circle(self.image, (random.randint(20,230),random.randint(20,230),random.randint(20,230)), (5,5),
random.randint(3,10))
self.image = self.image.convert_alpha()
self.rect = self.image.get_rect()
self.rect.center = self.pos #if you forget this line the sprite sit in the topleft corner
self.lifetime = 1 + random.random()*5 # max 6 seconds
self.time = 0.0
self.fragmentmaxspeed = 200 # try out other factors !
self.dx = random.randint(-self.fragmentmaxspeed,self.fragmentmaxspeed)
self.dy = random.randint(-self.fragmentmaxspeed,self.fragmentmaxspeed)
def update(self, seconds):
self.time += seconds
if self.time > self.lifetime:
self.kill()
self.pos[0] += self.dx * seconds
self.pos[1] += self.dy * seconds
if Fragment.gravity:
            self.dy += Game.FORCE_OF_GRAVITY # gravity sucks fragments down
self.rect.centerx = round(self.pos[0],0)
self.rect.centery = round(self.pos[1],0)
class Barricade(pygame.sprite.Sprite):
#a laser gun
gravity= False
image=pygame.image.load("data/discogun.png")
number = 0
def __init__(self,x,y, screen):
pygame.sprite.Sprite.__init__(self, self.groups)
self.hitpoints = 300.0
self.hitpointsfull = 300.0
        self.reload_time = 0.2
        self.reload_time_full = 0.2
        self.image = DiscoLaserCannon.image
        self.image.set_colorkey((255,0,182))
        self.rect = self.image.get_rect()
self.screen = screen
self.x = x
self.y = y
self.rect.centerx = self.x
self.rect.centery = self.y
self.lasermaxburntime = random.random()*2+2
self.laserburntime = 0
self.beam = False
self.Victimnumber = None
self.number = DiscoLaserCannon.number
DiscoLaserCannon.number += 1
#DiscoLaserCannonCannon.number += 1 # increase the number for next Bird
#DiscoLaserCannon.DiscoLaserCannons[self.number] = self
Healthbar(self)
#self.has_target = False
self.lasertargettime = 0
self.lasertargettimefull = 1
def update(self,seconds):
self.reload_time += seconds
if self.hitpoints < 1:
self.kill()
if self.reload_time > self.reload_time_full:
# choose new target
#Victimnumber = None
if self.Victimnumber is None:
if len(Monster.monsters) > 0:
self.Victimnumber = random.choice(list(Monster.monsters.keys()))
self.Victim = Monster.monsters[self.Victimnumber]
self.lasertargettime = 0
#self.has_target = True
#lasertimer = 4 #rebalance
if self.beam:
self.laserburntime += seconds
if self.laserburntime > self.lasermaxburntime:
self.reload_time = 0
self.laserburntime = 0
self.beam = False
self.Victimnumber = None
#lasertimer -= seconds
        # is there a Victim?
        if self.Victimnumber != None:
            # does the victim still exist in the Monster class?
            if self.Victimnumber in Monster.monsters:
                # deadly white laser
pygame.draw.line(self.screen,
(random.randint(200,255),
random.randint(200,255),
random.randint(200,255)),
(self.x,self.y),
(self.Victim.pos[0], self.Victim.pos[1]),7)
self.Victim.hitpoints-= 1.0
self.Victim.burntime = 4.0
#self.hitpoints -= 1
#victim.pos[0] -= 3
self.beam = True
else:
                self.Victimnumber = None # note the capital V: clears the stored target
class DiscoLaserCannon(pygame.sprite.Sprite):
gravity= False
image=pygame.image.load("data/discogun.png")
number = 0
def __init__(self,x,y, screen):
Barricade.__init__(self,x,y,screen)
class DiscProjectile(pygame.sprite.Sprite):
"""a projectile of a Disc gun"""
gravity = False # fragments fall down ?
image=pygame.image.load("data/disc.png")
    def __init__(self, startpos=None, targetpos=None):
        # default positions are computed here at call time; putting the
        # random.randint() calls in the def line would fix them once at import
        pygame.sprite.Sprite.__init__(self, self.groups)
        if startpos is None:
            startpos = (random.randint(640,1024), random.randint(100,300))
        if targetpos is None:
            targetpos = (random.randint(640,1024), random.randint(100,300))
        self.pos = [0.0,0.0]
        self.startpos = startpos
        self.targetpos = targetpos
        distancex = -self.startpos[0] + self.targetpos[0]
        distancey = -self.startpos[1] + self.targetpos[1]
        distance = (distancex**2 + distancey**2)**0.5
        if distance > Game.DISCTHROWERRANGE:
            self.kill()
        distance = max(distance, 0.0001) # avoid division by zero for identical points
        self.dx = distancex / distance
        self.dy = distancey / distance
        self.dx *= Game.DISCMAXSPEED
        self.dy *= Game.DISCMAXSPEED
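        # Worked example of the direction math above (illustrative numbers):
        # start (0,0) -> target (30,40) gives distance 50 and unit vector
        # (0.6, 0.8), so with DISCMAXSPEED = 100 the velocity is (60, 80) px/s.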
self.hitpoints = 10
self.pos[0] = startpos[0]
self.pos[1] = startpos[1]
self.image = DiscProjectile.image
self.image.set_colorkey((255,0,182)) # black transparent
self.image = self.image.convert_alpha()
self.rect = self.image.get_rect()
self.rect.center = self.pos #if you forget this line the sprite sit in the topleft corner
self.lifetime = 1 + random.random()*5 # max 6 seconds
self.time = 0.0
#self.fragmentmaxspeed = 200 # try out other factors !
#self.dx = dx
#self.dy = dy
def update(self, seconds):
self.time += seconds
if self.time > self.lifetime:
self.kill()
if self.hitpoints <= 0:
self.kill()
self.pos[0] += self.dx * seconds
self.pos[1] += self.dy * seconds
#if Fragment.gravity:
# self.dy += FORCE_OF_GRAVITY # gravity suck fragments down
self.rect.centerx = round(self.pos[0],0)
self.rect.centery = round(self.pos[1],0)
class Flame (pygame.sprite.Sprite):
images = []
images.append(pygame.image.load("data/flamme.png"))
images.append(pygame.image.load("data/flamme2.png"))
for img in images:
img.set_colorkey((255,0,182))
#img.convert_alpha()
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self,self.groups)
self.image = random.choice(Flame.images)
self.rect = self.image.get_rect()
self.x = x
self.y = y
self.rect.centerx = x
self.rect.centery = y
def update(self, seconds):
self.kill()
class Healthbar(pygame.sprite.Sprite):
"""shows a bar with the hitpoints of a Bird sprite"""
def __init__(self, boss):
pygame.sprite.Sprite.__init__(self,self.groups)
self.boss = boss
self.image = pygame.Surface((self.boss.rect.width,7))
self.image.set_colorkey((3,3,3)) # black transparent
pygame.draw.rect(self.image, (1,1,1), (0,0,self.boss.rect.width,7),1)
self.rect = self.image.get_rect()
self.oldpercent = 0
self.bossnumber = self.boss.number # the unique number (name)
def update(self, time):
self.percent = self.boss.hitpoints / self.boss.hitpointsfull * 1.0
if self.percent != self.oldpercent:
            pygame.draw.rect(self.image, (77,77,77), (1,1,self.boss.rect.width-2,5)) # fill grey background
            pygame.draw.rect(self.image, (222,22,2), (1,1,
                             int(self.boss.rect.width * self.percent),5),0) # fill red health bar
self.oldpercent = self.percent
self.rect.centerx = self.boss.rect.centerx
self.rect.centery = self.boss.rect.centery - self.boss.rect.height /2 - 10
#check if boss is still alive if not
if self.boss.hitpoints<1:
self.kill()
class Actor(pygame.sprite.Sprite):
"""Generic Monster"""
images=[] # list of all images
# not necessary:
actors = {} # a dictionary of all monsters
number = 0
neededxp = 300
def __init__(self, x, y, screen, hitpointsfull=600):
#rebalance
pygame.sprite.Sprite.__init__(self, self.groups ) #call parent class. NEVER FORGET !
self.burntime = 0.0
#print("i bin do")
        self.x = x
        self.y = y
self.screen=screen
self.z = 0 # animationsnumber
self.duration = 0.0 # how long was the current animation visible in seconds
#self.level=level
self.nomove = False
#self.stats{Game.ACTOR_ATKDMG : "Dmg",Game.ACTOR_SPEED : "speed", Game.ACTOR_DEF : "Def"}
#startpos=(0,screen.get_rect().center[1])
#self.pos=startpos
self.pos = [float(self.x),float(self.y)] # dummy values to create a list
#self.pos[0] = float(startpos[0]) # float for more precise calculation
#self.pos[1] = float(startpos[1])
#self.area = screen.get_rect()
self.area = pygame.Rect(0,100,1024,300)
self.image = Security.images[1]
self.hitpointsfull = float(hitpointsfull) # maximal hitpoints , float makes decimal
self.hitpoints = float(hitpointsfull) # actual hitpoints
self.rect = self.image.get_rect()
self.radius = max(self.rect.width, self.rect.height) / 2.0
self.dx = 0
self.dy = 0
#self.regen = 0.5
#self.dx = random.random()*10+20
#self.dy= random.randint(-70,70)#rebalance
self.rect.centerx = self.x
self.rect.centery = self.y
#--- not necessary:
self.number = Actor.number # get my personal Birdnumber
Actor.number+= 1
Actor.actors[self.number] = self
Healthbar(self)
def update(self, seconds):
pressed_keys = pygame.key.get_pressed()
if pressed_keys[pygame.K_UP]:
self.y -= Game.ACTOR_SPEED
if pressed_keys[pygame.K_DOWN]:
self.y += Game.ACTOR_SPEED
if pressed_keys[pygame.K_LEFT]:
self.x -= Game.ACTOR_SPEED
if pressed_keys[pygame.K_RIGHT]:
self.x += Game.ACTOR_SPEED
if pressed_keys[pygame.K_w]:
self.y -= 5
self.hitpoints -=1.5
if pressed_keys[pygame.K_s]:
self.y += 5
self.hitpoints -=1.5
if pressed_keys[pygame.K_a]:
self.x -= 5
self.hitpoints -= 1.5
if pressed_keys[pygame.K_d]:
self.x += 5
self.hitpoints -=1.5
self.rect.centerx = self.x
self.rect.centery = self.y
if self.hitpoints< self.hitpointsfull:
self.hitpoints+= Game.ACTOR_REGEN
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
self.x +=50
self.hitpoints -= 50
if Game.COINS >= 50:
if event.key == pygame.K_y:
DiscoLaserCannon(self.x,self.y,self.screen)
Game.COINS -= 50
#self.mouse=pygame.mouse.get_pos()
#pygame.mouse.set_pos(self.mouse[0]-5,self.mouse[1]-5)
#self.x=self.mouse[0]
#self.y=self.mouse[1]
#if self.getChar()=="p":
# self.hitpoints=1
        if self.hitpoints <= 0:
            del Actor.actors[self.number] # remove from the actor dictionary so monsters stop targeting it
            self.kill()
class Monster(pygame.sprite.Sprite): #DISCO GARY GLITTER
"""Generic Monster"""
images=[] # list of all images
# not necessary:
monsters = {} # a dictionary of all monsters
number = 0
def __init__(self, level, x=0,y=0, hitpointsfull=600, startimagenumber = 0):
#rebalance
pygame.sprite.Sprite.__init__(self, self.groups ) #call parent class. NEVER FORGET !
self.burntime = 0.0
self.z = 0 # animationsnumber
self.duration = 0.0 # how long was the current animation visible in seconds
self.level=level
self.nomove = False
        self.x = float(x)
        self.y = float(y)
        if self.x == 0 and self.y == 0:
            # no explicit spawn point: enter at the left edge at a random height
            self.x = 30
            self.y = random.randint(100,350)
        self.pos = [self.x, self.y] # kept in sync in move(); used by Fragments and the targeting code
self.area = pygame.Rect(0,100,1024,300)
self.image = Monster.images[self.z]
self.hitpointsfull = float(hitpointsfull) # maximal hitpoints , float makes decimal
self.hitpoints = float(hitpointsfull) # actual hitpoints
self.rect = self.image.get_rect()
self.radius = max(self.rect.width, self.rect.height) / 2.0
self.damage = random.randint(1,3)
self.dx= random.random()*10+20
self.dy= random.randint(-70,70)#rebalance
self.rect.centerx = self.x
self.rect.centery = self.y
#--- not necessary:
self.number = Monster.number
Monster.number+= 1
Monster.monsters[self.number] = self #
Healthbar(self)
    def getChar(self):
        # each tile of the level is 50x50 pixels
        tilex = int(self.x/50)
        tiley = int(self.y/50)
        try:
            char = self.level[tiley][tilex]
        except IndexError:
            char = "?"
        return char
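        # e.g. a monster at pixel (130, 240) stands on tile column 2, row 4
        # (130/50 -> 2, 240/50 -> 4), so getChar() returns self.level[4][2],
        # which is "d" in the level defined above.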
    def move(self, seconds):
        self.dy = random.randint(-10, 10)
        self.dx = 40
        #if self.nomove:
        #    self.dx = 0
        self.x += self.dx * seconds
        self.y += self.dy * seconds
        self.pos = [self.x, self.y] # keep pos in sync for the targeting code
        #self.check_area()
        #--- calculate new position on screen -----
        self.rect.centerx = round(self.x,0)
        self.rect.centery = round(self.y,0)
def check_area(self):
# -- check if Bird out of screen
if not self.area.contains(self.rect):
#self.crashing = True # change colour later
# --- compare self.rect and area.rect
if self.x + self.rect.width/2 > self.area.right:
self.x = self.area.right - self.rect.width/2
if self.x - self.rect.width/2 < self.area.left:
self.x = self.area.left + self.rect.width/2
if self.y + self.rect.height/2 > self.area.bottom:
self.y = self.area.bottom - self.rect.height/2
if self.y - self.rect.height/2 < self.area.top:
self.y = self.area.top + self.rect.height/2
def update(self, seconds):
        #------ check if lava
        # Animation: the animation frames are in Monster.images[]
self.duration += seconds
if self.duration > 0.5:
self.duration= 0
self.z +=1
if self.z >= len(Monster.images):
self.z = 0
self.image=Monster.images[self.z]
#------- FIGHTING?????????????????????????? ---------- TODO -----------
if self.getChar()=="?":
self.hitpoints = 0
if self.getChar()=="e":
self.hitpoints= 0
            Game.LIVES -= 1 # player loses a life because the monster reached the edge of the screen
self.move(seconds)
if self.burntime > 0 :
self.hitpoints -= 1.0
# reduce burntime
self.burntime -= 0.4
Flame(self.rect.centerx, self.rect.centery)
        if self.hitpoints <= 0:
            self.kill()
def kill(self):
for _ in range(random.randint(7,20)):
Fragment(self.pos)
#Monster.monsters[self.number] = None # kill Bird in sprite dictionary
del(Monster.monsters[self.number])
pygame.sprite.Sprite.kill(self) # kill the actual Monster
Game.XP += 50
Game.COINS += 50
class Monster1(Monster): #DISCO GARY GLITTER
"""white stupid dancer"""
def __init__(self, level, x=0,y=0, hitpointsfull=600, startimagenumber=0):
Monster.__init__(self, level, x, y, hitpointsfull, startimagenumber)
class Monster2(Monster): #MONSTER ROCK
"""rocker with black leather jacket and guitar"""
def __init__(self, level, x=0,y=0, hitpointsfull=900, startimagenumber=4):
Monster.__init__(self, level, x, y, hitpointsfull, startimagenumber)
def move(self, seconds):
if len(Actor.actors) > 0:
#print(len(Actor.actors))
self.victimnumber = random.choice(list(Actor.actors.keys()))
self.victim = Actor.actors[self.victimnumber]
if self.victim.x > self.x:
if self.victim.y < self.y:
self.dy=-3
if self.victim.y > self.y:
self.dy=3
if self.victim.y == self.y:
self.dy=0
else:
self.dy = random.randint(-20,20)
self.dx= 20 #random.randint(10,10)
if self.nomove:
self.dx = 0
self.x += self.dx * seconds
self.y += self.dy * seconds
self.check_area()
#--- calculate new position on screen -----
self.rect.centerx = round(self.x,0)
self.rect.centery = round(self.y,0)
class Security(pygame.sprite.Sprite):
"""Generic Monster"""
images=[] # list of all images
# not necessary:
securitys = {} # a dictionary of all monsters
number = 0
def __init__(self, level, startpos=(-1,200), hitpointsfull=1200):
pygame.sprite.Sprite.__init__(self, self.groups ) #call parent class. NEVER FORGET !
self.burntime = 0.0
if startpos[0]== -1:
startpos=(Viewer.screenwidth, random.randint(150,250))
self.z = 0 # animationsnumber
self.duration = 0.0 # how long was the current animation visible in seconds
self.level=level
self.nomove = False
#startpos=(0,screen.get_rect().center[1])
self.pos = [float(startpos[0]),float (startpos[1])] # dummy values to create a list
#self.pos[0] = float(startpos[0]) # float for more precise calculation
#self.pos[1] = float(startpos[1])
# self.area = screen.get_rect()
self.area = pygame.Rect(0,100,1024,300)
self.image = Security.images[self.z]
self.hitpointsfull = float(hitpointsfull) # maximal hitpoints , float makes decimal
self.hitpoints = float(hitpointsfull) # actual hitpoints
self.rect = self.image.get_rect()
self.radius = max(self.rect.width, self.rect.height) / 2.0
self.dx= random.random()*-10+20
self.dy= random.randint(-70,70)
self.rect.centerx = round(self.pos[0],0)
self.rect.centery = round(self.pos[1],0)
#--- not necessary:
self.taser = False
self.number = Security.number # get my personal Birdnumber
Security.number+= 1 # increase the number for next Bird
Security.securitys[self.number] = self #
Healthbar(self)
#def newspeed(self):
# new birdspeed, but not 0
#speedrandom = random.choice([-1,1]) # flip a coin
#self.dx = random.random() * ACTORSPEEDMAX * speedrandom + speedrandom
#self.dy = random.random() * ACTORSPEEDMAX * speedrandom + speedrandom
    def getChar(self):
        # each tile of the level is 50x50 pixels
        x = int(self.pos[0]/50)
        y = int(self.pos[1]/50)
        try:
            char = self.level[y][x]
        except IndexError:
            char = "?"
        return char
    def kill(self):
        for _ in range(random.randint(10,30)):
            Fragment(self.pos)
        Security.securitys.pop(self.number, None) # remove from the security dictionary
        Game.XP += 60
        pygame.sprite.Sprite.kill(self) # kill the actual sprite
def update(self, seconds):
self.duration += seconds
if self.duration > 0.5:
self.duration= 0
self.z +=1
if self.z >= len(Security.images):
self.z = 0
self.image=Security.images[self.z]
#-------
#if self.getChar()=="g":
#self.hitpoints-=1 #lava?
#self.burntime += 1.0
if self.getChar()=="?":
self.hitpoints=0
#if self.getChar()=="e":
#self.hitpoints=0
#Game.LIVES-=1
if self.getChar()=="h":
self.nomove = True
else:
self.nomove = False
self.dy=random.randint(-50, 50)
self.dx= -25#random.randint(10,10)
if self.nomove:
self.dx = 0
self.pos[0] += self.dx * seconds
self.pos[1] += self.dy * seconds
# -- check if Bird out of screen
if not self.area.contains(self.rect):
#self.crashing = True # change colour later
# --- compare self.rect and area.rect
if self.pos[0] + self.rect.width/2 > self.area.right:
self.pos[0] = self.area.right - self.rect.width/2
if self.pos[0] - self.rect.width/2 < self.area.left:
self.pos[0] = self.area.left + self.rect.width/2
if self.pos[1] + self.rect.height/2 > self.area.bottom:
self.pos[1] = self.area.bottom - self.rect.height/2
if self.pos[1] - self.rect.height/2 < self.area.top:
self.pos[1] = self.area.top + self.rect.height/2
#self.newspeed() # calculate a new direction
#--- calculate actual image: crasing, catched, both, nothing ?
#self.image = Bird.image[self.crashing + self.catched*2]
#--- calculate new position on screen -----
self.rect.centerx = round(self.pos[0],0)
self.rect.centery = round(self.pos[1],0)
#--- loose hitpoins
#if self.crashing:
#self.hitpoints -=1
#if self.burntime > 0 :
#self.hitpoints -= 1.0
# reduce burntime
#self.burntime -= 0.4
#Flame(self.rect.centerx, self.rect.centery)
if self.hitpoints <= 0:
self.kill()
class Viewer(object):
screenwidth = 1025
screenheight = 400
def __init__(self, width=0, height=0, fps=30):
"""Initialize pygame, window, background, font,...
default arguments
"""
pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
pygame.init()
pygame.display.set_caption("Press ESC to quit")
self.width = width
self.height = height
if self.width == 0:
self.width = Viewer.screenwidth
else:
Viewer.screenwidth = width
if self.height == 0:
self.height = Viewer.screenheight
else:
Viewer.screenheight = self.height
self.screen = pygame.display.set_mode((self.width, self.height), pygame.DOUBLEBUF)
self.background = pygame.Surface(self.screen.get_size()).convert()
#self.background.fill((255,255,255)) # fill background white
        self.background.fill((1,75,176)) # fill the background disco blue (red,green,blue)
self.clock = pygame.time.Clock()
self.fps = fps
self.playtime = 0.0
self.font = pygame.font.SysFont('mono', 24, bold=True)
# sprite groups
        self.playergroup = pygame.sprite.LayeredUpdates()
        self.bargroup = pygame.sprite.Group()
        self.stuffgroup = pygame.sprite.Group()
        self.fragmentgroup = pygame.sprite.Group()
        self.allgroup = pygame.sprite.LayeredUpdates()
        self.projectilegroup = pygame.sprite.Group()
        self.cannongroup = pygame.sprite.Group()
        self.barricadegroup = pygame.sprite.Group()
        self.monstergroup = pygame.sprite.Group()
        self.securitygroup = pygame.sprite.Group()
        self.actorgroup = pygame.sprite.Group()
DiscProjectile.groups = self.allgroup, self.projectilegroup
DiscoLaserCannon.groups = self.allgroup, self.cannongroup, self.barricadegroup
Barricade.groups = self.allgroup, self.barricadegroup
Monster1.groups = self.allgroup, self.monstergroup
Monster2.groups = self.allgroup, self.monstergroup
Fragment.groups = self.allgroup, self.fragmentgroup
Healthbar.groups = self.allgroup, self.bargroup
Flame.groups = self.allgroup
Security.groups = self.allgroup, self.securitygroup
Actor.groups = self.allgroup, self.actorgroup
self.game = Game()
def paint(self):
"""paint the level of self.game"""
x=0
y=0
self.game.fleckanim=[]
for zeile in self.game.level:
for fleck in zeile:
self.game.fleckanim.append(0)
self.background.blit(self.game.legende[fleck],(x,y))
x+=50
y+=50
x=0
def spawn_objects(self):
DiscoLaserCannon(500,100, self.screen)
#DiscoLaserCannon(700,100, self.screen)
#DiscoLaserCannon(600,100, self.screen)
#DiscoLaserCannon(400,100, self.screen)
#DiscoLaserCannon(900,100, self.screen)
#DiscoLaserCannon(500,200, self.screen)
#DiscoLaserCannon(700,350, self.screen)
#DiscoLaserCannon(600,350, self.screen)
#DiscoLaserCannon(400,450, self.screen)
#DiscoLaserCannon(900,550, self.screen)
#DiscoLaserCannon()
#print("Action....")
Actor(100,100,self.screen)
def run(self):
"""The mainloop
"""
        lasertimer = 0.0 # ....should become a class !!
victimnumber = None
self.paint()
self.spawn_objects()
running = True
millis = 0
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
running = False
                    if event.key == pygame.K_F2:
                        for px in range(0, 5):
                            Security(self.game.level, hitpointsfull = 2000)
                    if event.key == pygame.K_F3:
                        # spawn an extra actor at a random spot in the play area
                        Actor(random.randint(100, 900), random.randint(100, 350), self.screen)
                    if event.key == pygame.K_p:
                        # dash: push every actor to the right at a hitpoint cost
                        for myactor in self.actorgroup:
                            myactor.x += 50
                            myactor.hitpoints -= 50
self.pressed_keys = pygame.key.get_pressed()
if Game.XP >= Actor.neededxp:
Game.ACTOR_LVL += 1
Game.ACTOR_ATKDMG += 5
Game.ACTOR_DEF += 5
Game.ACTOR_KB += 2
Game.ACTOR_REGEN += 0.2
Game.ACTOR_SPEED += 0.2
Game.XP -= Actor.neededxp
print("LVL UP:",Game.ACTOR_LVL,"\nDMG:", Game.ACTOR_ATKDMG,"\nDEF:", Game.ACTOR_DEF,
"\nknockback:",Game.ACTOR_KB, "\nSPEED:",Game.ACTOR_SPEED,"\nREGEN:", Game.ACTOR_REGEN, "\nnextlvl UP:", Actor.neededxp
)
# ------CHEAT KEY----------
#if event.key==pygame.K_F1:
#for px in range (0,240):
            #DiscProjectile(pos=(random.randint(540,1024),random.randint(100,400)))
self.pressed_keys = pygame.key.get_pressed()
            if self.pressed_keys[pygame.K_F1]:
                pass
milliseconds = self.clock.tick(self.fps)
millis += milliseconds
            seconds = milliseconds / 1000.0
            self.playtime += milliseconds / 1000.0
self.draw_text("Xp:{} Coins:{}".format(
Game.XP,Game.COINS))
pygame.display.flip()
            self.screen.blit(self.background, (0, 0)) # erase everything
            # rebuild the level
            # spawn monsters
if random.random()<self.game.SPAWNRATE:
Monster1(self.game.level)
if random.random()<self.game.SPAWNRATE2:
Monster2(self.game.level)
if random.random()<self.game.SECURITYSPAWNRATE:
Security(self.game.level)
            if self.pressed_keys[pygame.K_s]:
                # note: spawns a new Actor every frame while "s" is held;
                # the spawn position here is an arbitrary choice
                Actor(100, random.randint(100, 350), self.screen)
# spritecollide
if millis > 500: # jede halbe sekunde neue animation
millis=0
z=0
x=0
y=0
for zeile in self.game.level:
for fleck in zeile:
if fleck == "d" and self.game.fleckanim[z] == 0:
if random.random() < 0.005:
self.game.fleckanim[z] += 1
elif fleck == "g" and self.game.fleckanim[z] == 0:
if random.random() < 0.5:
self.game.fleckanim[z] += 1
else:
                        self.game.fleckanim[z] += 1 # normal tile
if fleck == "v":
targetlist=[]
for target in self.monstergroup:
                                #pass # compute the distance via Pythagoras
                                # a target is searched for and its range is tested
                                # a random target in range gets shot at
distx=abs(target.pos[0]-x)
disty=abs(target.pos[1]-y)
dist=(distx**2+disty**2)**0.5
if dist<self.game.DISCTHROWERRANGE:
targetlist.append(target)
if len(targetlist)>0:
target=random.choice(targetlist)
#print("taget found{}".format(target.pos) )
#schuss
# fliegt nur nach rechts unten
if target.pos[0]> x:
xsign = 1
else:
xsign = -1
if target.pos[1]> y:
ysign = 1
else:
ysign = -1
DiscProjectile((x,y),(target.pos[0], target.pos[1]))
#else:
# print("No target found")
if self.game.fleckanim[z] > 5:
self.game.fleckanim[z] = 0
z+=1
x+=50
y+=50
x=0
# monster take damage from discs
for mymonster in self.monstergroup:
crashgroup = pygame.sprite.spritecollide(mymonster, self.projectilegroup, False)
for myprojectile in crashgroup:
mymonster.hitpoints-=0.25
#mymonster.pos[0] -= 5 # test for collision with bullet
myprojectile.hitpoints-=0.25
for mymonster in self.monstergroup:
crashgroup = pygame.sprite.spritecollide(mymonster, self.barricadegroup, False)
for mybarricade in crashgroup:
mybarricade.hitpoints-= mymonster.damage
mymonster.x -= 10
for mymonster in self.monstergroup:
crashgroup = pygame.sprite.spritecollide(mymonster, self.actorgroup, False)
for myactor in crashgroup:
mymonster.hitpoints-= Game.ACTOR_ATKDMG
mymonster.x -= 10
myactor.x += 10
myactor.hitpoints-=30.00 - Game.ACTOR_DEF
#and securitys
for mysecurity in self.securitygroup:
crashgroup = pygame.sprite.spritecollide(mysecurity, self.monstergroup, False)
mysecurity.taser = False
for mymonster in crashgroup:
mymonster.hitpoints-=4 # test for collision with bullet
mymonster.x-=random.randint(5,20)
mysecurity.hitpoints-=5
mysecurity.pos[0]+=random.randint(1,7)
mysecurity.taser = True
            # laser # should become a class of its own!!!
            #pygame.draw.line #rebalance
            # colorful light laser
#pygame.draw.line(self.screen,(random.randint(0,255),random.randint(0,255),
#random.randint(0,255)),(925,25),(random.randint(0,950),
#random.randint(0,500)),random.randint(5,15))
#allgroup.clear(screen, background)
self.allgroup.update(seconds)
self.allgroup.draw(self.screen)
pygame.quit()
def draw_text(self, text):
"""Center text in window
"""
fw, fh = self.font.size(text)
surface = self.font.render(text, True, (0, 0, 0))
self.screen.blit(surface, (25,5))
## code on module level
if __name__ == '__main__':
# call with width of window and fps
Viewer().run()
|