| code stringlengths 2–1.05M | repo_name stringlengths 5–104 | path stringlengths 4–251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2–1.05M |
|---|---|---|---|---|---|
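Each row below shows one record: the `code` cell first, followed by the remaining columns. As a rough sketch only (the dataset id "user/python-source-files" is a hypothetical placeholder, not given on this page), records with this schema could be iterated with the Hugging Face `datasets` library:

    from datasets import load_dataset

    # Placeholder dataset id; substitute the real one.
    ds = load_dataset("user/python-source-files", split="train", streaming=True)
    for row in ds:
        # Columns match the header above: code, repo_name, path, language, license, size.
        print(row["repo_name"], row["path"], row["license"], row["size"])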
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...sharedstrings import SharedStringTable
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with an array formulas in cells."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.str_table = SharedStringTable()
worksheet.select()
# Write some data and formulas.
worksheet.write_array_formula(0, 0, 2, 0, '{=SUM(B1:C1*B2:C2)}')
worksheet.write_number(0, 1, 0)
worksheet.write_number(1, 1, 0)
worksheet.write_number(2, 1, 0)
worksheet.write_number(0, 2, 0)
worksheet.write_number(1, 2, 0)
worksheet.write_number(2, 2, 0)
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:C3"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:3">
<c r="A1">
<f t="array" ref="A1:A3">SUM(B1:C1*B2:C2)</f>
<v>0</v>
</c>
<c r="B1">
<v>0</v>
</c>
<c r="C1">
<v>0</v>
</c>
</row>
<row r="2" spans="1:3">
<c r="A2">
<v>0</v>
</c>
<c r="B2">
<v>0</v>
</c>
<c r="C2">
<v>0</v>
</c>
</row>
<row r="3" spans="1:3">
<c r="A3">
<v>0</v>
</c>
<c r="B3">
<v>0</v>
</c>
<c r="C3">
<v>0</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| jvrsantacruz/XlsxWriter | xlsxwriter/test/worksheet/test_worksheet08.py | Python | bsd-2-clause | 3,094 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classifications protobuf."""
from tensorflow_lite_support.cc.task.vision.proto import classifications_pb2
Classifications = classifications_pb2.Classifications
ClassificationResult = classifications_pb2.ClassificationResult
| chromium/chromium | third_party/tflite_support/src/tensorflow_lite_support/python/task/processor/proto/classifications_pb2.py | Python | bsd-3-clause | 837 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import datetime
import logging
import multiprocessing
import os
import posixpath
import Queue
import re
import subprocess
import sys
import threading
# addr2line builds a possibly infinite memory cache that can exhaust
# the computer's memory if allowed to grow for too long. This constant
# controls how many lookups we do before restarting the process. 4000
# gives near peak performance without extreme memory usage.
ADDR2LINE_RECYCLE_LIMIT = 4000
class ELFSymbolizer(object):
"""An uber-fast (multiprocessing, pipelined and asynchronous) ELF symbolizer.
This class is a frontend for addr2line (part of GNU binutils), designed to
symbolize batches of large numbers of symbols for a given ELF file. It
supports sharding symbolization against many addr2line instances and
pipelining of multiple requests per each instance (in order to hide addr2line
internals and OS pipe latencies).
The interface exhibited by this class is a very simple asynchronous interface,
which is based on the following three methods:
- SymbolizeAsync(): used to request (enqueue) resolution of a given address.
- The |callback| method: used to communicate back the symbol information.
- Join(): called to conclude the batch to gather the last outstanding results.
In essence, before the Join method returns, this class will have issued as
many callbacks as the number of SymbolizeAsync() calls. In this regard, note
that due to multiprocess sharding, callbacks can be delivered out of order.
Some background about addr2line:
- it is invoked passing the elf path in the cmdline, piping the addresses in
its stdin and getting results on its stdout.
- it has pretty large response times for the first requests, but it
works very well in streaming mode once it has been warmed up.
- it doesn't scale by itself (on more cores). However, spawning multiple
instances at the same time on the same file is pretty efficient as they
keep hitting the pagecache and become mostly CPU bound.
- it might hang or crash, mostly for OOM. This class deals with both of these
problems.
Despite the "scary" imports and the multi* words above, (almost) no multi-
threading/processing is involved from the python viewpoint. Concurrency
here is achieved by spawning several addr2line subprocesses and handling their
output pipes asynchronously. Therefore, all the code here (with the exception
of the Queue instance in Addr2Line) should be free from mind-blowing
thread-safety concerns.
The multiprocess sharding works as follows:
The symbolizer tries to use as few addr2line instances as possible (within
|max_concurrent_jobs|) and enqueues all the requests in a single addr2line
instance. For a few symbols (i.e. dozens) sharding isn't
worth the startup cost.
The multiprocess logic kicks in as soon as the queues for the existing
instances grow. Specifically, once all the existing instances reach the
|max_queue_size| bound, a new addr2line instance is kicked in.
In the case of a very eager producer (i.e. all |max_concurrent_jobs| instances
have a backlog of |max_queue_size|), back-pressure is applied on the caller by
blocking the SymbolizeAsync method.
This module has been deliberately designed to be dependency free (w.r.t.
other modules in this project), to allow easy reuse in external projects.
"""
def __init__(self, elf_file_path, addr2line_path, callback, inlines=False,
max_concurrent_jobs=None, addr2line_timeout=30, max_queue_size=50):
"""Args:
elf_file_path: path of the elf file to be symbolized.
addr2line_path: path of the toolchain's addr2line binary.
callback: a callback which will be invoked for each resolved symbol with
the two args (sym_info, callback_arg). The former is an instance of
|ELFSymbolInfo| and contains the symbol information. The latter is an
embedder-provided argument which is passed to SymbolizeAsync().
inlines: when True, the ELFSymbolInfo will also contain the details about
the outer inlining functions. When False, only the innermost function
will be provided.
max_concurrent_jobs: Max number of addr2line instances spawned.
Parallelize responsibly, addr2line is a memory and I/O monster.
max_queue_size: Max number of outstanding requests per addr2line instance.
addr2line_timeout: Max time (in seconds) to wait for an addr2line response.
After the timeout, the instance will be considered hung and respawned.
"""
assert(os.path.isfile(addr2line_path)), 'Cannot find ' + addr2line_path
self.elf_file_path = elf_file_path
self.addr2line_path = addr2line_path
self.callback = callback
self.inlines = inlines
self.max_concurrent_jobs = (max_concurrent_jobs or
min(multiprocessing.cpu_count(), 4))
self.max_queue_size = max_queue_size
self.addr2line_timeout = addr2line_timeout
self.requests_counter = 0 # For generating monotonic request IDs.
self._a2l_instances = [] # Up to |max_concurrent_jobs| _Addr2Line inst.
# Create one addr2line instance. More instances will be created on demand
# (up to |max_concurrent_jobs|) depending on the rate of the requests.
self._CreateNewA2LInstance()
def SymbolizeAsync(self, addr, callback_arg=None):
"""Requests symbolization of a given address.
This method is not guaranteed to return immediately. It generally does, but
in some scenarios (e.g. all addr2line instances have full queues) it can
block to create back-pressure.
Args:
addr: address to symbolize.
callback_arg: optional argument which will be passed to the |callback|."""
assert(isinstance(addr, int))
# Process all the symbols that have been resolved in the meantime.
# Essentially, this drains all the addr2line(s) out queues.
for a2l_to_purge in self._a2l_instances:
a2l_to_purge.ProcessAllResolvedSymbolsInQueue()
a2l_to_purge.RecycleIfNecessary()
# Find the best instance according to this logic:
# 1. Find an existing instance with the shortest queue.
# 2. If all the instances' queues are full, but there is room in the pool,
# (i.e. < |max_concurrent_jobs|) create a new instance.
# 3. If there were already |max_concurrent_jobs| instances and all of them
# had full queues, apply back-pressure.
# 1.
def _SortByQueueSizeAndReqID(a2l):
return (a2l.queue_size, a2l.first_request_id)
a2l = min(self._a2l_instances, key=_SortByQueueSizeAndReqID)
# 2.
if (a2l.queue_size >= self.max_queue_size and
len(self._a2l_instances) < self.max_concurrent_jobs):
a2l = self._CreateNewA2LInstance()
# 3.
if a2l.queue_size >= self.max_queue_size:
a2l.WaitForNextSymbolInQueue()
a2l.EnqueueRequest(addr, callback_arg)
def Join(self):
"""Waits for all the outstanding requests to complete and terminates."""
for a2l in self._a2l_instances:
a2l.WaitForIdle()
a2l.Terminate()
def _CreateNewA2LInstance(self):
assert(len(self._a2l_instances) < self.max_concurrent_jobs)
a2l = ELFSymbolizer.Addr2Line(self)
self._a2l_instances.append(a2l)
return a2l
class Addr2Line(object):
"""A python wrapper around an addr2line instance.
The communication with the addr2line process looks as follows:
[STDIN] [STDOUT] (from addr2line's viewpoint)
> f001111
> f002222
< Symbol::Name(foo, bar) for f001111
< /path/to/source/file.c:line_number
> f003333
< Symbol::Name2() for f002222
< /path/to/source/file.c:line_number
< Symbol::Name3() for f003333
< /path/to/source/file.c:line_number
"""
SYM_ADDR_RE = re.compile(r'([^:]+):(\?|\d+).*')
def __init__(self, symbolizer):
self._symbolizer = symbolizer
self._lib_file_name = posixpath.basename(symbolizer.elf_file_path)
# The request queue (i.e. addresses pushed to addr2line's stdin and not
# yet retrieved on stdout)
self._request_queue = collections.deque()
# This is essentially len(self._request_queue). It has been optimized into a
# separate field because it turned out to be a perf hot-spot.
self.queue_size = 0
# Keep track of the number of symbols a process has processed to
# avoid a single process growing too big and using all the memory.
self._processed_symbols_count = 0
# Objects required to handle the addr2line subprocess.
self._proc = None # Subprocess.Popen(...) instance.
self._thread = None # Threading.thread instance.
self._out_queue = None # Queue.Queue instance (for buffering a2l stdout).
self._RestartAddr2LineProcess()
def EnqueueRequest(self, addr, callback_arg):
"""Pushes an address to addr2line's stdin (and keeps track of it)."""
self._symbolizer.requests_counter += 1 # For global "age" of requests.
req_idx = self._symbolizer.requests_counter
self._request_queue.append((addr, callback_arg, req_idx))
self.queue_size += 1
self._WriteToA2lStdin(addr)
def WaitForIdle(self):
"""Waits until all the pending requests have been symbolized."""
while self.queue_size > 0:
self.WaitForNextSymbolInQueue()
def WaitForNextSymbolInQueue(self):
"""Waits for the next pending request to be symbolized."""
if not self.queue_size:
return
# This outer loop guards against a2l hanging (detecting stdout timeout).
while True:
start_time = datetime.datetime.now()
timeout = datetime.timedelta(seconds=self._symbolizer.addr2line_timeout)
# The inner loop guards against a2l crashing (checking if it exited).
while (datetime.datetime.now() - start_time < timeout):
# poll() returns !None if the process exited. a2l should never exit.
if self._proc.poll():
logging.warning('addr2line crashed, respawning (lib: %s).' %
self._lib_file_name)
self._RestartAddr2LineProcess()
# TODO(primiano): the best thing to do in this case would be
# shrinking the pool size as, very likely, addr2line is crashed
# due to low memory (and the respawned one will die again soon).
try:
lines = self._out_queue.get(block=True, timeout=0.25)
except Queue.Empty:
# On timeout (1/4 s.) repeat the inner loop and check whether the
# addr2line process crashed or we have been waiting too long for its output.
continue
# In nominal conditions, we get straight to this point.
self._ProcessSymbolOutput(lines)
return
# If this point is reached, we waited more than |addr2line_timeout|.
logging.warning('Hung addr2line process, respawning (lib: %s).' %
self._lib_file_name)
self._RestartAddr2LineProcess()
def ProcessAllResolvedSymbolsInQueue(self):
"""Consumes all the addr2line output lines produced (without blocking)."""
if not self.queue_size:
return
while True:
try:
lines = self._out_queue.get_nowait()
except Queue.Empty:
break
self._ProcessSymbolOutput(lines)
def RecycleIfNecessary(self):
"""Restarts the process if it has been used for too long.
A long running addr2line process will consume excessive amounts
of memory without any gain in performance."""
if self._processed_symbols_count >= ADDR2LINE_RECYCLE_LIMIT:
self._RestartAddr2LineProcess()
def Terminate(self):
"""Kills the underlying addr2line process.
The poller |_thread| will terminate as well due to the broken pipe."""
try:
self._proc.kill()
self._proc.communicate() # Essentially wait() without risking deadlock.
except Exception: # An exception while terminating? How interesting.
pass
self._proc = None
def _WriteToA2lStdin(self, addr):
self._proc.stdin.write('%s\n' % hex(addr))
if self._symbolizer.inlines:
# In the case of inlines we output an extra blank line, which causes
# addr2line to emit a (??,??:0) tuple that we use as a boundary marker.
self._proc.stdin.write('\n')
self._proc.stdin.flush()
def _ProcessSymbolOutput(self, lines):
"""Parses an addr2line symbol output and triggers the client callback."""
(_, callback_arg, _) = self._request_queue.popleft()
self.queue_size -= 1
innermost_sym_info = None
sym_info = None
for (line1, line2) in lines:
prev_sym_info = sym_info
name = line1 if not line1.startswith('?') else None
source_path = None
source_line = None
m = ELFSymbolizer.Addr2Line.SYM_ADDR_RE.match(line2)
if m:
if not m.group(1).startswith('?'):
source_path = m.group(1)
if not m.group(2).startswith('?'):
source_line = int(m.group(2))
else:
logging.warning('Got invalid symbol path from addr2line: %s' % line2)
sym_info = ELFSymbolInfo(name, source_path, source_line)
if prev_sym_info:
prev_sym_info.inlined_by = sym_info
if not innermost_sym_info:
innermost_sym_info = sym_info
self._processed_symbols_count += 1
self._symbolizer.callback(innermost_sym_info, callback_arg)
def _RestartAddr2LineProcess(self):
if self._proc:
self.Terminate()
# The only reason for the existence of this Queue (and the corresponding
# Thread below) is the lack of a subprocess.stdout.poll_avail_lines().
# Essentially this is a pipe able to extract a couple of lines atomically.
self._out_queue = Queue.Queue()
# Start the underlying addr2line process in line buffered mode.
cmd = [self._symbolizer.addr2line_path, '--functions', '--demangle',
'--exe=' + self._symbolizer.elf_file_path]
if self._symbolizer.inlines:
cmd += ['--inlines']
self._proc = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=sys.stderr, close_fds=True)
# Start the poller thread, which simply moves atomically the lines read
# from the addr2line's stdout to the |_out_queue|.
self._thread = threading.Thread(
target=ELFSymbolizer.Addr2Line.StdoutReaderThread,
args=(self._proc.stdout, self._out_queue, self._symbolizer.inlines))
self._thread.daemon = True # Don't prevent early process exit.
self._thread.start()
self._processed_symbols_count = 0
# Replay the pending requests on the new process (only for the case
# of a hung addr2line timing out during the game).
for (addr, _, _) in self._request_queue:
self._WriteToA2lStdin(addr)
@staticmethod
def StdoutReaderThread(process_pipe, queue, inlines):
"""The poller thread fn, which moves the addr2line stdout to the |queue|.
This is the only piece of code not running on the main thread. It merely
writes to a Queue, which is thread-safe. In the case of inlines, it
detects the ??,??:0 marker and sends the lines atomically, such that the
main thread always receives all the lines corresponding to one symbol in
one shot."""
try:
lines_for_one_symbol = []
while True:
line1 = process_pipe.readline().rstrip('\r\n')
line2 = process_pipe.readline().rstrip('\r\n')
if not line1 or not line2:
break
inline_has_more_lines = inlines and (len(lines_for_one_symbol) == 0 or
(line1 != '??' and line2 != '??:0'))
if not inlines or inline_has_more_lines:
lines_for_one_symbol += [(line1, line2)]
if inline_has_more_lines:
continue
queue.put(lines_for_one_symbol)
lines_for_one_symbol = []
process_pipe.close()
# Every addr2line process will die at some point, please die silently.
except (IOError, OSError):
pass
@property
def first_request_id(self):
"""Returns the request_id of the oldest pending request in the queue."""
return self._request_queue[0][2] if self._request_queue else 0
class ELFSymbolInfo(object):
"""The result of the symbolization passed as first arg. of each callback."""
def __init__(self, name, source_path, source_line):
"""All the fields here can be None (if addr2line replies with '??')."""
self.name = name
self.source_path = source_path
self.source_line = source_line
# In the case of |inlines|=True, the |inlined_by| points to the outer
# function inlining the current one (and so on, to form a chain).
self.inlined_by = None
def __str__(self):
return '%s [%s:%d]' % (
self.name or '??', self.source_path or '??', self.source_line or 0)
| TeamEOS/external_chromium_org | build/android/pylib/symbols/elf_symbolizer.py | Python | bsd-3-clause | 17,340 |
import datetime
from stream_framework.activity import Activity
from stream_framework.activity import AggregatedActivity
from stream_framework.activity import DehydratedActivity
from stream_framework.tests.utils import Pin
from stream_framework.verbs.base import Love as LoveVerb
from stream_framework.aggregators.base import RecentVerbAggregator
from stream_framework.exceptions import ActivityNotFound
from stream_framework.exceptions import DuplicateActivityException
import time
import unittest
import six
class TestActivity(unittest.TestCase):
def test_serialization_length(self):
activity_object = Pin(id=1)
activity = Activity(1, LoveVerb, activity_object)
assert len(str(activity.serialization_id)) == 26
def test_serialization_type(self):
activity_object = Pin(id=1)
activity = Activity(1, LoveVerb, activity_object)
assert isinstance(activity.serialization_id, (six.integer_types, float))
def test_serialization_overflow_check_object_id(self):
activity_object = Pin(id=10 ** 10)
activity = Activity(1, LoveVerb, activity_object)
with self.assertRaises(TypeError):
activity.serialization_id
def test_serialization_overflow_check_role_id(self):
activity_object = Pin(id=1)
Verb = type('Overflow', (LoveVerb,), {'id': 9999})
activity = Activity(1, Verb, activity_object)
with self.assertRaises(TypeError):
activity.serialization_id
def test_dehydrated_activity(self):
activity_object = Pin(id=1)
activity = Activity(1, LoveVerb, activity_object)
dehydrated = activity.get_dehydrated()
self.assertTrue(isinstance(dehydrated, DehydratedActivity))
self.assertEquals(
dehydrated.serialization_id, activity.serialization_id)
def test_compare_idempotent_init(self):
t1 = datetime.datetime.utcnow()
activity_object = Pin(id=1)
activity1 = Activity(1, LoveVerb, activity_object, time=t1)
time.sleep(0.1)
activity2 = Activity(1, LoveVerb, activity_object, time=t1)
self.assertEquals(activity1, activity2)
def test_compare_apple_and_oranges(self):
activity_object = Pin(id=1)
activity = Activity(1, LoveVerb, activity_object)
with self.assertRaises(ValueError):
activity == activity_object
class TestAggregatedActivity(unittest.TestCase):
def test_contains(self):
activity = Activity(1, LoveVerb, Pin(id=1))
aggregated = AggregatedActivity(1, [activity])
self.assertTrue(aggregated.contains(activity))
def test_duplicated_activities(self):
activity = Activity(1, LoveVerb, Pin(id=1))
aggregated = AggregatedActivity(1, [activity])
with self.assertRaises(DuplicateActivityException):
aggregated.append(activity)
def test_compare_apple_and_oranges(self):
activity = AggregatedActivity(1, [Activity(1, LoveVerb, Pin(id=1))])
with self.assertRaises(ValueError):
activity == Pin(id=1)
def test_contains_extraneous_object(self):
activity = AggregatedActivity(1, [Activity(1, LoveVerb, Pin(id=1))])
with self.assertRaises(ValueError):
activity.contains(Pin(id=1))
def test_aggregated_properties(self):
activities = []
for x in range(1, 101):
activity_object = Pin(id=x)
activity = Activity(x, LoveVerb, activity_object)
activities.append(activity)
aggregator = RecentVerbAggregator()
aggregated_activities = aggregator.aggregate(activities)
aggregated = aggregated_activities[0]
self.assertEqual(aggregated.verbs, [LoveVerb])
self.assertEqual(aggregated.verb, LoveVerb)
self.assertEqual(aggregated.actor_count, 100)
self.assertEqual(aggregated.minimized_activities, 85)
self.assertEqual(aggregated.other_actor_count, 99)
self.assertEqual(aggregated.activity_count, 100)
self.assertEqual(aggregated.object_ids, list(range(86, 101)))
# the other ones should be dropped
self.assertEqual(aggregated.actor_ids, list(range(86, 101)))
self.assertEqual(aggregated.is_seen(), False)
self.assertEqual(aggregated.is_read(), False)
def generate_activities(self):
activities = []
for x in range(1, 20):
activity = Activity(x, LoveVerb, Pin(id=x))
activities.append(activity)
return activities
def generate_aggregated_activities(self, activities):
aggregator = RecentVerbAggregator()
aggregated_activities = aggregator.aggregate(activities)
return aggregated_activities
def test_aggregated_compare(self):
activities = self.generate_activities()
aggregated_activities = self.generate_aggregated_activities(activities)
aggregated_activities_two = self.generate_aggregated_activities(activities)
new_activities = self.generate_activities()
aggregated_activities_three = self.generate_aggregated_activities(new_activities)
# this should be equal
self.assertEqual(aggregated_activities, aggregated_activities_two)
# this should not be equal
self.assertNotEqual(aggregated_activities, aggregated_activities_three)
def test_aggregated_remove(self):
activities = []
for x in range(1, 101):
activity_object = Pin(id=x)
activity = Activity(x, LoveVerb, activity_object)
activities.append(activity)
aggregator = RecentVerbAggregator()
aggregated_activities = aggregator.aggregate(activities)
aggregated = aggregated_activities[0]
for activity in activities:
try:
aggregated.remove(activity)
except (ActivityNotFound, ValueError):
pass
self.assertEqual(len(aggregated.activities), 1)
self.assertEqual(aggregated.activity_count, 72)
| Anislav/Stream-Framework | stream_framework/tests/activity.py | Python | bsd-3-clause | 6,026 |
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing CP2K, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import fileinput
import glob
import re
import os
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_avail_core_count
# CP2K needs this version of libxc
LIBXC_MIN_VERSION = '2.0.1'
class EB_CP2K(EasyBlock):
"""
Support for building CP2K
- prepare module include files if required
- generate custom config file in 'arch' directory
- build CP2K
- run regression test if desired
- install by copying binary executables
"""
def __init__(self, *args, **kwargs):
super(EB_CP2K, self).__init__(*args, **kwargs)
self.typearch = None
# this should be set to False for old versions of GCC (e.g. v4.1)
self.compilerISO_C_BINDING = True
# compiler options that need to be set in Makefile
self.debug = ''
self.fpic = ''
self.libsmm = ''
self.modincpath = ''
self.openmp = ''
self.make_instructions = ''
@staticmethod
def extra_options():
extra_vars = {
'type': ['popt', "Type of build ('popt' or 'psmp')", CUSTOM],
'typeopt': [True, "Enable optimization", CUSTOM],
'modincprefix': ['', "IMKL prefix for modinc include dir", CUSTOM],
'modinc': [[], ("List of modinc's to use (*.f90), or 'True' to use "
"all found at given prefix"), CUSTOM],
'extracflags': ['', "Extra CFLAGS to be added", CUSTOM],
'extradflags': ['', "Extra DFLAGS to be added", CUSTOM],
'ignore_regtest_fails': [False, ("Ignore failures in regression test "
"(should be used with care)"), CUSTOM],
'maxtasks': [3, ("Maximum number of CP2K instances run at "
"the same time during testing"), CUSTOM],
'runtest': [True, "Build and run CP2K tests", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def _generate_makefile(self, options):
"""Generate Makefile based on options dictionary and optional make instructions"""
text = "# Makefile generated by CP2K._generateMakefile, items might appear in random order\n"
for key, value in options.iteritems():
text += "%s = %s\n" % (key, value)
return text + self.make_instructions
def configure_step(self):
"""Configure build
- build Libint wrapper
- generate Makefile
"""
known_types = ['popt', 'psmp']
if self.cfg['type'] not in known_types:
raise EasyBuildError("Unknown build type specified: '%s', known types are %s",
self.cfg['type'], known_types)
# correct start dir, if needed
# recent CP2K versions have a 'cp2k' dir in the unpacked 'cp2k' dir
cp2k_path = os.path.join(self.cfg['start_dir'], 'cp2k')
if os.path.exists(cp2k_path):
self.cfg['start_dir'] = cp2k_path
self.log.info("Corrected start_dir to %s" % self.cfg['start_dir'])
# set compilers options according to toolchain config
# full debug: -g -traceback -check all -fp-stack-check
# -g links to mpi debug libs
if self.toolchain.options['debug']:
self.debug = '-g'
self.log.info("Debug build")
if self.toolchain.options['pic']:
self.fpic = "-fPIC"
self.log.info("Using fPIC")
# report on extra flags being used
if self.cfg['extracflags']:
self.log.info("Using extra CFLAGS: %s" % self.cfg['extracflags'])
if self.cfg['extradflags']:
self.log.info("Using extra CFLAGS: %s" % self.cfg['extradflags'])
# libsmm support
libsmm = get_software_root('libsmm')
if libsmm:
libsmms = glob.glob(os.path.join(libsmm, 'lib', 'libsmm_*nn.a'))
dfs = [os.path.basename(os.path.splitext(x)[0]).replace('lib', '-D__HAS_') for x in libsmms]
moredflags = ' ' + ' '.join(dfs)
self.cfg.update('extradflags', moredflags)
self.libsmm = ' '.join(libsmms)
self.log.debug('Using libsmm %s (extradflags %s)' % (self.libsmm, moredflags))
# obtain list of modinc's to use
if self.cfg["modinc"]:
self.modincpath = self.prepmodinc()
# set typearch
self.typearch = "Linux-x86-64-%s" % self.toolchain.name
# extra make instructions
self.make_instructions = '' # "graphcon.o: graphcon.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
# compiler toolchain specific configuration
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.INTELCOMP:
options = self.configure_intel_based()
elif comp_fam == toolchain.GCC:
options = self.configure_GCC_based()
else:
raise EasyBuildError("Don't know how to tweak configuration for compiler family %s" % comp_fam)
# BLAS/FFTW
if get_software_root('IMKL'):
options = self.configure_MKL(options)
else:
# BLAS
if get_software_root('ACML'):
options = self.configure_ACML(options)
else:
options = self.configure_BLAS_lib(options)
# FFTW (no MKL involved)
if 'fftw3' in os.getenv('LIBFFT', ''):
options = self.configure_FFTW3(options)
# LAPACK
if os.getenv('LIBLAPACK_MT', None) is not None:
options = self.configure_LAPACK(options)
if os.getenv('LIBSCALAPACK', None) is not None:
options = self.configure_ScaLAPACK(options)
# avoid group nesting
options['LIBS'] = options['LIBS'].replace('-Wl,--start-group', '').replace('-Wl,--end-group', '')
options['LIBS'] = "-Wl,--start-group %s -Wl,--end-group" % options['LIBS']
# create arch file using options set
archfile = os.path.join(self.cfg['start_dir'], 'arch',
'%s.%s' % (self.typearch, self.cfg['type']))
try:
txt = self._generate_makefile(options)
f = open(archfile, 'w')
f.write(txt)
f.close()
self.log.info("Content of makefile (%s):\n%s" % (archfile, txt))
except IOError, err:
raise EasyBuildError("Writing makefile %s failed: %s", archfile, err)
def prepmodinc(self):
"""Prepare list of module files"""
self.log.debug("Preparing module files")
imkl = get_software_root('IMKL')
if imkl:
# prepare modinc target path
modincpath = os.path.join(os.path.dirname(os.path.normpath(self.cfg['start_dir'])), 'modinc')
self.log.debug("Preparing module files in %s" % modincpath)
try:
os.mkdir(modincpath)
except OSError, err:
raise EasyBuildError("Failed to create directory for module include files: %s", err)
# get list of modinc source files
modincdir = os.path.join(imkl, self.cfg["modincprefix"], 'include')
if type(self.cfg["modinc"]) == list:
modfiles = [os.path.join(modincdir, x) for x in self.cfg["modinc"]]
elif type(self.cfg["modinc"]) == bool and type(self.cfg["modinc"]):
modfiles = glob.glob(os.path.join(modincdir, '*.f90'))
else:
raise EasyBuildError("prepmodinc: Please specify either a boolean value or a list of files in modinc "
"(found: %s).", self.cfg["modinc"])
f77 = os.getenv('F77')
if not f77:
raise EasyBuildError("F77 environment variable not set, can't continue.")
# create modinc files
for f in modfiles:
if f77.endswith('ifort'):
cmd = "%s -module %s -c %s" % (f77, modincpath, f)
elif f77 in ['gfortran', 'mpif77']:
cmd = "%s -J%s -c %s" % (f77, modincpath, f)
else:
raise EasyBuildError("prepmodinc: Unknown value specified for F77 (%s)", f77)
run_cmd(cmd, log_all=True, simple=True)
return modincpath
else:
raise EasyBuildError("Don't know how to prepare modinc, IMKL not found")
def configure_common(self):
"""Common configuration for all toolchains"""
# openmp introduces 2 major differences
# -automatic is default: -noautomatic -auto-scalar
# some mem-bandwidth optimisation
if self.cfg['type'] == 'psmp':
self.openmp = self.toolchain.get_flag('openmp')
# determine which opt flags to use
if self.cfg['typeopt']:
optflags = 'OPT'
regflags = 'OPT2'
else:
optflags = 'NOOPT'
regflags = 'NOOPT'
# make sure a MPI-2 able MPI lib is used
mpi2 = False
if hasattr(self.toolchain, 'MPI_FAMILY') and self.toolchain.MPI_FAMILY is not None:
known_mpi2_fams = [toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2, toolchain.OPENMPI,
toolchain.INTELMPI]
mpi_fam = self.toolchain.mpi_family()
if mpi_fam in known_mpi2_fams:
mpi2 = True
self.log.debug("Determined MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam)
else:
self.log.debug("Cannot determine MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam)
else:
# can't use toolchain.mpi_family, because of dummy toolchain
mpi2libs = ['impi', 'MVAPICH2', 'OpenMPI', 'MPICH2', 'MPICH']
for mpi2lib in mpi2libs:
if get_software_root(mpi2lib):
mpi2 = True
self.log.debug("Determined MPI2 compatibility based on loaded MPI module: %s")
else:
self.log.debug("MPI-2 supporting MPI library %s not loaded.")
if not mpi2:
raise EasyBuildError("CP2K needs MPI-2, no known MPI-2 supporting library loaded?")
options = {
'CC': os.getenv('MPICC'),
'CPP': '',
'FC': '%s %s' % (os.getenv('MPIF90'), self.openmp),
'LD': '%s %s' % (os.getenv('MPIF90'), self.openmp),
'AR': 'ar -r',
'CPPFLAGS': '',
'FPIC': self.fpic,
'DEBUG': self.debug,
'FCFLAGS': '$(FCFLAGS%s)' % optflags,
'FCFLAGS2': '$(FCFLAGS%s)' % regflags,
'CFLAGS': ' %s %s $(FPIC) $(DEBUG) %s ' % (os.getenv('CPPFLAGS'), os.getenv('LDFLAGS'),
self.cfg['extracflags']),
'DFLAGS': ' -D__parallel -D__BLACS -D__SCALAPACK -D__FFTSG %s' % self.cfg['extradflags'],
'LIBS': os.getenv('LIBS', ''),
'FCFLAGSNOOPT': '$(DFLAGS) $(CFLAGS) -O0 $(FREE) $(FPIC) $(DEBUG)',
'FCFLAGSOPT': '-O2 $(FREE) $(SAFE) $(FPIC) $(DEBUG)',
'FCFLAGSOPT2': '-O1 $(FREE) $(SAFE) $(FPIC) $(DEBUG)'
}
libint = get_software_root('LibInt')
if libint:
options['DFLAGS'] += ' -D__LIBINT'
libintcompiler = "%s %s" % (os.getenv('CC'), os.getenv('CFLAGS'))
# Build libint-wrapper, if required
libint_wrapper = ''
# required for old versions of GCC
if not self.compilerISO_C_BINDING:
options['DFLAGS'] += ' -D__HAS_NO_ISO_C_BINDING'
# determine path for libint_tools dir
libinttools_paths = ['libint_tools', 'tools/hfx_tools/libint_tools']
libinttools_path = None
for path in libinttools_paths:
path = os.path.join(self.cfg['start_dir'], path)
if os.path.isdir(path):
libinttools_path = path
os.chdir(libinttools_path)
if not libinttools_path:
raise EasyBuildError("No libinttools dir found")
# build libint wrapper
cmd = "%s -c libint_cpp_wrapper.cpp -I%s/include" % (libintcompiler, libint)
if not run_cmd(cmd, log_all=True, simple=True):
raise EasyBuildError("Building the libint wrapper failed")
libint_wrapper = '%s/libint_cpp_wrapper.o' % libinttools_path
# determine LibInt libraries based on major version number
libint_maj_ver = get_software_version('LibInt').split('.')[0]
if libint_maj_ver == '1':
libint_libs = "$(LIBINTLIB)/libderiv.a $(LIBINTLIB)/libint.a $(LIBINTLIB)/libr12.a"
elif libint_maj_ver == '2':
libint_libs = "$(LIBINTLIB)/libint2.a"
else:
raise EasyBuildError("Don't know how to handle libint version %s", libint_maj_ver)
self.log.info("Using LibInt version %s" % (libint_maj_ver))
options['LIBINTLIB'] = '%s/lib' % libint
options['LIBS'] += ' %s -lstdc++ %s' % (libint_libs, libint_wrapper)
else:
# throw a warning, since CP2K without LibInt doesn't make much sense
self.log.warning("LibInt module not loaded, so building without LibInt support")
libxc = get_software_root('libxc')
if libxc:
cur_libxc_version = get_software_version('libxc')
if LooseVersion(cur_libxc_version) < LooseVersion(LIBXC_MIN_VERSION):
raise EasyBuildError("CP2K only works with libxc v%s (or later)", LIBXC_MIN_VERSION)
options['DFLAGS'] += ' -D__LIBXC2'
if LooseVersion(cur_libxc_version) >= LooseVersion('2.2'):
options['LIBS'] += ' -L%s/lib -lxcf90 -lxc' % libxc
else:
options['LIBS'] += ' -L%s/lib -lxc' % libxc
self.log.info("Using Libxc-%s" % cur_libxc_version)
else:
self.log.info("libxc module not loaded, so building without libxc support")
return options
def configure_intel_based(self):
"""Configure for Intel based toolchains"""
# based on guidelines available at
# http://software.intel.com/en-us/articles/build-cp2k-using-intel-fortran-compiler-professional-edition/
intelurl = ''.join(["http://software.intel.com/en-us/articles/",
"build-cp2k-using-intel-fortran-compiler-professional-edition/"])
options = self.configure_common()
extrainc = ''
if self.modincpath:
extrainc = '-I%s' % self.modincpath
options.update({
# -Vaxlib : older options
'FREE': '-fpp -free',
# SAFE = -assume protect_parens -fp-model precise -ftz # causes problems, so don't use this
'SAFE': '-assume protect_parens -no-unroll-aggressive',
'INCFLAGS': '$(DFLAGS) -I$(INTEL_INC) -I$(INTEL_INCF) %s' % extrainc,
'LDFLAGS': '$(INCFLAGS) -i-static',
'OBJECTS_ARCHITECTURE': 'machine_intel.o',
})
options['DFLAGS'] += ' -D__INTEL'
optarch = ''
if self.toolchain.options['optarch']:
optarch = '-xHOST'
options['FCFLAGSOPT'] += ' $(INCFLAGS) %s -heap-arrays 64' % optarch
options['FCFLAGSOPT2'] += ' $(INCFLAGS) %s -heap-arrays 64' % optarch
ifortver = LooseVersion(get_software_version('ifort'))
failmsg = "CP2K won't build correctly with the Intel %%s compilers prior to %%s, see %s" % intelurl
if ifortver >= LooseVersion("2011") and ifortver < LooseVersion("2012"):
# don't allow using Intel compiler 2011 prior to release 8, because of known issue (see Intel URL)
if ifortver >= LooseVersion("2011.8"):
# add additional make instructions to Makefile
self.make_instructions += "et_coupling.o: et_coupling.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
else:
raise EasyBuildError(failmsg, "v12", "v2011.8")
elif ifortver >= LooseVersion("11"):
if LooseVersion(get_software_version('ifort')) >= LooseVersion("11.1.072"):
self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
else:
raise EasyBuildError(failmsg, "v11", "v11.1.072")
else:
raise EasyBuildError("Intel compilers version %s not supported yet.", ifortver)
return options
def configure_GCC_based(self):
"""Configure for GCC based toolchains"""
options = self.configure_common()
options.update({
# need this to prevent "Unterminated character constant beginning" errors
'FREE': '-ffree-form -ffree-line-length-none',
'LDFLAGS': '$(FCFLAGS)',
'OBJECTS_ARCHITECTURE': 'machine_gfortran.o',
})
options['DFLAGS'] += ' -D__GFORTRAN'
optarch = ''
if self.toolchain.options['optarch']:
optarch = '-march=native'
options['FCFLAGSOPT'] += ' $(DFLAGS) $(CFLAGS) %s -fmax-stack-var-size=32768' % optarch
options['FCFLAGSOPT2'] += ' $(DFLAGS) $(CFLAGS) %s' % optarch
return options
def configure_ACML(self, options):
"""Configure for AMD Math Core Library (ACML)"""
openmp_suffix = ''
if self.openmp:
openmp_suffix = '_mp'
options['ACML_INC'] = '%s/gfortran64%s/include' % (get_software_root('ACML'), openmp_suffix)
options['CFLAGS'] += ' -I$(ACML_INC) -I$(FFTW_INC)'
options['DFLAGS'] += ' -D__FFTACML'
blas = os.getenv('LIBBLAS', '')
blas = blas.replace('gfortran64', 'gfortran64%s' % openmp_suffix)
options['LIBS'] += ' %s %s %s' % (self.libsmm, os.getenv('LIBSCALAPACK', ''), blas)
return options
def configure_BLAS_lib(self, options):
"""Configure for BLAS library."""
options['LIBS'] += ' %s %s' % (self.libsmm, os.getenv('LIBBLAS', ''))
return options
def configure_MKL(self, options):
"""Configure for Intel Math Kernel Library (MKL)"""
options.update({
'INTEL_INC': '$(MKLROOT)/include',
})
options['DFLAGS'] += ' -D__FFTW3'
extra = ''
if self.modincpath:
extra = '-I%s' % self.modincpath
options['CFLAGS'] += ' -I$(INTEL_INC) %s $(FPIC) $(DEBUG)' % extra
options['LIBS'] += ' %s %s' % (self.libsmm, os.getenv('LIBSCALAPACK', ''))
# only use Intel FFTW wrappers if FFTW is not loaded
if not get_software_root('FFTW'):
options.update({
'INTEL_INCF': '$(INTEL_INC)/fftw',
})
options['DFLAGS'] += ' -D__FFTMKL'
options['CFLAGS'] += ' -I$(INTEL_INCF)'
options['LIBS'] = '%s %s' % (os.getenv('LIBFFT', ''), options['LIBS'])
return options
def configure_FFTW3(self, options):
"""Configure for FFTW3"""
options.update({
'FFTW_INC': os.getenv('FFT_INC_DIR', ''), # GCC
'FFTW3INC': os.getenv('FFT_INC_DIR', ''), # Intel
'FFTW3LIB': os.getenv('FFT_LIB_DIR', ''), # Intel
})
options['DFLAGS'] += ' -D__FFTW3'
options['LIBS'] += ' -L%s %s' % (os.getenv('FFT_LIB_DIR', '.'), os.getenv('LIBFFT', ''))
return options
def configure_LAPACK(self, options):
"""Configure for LAPACK library"""
options['LIBS'] += ' %s' % os.getenv('LIBLAPACK_MT', '')
return options
def configure_ScaLAPACK(self, options):
"""Configure for ScaLAPACK library"""
options['LIBS'] += ' %s' % os.getenv('LIBSCALAPACK', '')
return options
def build_step(self):
"""Start the actual build
- go into makefiles dir
- patch Makefile
- build_and_install
"""
makefiles = os.path.join(self.cfg['start_dir'], 'makefiles')
try:
os.chdir(makefiles)
except OSError, err:
raise EasyBuildError("Can't change to makefiles dir %s: %s", makefiles, err)
# modify makefile for parallel build
parallel = self.cfg['parallel']
if parallel:
try:
for line in fileinput.input('Makefile', inplace=1, backup='.orig.patchictce'):
line = re.sub(r"^PMAKE\s*=.*$", "PMAKE\t= $(SMAKE) -j %s" % parallel, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Can't modify/write Makefile in %s: %s", makefiles, err)
# update make options with MAKE
self.cfg.update('buildopts', 'MAKE="make -j %s" all' % self.cfg['parallel'])
# update make options with ARCH and VERSION
self.cfg.update('buildopts', 'ARCH=%s VERSION=%s' % (self.typearch, self.cfg['type']))
cmd = "make %s" % self.cfg['buildopts']
# clean first
run_cmd(cmd + " clean", log_all=True, simple=True, log_output=True)
#build_and_install
run_cmd(cmd, log_all=True, simple=True, log_output=True)
def test_step(self):
"""Run regression test."""
if self.cfg['runtest']:
# change to root of build dir
try:
os.chdir(self.builddir)
except OSError, err:
raise EasyBuildError("Failed to change to %s: %s", self.builddir, err)
# use regression test reference output if available
# try and find an unpacked directory that starts with 'LAST-'
regtest_refdir = None
for d in os.listdir(self.builddir):
if d.startswith("LAST-"):
regtest_refdir = d
break
# location of do_regtest script
cfg_fn = "cp2k_regtest.cfg"
regtest_script = os.path.join(self.cfg['start_dir'], 'tools', 'regtesting', 'do_regtest')
regtest_cmd = "%s -nosvn -nobuild -config %s" % (regtest_script, cfg_fn)
# older version of CP2K
if not os.path.exists(regtest_script):
regtest_script = os.path.join(self.cfg['start_dir'], 'tools', 'do_regtest')
regtest_cmd = "%s -nocvs -quick -nocompile -config %s" % (regtest_script, cfg_fn)
# patch do_regtest so that reference output is used
if regtest_refdir:
self.log.info("Using reference output available in %s" % regtest_refdir)
try:
for line in fileinput.input(regtest_script, inplace=1, backup='.orig.refout'):
line = re.sub(r"^(dir_last\s*=\${dir_base})/.*$", r"\1/%s" % regtest_refdir, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to modify '%s': %s", regtest_script, err)
else:
self.log.info("No reference output found for regression test, just continuing without it...")
test_core_cnt = min(self.cfg.get('parallel', sys.maxint), 2)
if get_avail_core_count() < test_core_cnt:
raise EasyBuildError("Cannot run MPI tests as not enough cores (< %s) are available", test_core_cnt)
else:
self.log.info("Using %s cores for the MPI tests" % test_core_cnt)
# configure regression test
cfg_txt = '\n'.join([
'FORT_C_NAME="%(f90)s"',
'dir_base=%(base)s',
'cp2k_version=%(cp2k_version)s',
'dir_triplet=%(triplet)s',
'export ARCH=${dir_triplet}',
'cp2k_dir=%(cp2k_dir)s',
'leakcheck="YES"',
'maxtasks=%(maxtasks)s',
'cp2k_run_prefix="%(mpicmd_prefix)s"',
]) % {
'f90': os.getenv('F90'),
'base': os.path.dirname(os.path.normpath(self.cfg['start_dir'])),
'cp2k_version': self.cfg['type'],
'triplet': self.typearch,
'cp2k_dir': os.path.basename(os.path.normpath(self.cfg['start_dir'])),
'maxtasks': self.cfg['maxtasks'],
'mpicmd_prefix': self.toolchain.mpi_cmd_for('', test_core_cnt),
}
try:
f = open(cfg_fn, "w")
f.write(cfg_txt)
f.close()
except IOError, err:
raise EasyBuildError("Failed to create config file %s: %s", cfg_fn, err)
self.log.debug("Contents of %s: %s" % (cfg_fn, cfg_txt))
# run regression test
(regtest_output, ec) = run_cmd(regtest_cmd, log_all=True, simple=False, log_output=True)
if ec == 0:
self.log.info("Regression test output:\n%s" % regtest_output)
else:
raise EasyBuildError("Regression test failed (non-zero exit code): %s", regtest_output)
# pattern to search for regression test summary
re_pattern = "number\s+of\s+%s\s+tests\s+(?P<cnt>[0-9]+)"
# find total number of tests
regexp = re.compile(re_pattern % "", re.M | re.I)
res = regexp.search(regtest_output)
tot_cnt = None
if res:
tot_cnt = int(res.group('cnt'))
else:
raise EasyBuildError("Finding total number of tests in regression test summary failed")
# function to report on regtest results
def test_report(test_result):
"""Report on tests with given result."""
postmsg = ''
test_result = test_result.upper()
regexp = re.compile(re_pattern % test_result, re.M | re.I)
cnt = None
res = regexp.search(regtest_output)
if not res:
raise EasyBuildError("Finding number of %s tests in regression test summary failed",
test_result.lower())
else:
cnt = int(res.group('cnt'))
logmsg = "Regression test reported %s / %s %s tests"
logmsg_values = (cnt, tot_cnt, test_result.lower())
# failed tests indicate problem with installation
# wrong tests are only an issue when there are excessively many
if (test_result == "FAILED" and cnt > 0) or (test_result == "WRONG" and (cnt / tot_cnt) > 0.1):
if self.cfg['ignore_regtest_fails']:
self.log.warning(logmsg, *logmsg_values)
self.log.info("Ignoring failures in regression test, as requested.")
else:
raise EasyBuildError(logmsg, *logmsg_values)
elif test_result == "CORRECT" or cnt == 0:
self.log.info(logmsg, *logmsg_values)
else:
self.log.warning(logmsg, *logmsg_values)
return postmsg
# number of failed/wrong tests, will report error if count is positive
self.postmsg += test_report("FAILED")
self.postmsg += test_report("WRONG")
# number of new tests, will be high if a non-suitable regtest reference was used
# will report error if count is positive (is that what we want?)
self.postmsg += test_report("NEW")
# number of correct tests: just report
test_report("CORRECT")
def install_step(self):
"""Install built CP2K
- copy from exe to bin
- copy tests
"""
# copy executables
targetdir = os.path.join(self.installdir, 'bin')
exedir = os.path.join(self.cfg['start_dir'], 'exe/%s' % self.typearch)
try:
if not os.path.exists(targetdir):
os.makedirs(targetdir)
os.chdir(exedir)
for exefile in os.listdir(exedir):
if os.path.isfile(exefile):
shutil.copy2(exefile, targetdir)
except OSError, err:
raise EasyBuildError("Copying executables from %s to bin dir %s failed: %s", exedir, targetdir, err)
# copy tests
srctests = os.path.join(self.cfg['start_dir'], 'tests')
targetdir = os.path.join(self.installdir, 'tests')
if os.path.exists(targetdir):
self.log.info("Won't copy tests. Destination directory %s already exists" % targetdir)
else:
try:
shutil.copytree(srctests, targetdir)
except:
raise EasyBuildError("Copying tests from %s to %s failed", srctests, targetdir)
# copy regression test results
if self.cfg['runtest']:
try:
testdir = os.path.dirname(os.path.normpath(self.cfg['start_dir']))
for d in os.listdir(testdir):
if d.startswith('TEST-%s-%s' % (self.typearch, self.cfg['type'])):
path = os.path.join(testdir, d)
target = os.path.join(self.installdir, d)
shutil.copytree(path, target)
self.log.info("Regression test results dir %s copied to %s" % (d, self.installdir))
break
except (OSError, IOError), err:
raise EasyBuildError("Failed to copy regression test results dir: %s", err)
def sanity_check_step(self):
"""Custom sanity check for CP2K"""
cp2k_type = self.cfg['type']
custom_paths = {
'files': ["bin/%s.%s" % (x, cp2k_type) for x in ["cp2k", "cp2k_shell"]],
'dirs': ["tests"]
}
super(EB_CP2K, self).sanity_check_step(custom_paths=custom_paths)
| ULHPC/modules | easybuild/easybuild-easyblocks/easybuild/easyblocks/c/cp2k.py | Python | mit | 31,859 |
# encoding: UTF-8
from vnshzd import ShzdApi
| mumuwoyou/vnpy-master | vnpy/api/shzd/__init__.py | Python | mit | 45 |
"""Implements the upcoming event widget."""
| yongwen/makahiki | makahiki/apps/widgets/upcoming_events/__init__.py | Python | mit | 44 |
# xml.etree test for cElementTree
import doctest, sys
from test import test_support
from xml.etree import cElementTree as ET
SAMPLE_XML = """
<body>
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
def sanity():
"""
Import sanity.
>>> from xml.etree import cElementTree
"""
def check_method(method):
if not callable(method):
print method, "not callable"
def serialize(ET, elem, encoding=None):
import StringIO
file = StringIO.StringIO()
tree = ET.ElementTree(elem)
if encoding:
tree.write(file, encoding)
else:
tree.write(file)
return file.getvalue()
def summarize(elem):
return elem.tag
def summarize_list(seq):
return map(summarize, seq)
def interface():
"""
Test element tree interface.
>>> element = ET.Element("tag", key="value")
>>> tree = ET.ElementTree(element)
Make sure all standard element methods exist.
>>> check_method(element.append)
>>> check_method(element.insert)
>>> check_method(element.remove)
>>> check_method(element.getchildren)
>>> check_method(element.find)
>>> check_method(element.findall)
>>> check_method(element.findtext)
>>> check_method(element.clear)
>>> check_method(element.get)
>>> check_method(element.set)
>>> check_method(element.keys)
>>> check_method(element.items)
>>> check_method(element.getiterator)
Basic method sanity checks.
>>> serialize(ET, element) # 1
'<tag key="value" />'
>>> subelement = ET.Element("subtag")
>>> element.append(subelement)
>>> serialize(ET, element) # 2
'<tag key="value"><subtag /></tag>'
>>> element.insert(0, subelement)
>>> serialize(ET, element) # 3
'<tag key="value"><subtag /><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(ET, element) # 4
'<tag key="value"><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(ET, element) # 5
'<tag key="value" />'
>>> element.remove(subelement)
Traceback (most recent call last):
ValueError: list.remove(x): x not in list
>>> serialize(ET, element) # 6
'<tag key="value" />'
"""
def find():
"""
Test find methods (including xpath syntax).
>>> elem = ET.XML(SAMPLE_XML)
>>> elem.find("tag").tag
'tag'
>>> ET.ElementTree(elem).find("tag").tag
'tag'
>>> elem.find("section/tag").tag
'tag'
>>> ET.ElementTree(elem).find("section/tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ET.ElementTree(elem).findtext("tag")
'text'
>>> elem.findtext("section/tag")
'subtext'
>>> ET.ElementTree(elem).findtext("section/tag")
'subtext'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("*"))
['tag', 'tag', 'section']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("section/tag"))
['tag']
>>> summarize_list(elem.findall("section//tag"))
['tag']
>>> summarize_list(elem.findall("section/*"))
['tag']
>>> summarize_list(elem.findall("section//*"))
['tag']
>>> summarize_list(elem.findall("section/.//*"))
['tag']
>>> summarize_list(elem.findall("*/*"))
['tag']
>>> summarize_list(elem.findall("*//*"))
['tag']
>>> summarize_list(elem.findall("*/tag"))
['tag']
>>> summarize_list(elem.findall("*/./tag"))
['tag']
>>> summarize_list(elem.findall("./tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("././tag"))
['tag', 'tag']
>>> summarize_list(ET.ElementTree(elem).findall("/tag"))
['tag', 'tag']
>>> summarize_list(ET.ElementTree(elem).findall("./tag"))
['tag', 'tag']
>>> elem = ET.XML(SAMPLE_XML_NS)
>>> summarize_list(elem.findall("tag"))
[]
>>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
>>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
"""
def parseliteral():
r"""
>>> element = ET.XML("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> element = ET.fromstring("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> print ET.tostring(element)
<html><body>text</body></html>
>>> print ET.tostring(element, "ascii")
<?xml version='1.0' encoding='ascii'?>
<html><body>text</body></html>
>>> _, ids = ET.XMLID("<html><body>text</body></html>")
>>> len(ids)
0
>>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
>>> len(ids)
1
>>> ids["body"].tag
'body'
"""
def check_encoding(encoding):
"""
>>> check_encoding("ascii")
>>> check_encoding("us-ascii")
>>> check_encoding("iso-8859-1")
>>> check_encoding("iso-8859-15")
>>> check_encoding("cp437")
>>> #check_encoding("mac-roman")
"""
ET.XML(
"<?xml version='1.0' encoding='%s'?><xml />" % encoding
)
def bug_1534630():
"""
>>> bob = ET.TreeBuilder()
>>> e = bob.data("data")
>>> e = bob.start("tag", {})
>>> e = bob.end("tag")
>>> e = bob.close()
>>> serialize(ET, e)
'<tag />'
"""
def test_main():
from test import test_xml_etree_c
test_support.run_doctest(test_xml_etree_c, verbosity=True)
if __name__ == '__main__':
test_main()
| zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/test/test_xml_etree_c.py | Python | epl-1.0 | 5,936 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,sys,imp,types,re
from waflib.Tools import ccroot
from waflib import Utils,Configure
from waflib.Logs import debug
cxx_compiler={'win32':['msvc','g++','clang++'],'cygwin':['g++'],'darwin':['clang++','g++'],'aix':['xlc++','g++','clang++'],'linux':['g++','clang++','icpc'],'sunos':['sunc++','g++'],'irix':['g++'],'hpux':['g++'],'osf1V':['g++'],'gnu':['g++','clang++'],'java':['g++','msvc','clang++','icpc'],'default':['g++','clang++']}
def default_compilers():
build_platform=Utils.unversioned_sys_platform()
possible_compiler_list=cxx_compiler.get(build_platform,cxx_compiler['default'])
return' '.join(possible_compiler_list)
def configure(conf):
try:test_for_compiler=conf.options.check_cxx_compiler or default_compilers()
except AttributeError:conf.fatal("Add options(opt): opt.load('compiler_cxx')")
for compiler in re.split('[ ,]+',test_for_compiler):
conf.env.stash()
conf.start_msg('Checking for %r (C++ compiler)'%compiler)
try:
conf.load(compiler)
except conf.errors.ConfigurationError ,e:
conf.env.revert()
conf.end_msg(False)
debug('compiler_cxx: %r'%e)
else:
if conf.env['CXX']:
conf.end_msg(conf.env.get_flat('CXX'))
conf.env['COMPILER_CXX']=compiler
break
conf.end_msg(False)
else:
conf.fatal('could not configure a C++ compiler!')
def options(opt):
test_for_compiler=default_compilers()
opt.load_special_tools('cxx_*.py')
cxx_compiler_opts=opt.add_option_group('Configuration options')
cxx_compiler_opts.add_option('--check-cxx-compiler',default=None,help='list of C++ compilers to try [%s]'%test_for_compiler,dest="check_cxx_compiler")
for x in test_for_compiler.split():
opt.load('%s'%x)
| asljivo1/802.11ah-ns3 | ns-3/.waf-1.8.12-f00e5b53f6bbeab1384a38c9cc5d51f7/waflib/Tools/compiler_cxx.py | Python | gpl-2.0 | 1,790 |
#!/usr/bin/python
# (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: mysql_user
short_description: Adds or removes a user from a MySQL database.
description:
- Adds or removes a user from a MySQL database.
version_added: "0.6"
options:
name:
description:
- name of the user (role) to add or remove
required: true
password:
description:
- set the user's password.
required: false
default: null
encrypted:
description:
- Indicate that the 'password' field is a `mysql_native_password` hash
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "2.0"
host:
description:
- the 'host' part of the MySQL username
required: false
default: localhost
host_all:
description:
- override the host option, making ansible apply changes to
all hostnames for a given user. This option cannot be used
when creating users
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "2.1"
priv:
description:
- "MySQL privileges string in the format: C(db.table:priv1,priv2)"
required: false
default: null
append_privs:
description:
- Append the privileges defined by priv to the existing ones for this
user instead of overwriting existing ones.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.4"
sql_log_bin:
description:
- Whether binary logging should be enabled or disabled for the connection.
required: false
choices: ["yes", "no" ]
default: "yes"
version_added: "2.1"
state:
description:
- Whether the user should exist. When C(absent), removes
the user.
required: false
default: present
choices: [ "present", "absent" ]
check_implicit_admin:
description:
- Check if mysql allows login as root/nopassword before trying supplied credentials.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.3"
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.0"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- "MySQL server installs with default login_user of 'root' and no password. To secure this user
as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password,
without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
the file."
- Currently, there is only support for the `mysql_native_password` encrypted password hash module.
author: "Jonathan Mainguy (@Jmainguy)"
extends_documentation_fragment: mysql
'''
EXAMPLES = """
# Removes anonymous user account for localhost
- mysql_user:
name: ''
host: localhost
state: absent
# Removes all anonymous user accounts
- mysql_user:
name: ''
host_all: yes
state: absent
# Create database user with name 'bob' and password '12345' with all database privileges
- mysql_user:
name: bob
password: 12345
priv: '*.*:ALL'
state: present
# Create database user with name 'bob' and previously hashed mysql native password '*EE0D72C1085C46C5278932678FBE2C6A782821B4' with all database privileges
- mysql_user:
name: bob
password: '*EE0D72C1085C46C5278932678FBE2C6A782821B4'
encrypted: yes
priv: '*.*:ALL'
state: present
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user:
name: bob
password: 12345
priv: '*.*:ALL,GRANT'
state: present
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user:
name: bob
append_privs: true
priv: '*.*:REQUIRESSL'
state: present
# Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials.
- mysql_user:
login_user: root
login_password: 123456
name: sally
state: absent
# Ensure no user named 'sally' exists at all
- mysql_user:
name: sally
host_all: yes
state: absent
# Specify grants composed of more than one word
- mysql_user:
name: replication
password: 12345
priv: "*.*:REPLICATION CLIENT"
state: present
# Revoke all privileges for user 'bob' and password '12345'
- mysql_user:
name: bob
password: 12345
priv: "*.*:USAGE"
state: present
# Example privileges string format
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
# Example using login_unix_socket to connect to server
- mysql_user:
name: root
password: abc123
login_unix_socket: /var/run/mysqld/mysqld.sock
# Example of skipping binary logging while adding user 'bob'
- mysql_user:
name: bob
password: 12345
priv: "*.*:USAGE"
state: present
sql_log_bin: no
# Example .my.cnf file for setting the root password
[client]
user=root
password=n<_665{vS43y
"""
import getpass
import tempfile
import re
import string
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
from ansible.module_utils.six import iteritems
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))
class InvalidPrivsError(Exception):
pass
# ===========================================
# MySQL module specific support methods.
#
# User Authentication Management was changed in MySQL 5.7
# This is a generic check for whether the server version is less than 5.7
def server_version_check(cursor):
cursor.execute("SELECT VERSION()")
result = cursor.fetchone()
version_str = result[0]
version = version_str.split('.')
# Currently we have no facility to handle new-style password update on
# mariadb and the old-style update continues to work
if 'mariadb' in version_str.lower():
return True
if (int(version[0]) <= 5 and int(version[1]) < 7):
return True
else:
return False
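# Illustrative sketch (added commentary, not part of the original module): a version
# string such as "5.6.41-log" or any MariaDB version makes server_version_check() return
# True (old-style SET PASSWORD handling), while "5.7.23" or "8.0.21" returns False and
# the newer ALTER USER syntax is used instead.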
def get_mode(cursor):
cursor.execute('SELECT @@GLOBAL.sql_mode')
result = cursor.fetchone()
mode_str = result[0]
if 'ANSI' in mode_str:
mode = 'ANSI'
else:
mode = 'NOTANSI'
return mode
def user_exists(cursor, user, host, host_all):
if host_all:
cursor.execute("SELECT count(*) FROM user WHERE user = %s", ([user]))
else:
cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host))
count = cursor.fetchone()
return count[0] > 0
def user_add(cursor, user, host, host_all, password, encrypted, new_priv, check_mode):
# we cannot create users without a proper hostname
if host_all:
return False
if check_mode:
return True
if password and encrypted:
cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password))
elif password and not encrypted:
cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password))
else:
cursor.execute("CREATE USER %s@%s", (user,host))
if new_priv is not None:
for db_table, priv in iteritems(new_priv):
privileges_grant(cursor, user,host,db_table,priv)
return True
def is_hash(password):
ishash = False
if len(password) == 41 and password[0] == '*':
if frozenset(password[1:]).issubset(string.hexdigits):
ishash = True
return ishash
def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append_privs, module):
changed = False
grant_option = False
if host_all:
hostnames = user_get_hostnames(cursor, [user])
else:
hostnames = [host]
for host in hostnames:
# Handle clear text and hashed passwords.
if bool(password):
# Determine what user management method server uses
old_user_mgmt = server_version_check(cursor)
if old_user_mgmt:
cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host))
else:
cursor.execute("SELECT authentication_string FROM user WHERE user = %s AND host = %s", (user,host))
current_pass_hash = cursor.fetchone()
if encrypted:
encrypted_string = (password)
if is_hash(password):
if current_pass_hash[0] != encrypted_string:
if module.check_mode:
return True
if old_user_mgmt:
cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password))
else:
cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password AS %s", (user, host, password))
changed = True
else:
module.fail_json(msg="encrypted was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))")
else:
if old_user_mgmt:
cursor.execute("SELECT PASSWORD(%s)", (password,))
else:
cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,))
new_pass_hash = cursor.fetchone()
if current_pass_hash[0] != new_pass_hash[0]:
if module.check_mode:
return True
if old_user_mgmt:
cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user, host, password))
else:
cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password BY %s", (user, host, password))
changed = True
# Handle privileges
if new_priv is not None:
curr_priv = privileges_get(cursor, user,host)
# If the user has privileges on a db.table that doesn't appear at all in
# the new specification, then revoke all privileges on it.
for db_table, priv in iteritems(curr_priv):
# If the user has the GRANT OPTION on a db.table, revoke it first.
if "GRANT" in priv:
grant_option = True
if db_table not in new_priv:
if user != "root" and "PROXY" not in priv and not append_privs:
if module.check_mode:
return True
privileges_revoke(cursor, user,host,db_table,priv,grant_option)
changed = True
# If the user doesn't currently have any privileges on a db.table, then
# we can perform a straight grant operation.
for db_table, priv in iteritems(new_priv):
if db_table not in curr_priv:
if module.check_mode:
return True
privileges_grant(cursor, user,host,db_table,priv)
changed = True
# If the db.table specification exists in both the user's current privileges
# and in the new privileges, then we need to see if there's a difference.
db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
for db_table in db_table_intersect:
priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
if (len(priv_diff) > 0):
if module.check_mode:
return True
if not append_privs:
privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option)
privileges_grant(cursor, user,host,db_table,new_priv[db_table])
changed = True
return changed
def user_delete(cursor, user, host, host_all, check_mode):
if check_mode:
return True
if host_all:
hostnames = user_get_hostnames(cursor, [user])
for hostname in hostnames:
cursor.execute("DROP USER %s@%s", (user, hostname))
else:
cursor.execute("DROP USER %s@%s", (user, host))
return True
def user_get_hostnames(cursor, user):
cursor.execute("SELECT Host FROM mysql.user WHERE user = %s", user)
hostnames_raw = cursor.fetchall()
hostnames = []
for hostname_raw in hostnames_raw:
hostnames.append(hostname_raw[0])
return hostnames
def privileges_get(cursor, user,host):
""" MySQL doesn't have a better method of getting privileges aside from the
SHOW GRANTS query syntax, which requires us to then parse the returned string.
Here's an example of the string that is returned from MySQL:
GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
This function makes the query and returns a dictionary containing the results.
The dictionary format is the same as that returned by privileges_unpack() below.
"""
output = {}
cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
grants = cursor.fetchall()
def pick(x):
if x == 'ALL PRIVILEGES':
return 'ALL'
else:
return x
for grant in grants:
res = re.match("GRANT (.+) ON (.+) TO '.*'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
if res is None:
raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
privileges = res.group(1).split(", ")
privileges = [ pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(4):
privileges.append('GRANT')
if "REQUIRE SSL" in res.group(4):
privileges.append('REQUIRESSL')
db = res.group(2)
output[db] = privileges
return output
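# Illustrative sketch (added commentary, not part of the original module): for the example
# grant shown in the docstring above, privileges_get() would return {'*.*': ['USAGE']};
# a grant ending in "WITH GRANT OPTION" additionally appends 'GRANT' to that list.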
def privileges_unpack(priv, mode):
""" Take a privileges string, typically passed as a parameter, and unserialize
it into a dictionary, the same format as privileges_get() above. We have this
custom format to avoid using YAML/JSON strings inside YAML playbooks. Example
of a privileges string:
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL
The privilege USAGE stands for no privileges, so we add that in on *.* if it's
not specified in the string, as MySQL will always provide this by default.
"""
if mode == 'ANSI':
quote = '"'
else:
quote = '`'
output = {}
privs = []
for item in priv.strip().split('/'):
pieces = item.strip().split(':')
dbpriv = pieces[0].rsplit(".", 1)
# Do not escape if privilege is for database or table, i.e.
# neither quote *. nor .*
for i, side in enumerate(dbpriv):
if side.strip('`') != '*':
dbpriv[i] = '%s%s%s' % (quote, side.strip('`'), quote)
pieces[0] = '.'.join(dbpriv)
if '(' in pieces[1]:
output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
for i in output[pieces[0]]:
privs.append(re.sub(r'\(.*\)','',i))
else:
output[pieces[0]] = pieces[1].upper().split(',')
privs = output[pieces[0]]
new_privs = frozenset(privs)
if not new_privs.issubset(VALID_PRIVS):
raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
if '*.*' not in output:
output['*.*'] = ['USAGE']
# if we are only specifying something like REQUIRESSL and/or GRANT (=WITH GRANT OPTION) in *.*
# we still need to add USAGE as a privilege to avoid syntax errors
if 'REQUIRESSL' in priv and not set(output['*.*']).difference(set(['GRANT', 'REQUIRESSL'])):
output['*.*'].append('USAGE')
return output
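# Illustrative sketch (added commentary, not part of the original module): with the assumed
# input "mydb.*:INSERT,UPDATE/otherdb.*:SELECT" and mode 'NOTANSI', privileges_unpack()
# would return roughly
#   {'`mydb`.*': ['INSERT', 'UPDATE'], '`otherdb`.*': ['SELECT'], '*.*': ['USAGE']}
# i.e. database names are back-quoted, privilege names are upper-cased, and USAGE is added
# for *.* because it was not specified explicitly.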
def privileges_revoke(cursor, user,host,db_table,priv,grant_option):
# Escape '%' since mysql db.execute() uses a format string
db_table = db_table.replace('%', '%%')
if grant_option:
query = ["REVOKE GRANT OPTION ON %s" % db_table]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["REVOKE %s ON %s" % (priv_string, db_table)]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
def privileges_grant(cursor, user,host,db_table,priv):
# Escape '%' since mysql db.execute uses a format string and the
# specification of db and table often use a % (SQL wildcard)
db_table = db_table.replace('%', '%%')
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["GRANT %s ON %s" % (priv_string, db_table)]
query.append("TO %s@%s")
if 'REQUIRESSL' in priv:
query.append("REQUIRE SSL")
if 'GRANT' in priv:
query.append("WITH GRANT OPTION")
query = ' '.join(query)
cursor.execute(query, (user, host))
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
user=dict(required=True, aliases=['name']),
password=dict(default=None, no_log=True, type='str'),
encrypted=dict(default=False, type='bool'),
host=dict(default="localhost"),
host_all=dict(type="bool", default="no"),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
append_privs=dict(default=False, type='bool'),
check_implicit_admin=dict(default=False, type='bool'),
update_password=dict(default="always", choices=["always", "on_create"]),
connect_timeout=dict(default=30, type='int'),
config_file=dict(default="~/.my.cnf", type='path'),
sql_log_bin=dict(default=True, type='bool'),
ssl_cert=dict(default=None, type='path'),
ssl_key=dict(default=None, type='path'),
ssl_ca=dict(default=None, type='path'),
),
supports_check_mode=True
)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
user = module.params["user"]
password = module.params["password"]
encrypted = module.boolean(module.params["encrypted"])
host = module.params["host"].lower()
host_all = module.params["host_all"]
state = module.params["state"]
priv = module.params["priv"]
check_implicit_admin = module.params['check_implicit_admin']
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
append_privs = module.boolean(module.params["append_privs"])
update_password = module.params['update_password']
ssl_cert = module.params["ssl_cert"]
ssl_key = module.params["ssl_key"]
ssl_ca = module.params["ssl_ca"]
db = 'mysql'
sql_log_bin = module.params["sql_log_bin"]
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
cursor = None
try:
if check_implicit_admin:
try:
cursor = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except:
pass
if not cursor:
cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception:
e = get_exception()
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
if not sql_log_bin:
cursor.execute("SET SQL_LOG_BIN=0;")
if priv is not None:
try:
mode = get_mode(cursor)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
try:
priv = privileges_unpack(priv, mode)
except Exception:
e = get_exception()
module.fail_json(msg="invalid privileges string: %s" % str(e))
if state == "present":
if user_exists(cursor, user, host, host_all):
try:
if update_password == 'always':
changed = user_mod(cursor, user, host, host_all, password, encrypted, priv, append_privs, module)
else:
changed = user_mod(cursor, user, host, host_all, None, encrypted, priv, append_privs, module)
except (SQLParseError, InvalidPrivsError, MySQLdb.Error):
e = get_exception()
module.fail_json(msg=str(e))
else:
if host_all:
module.fail_json(msg="host_all parameter cannot be used when adding a user")
try:
changed = user_add(cursor, user, host, host_all, password, encrypted, priv, module.check_mode)
except (SQLParseError, InvalidPrivsError, MySQLdb.Error):
e = get_exception()
module.fail_json(msg=str(e))
elif state == "absent":
if user_exists(cursor, user, host, host_all):
changed = user_delete(cursor, user, host, host_all, module.check_mode)
else:
changed = False
module.exit_json(changed=changed, user=user)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
from ansible.module_utils.mysql import *
if __name__ == '__main__':
main()
|
camradal/ansible
|
lib/ansible/modules/database/mysql/mysql_user.py
|
Python
|
gpl-3.0
| 23,551
|
# -*- coding: utf-8 -*-
"""
Post processing.... take 1
Created on Thu Jan 17 14:32:06 2013
@author: ran110
"""
'''
import subprocess
subprocess.call(['pip', 'install', 'h5py'])
'''
import h5py, os
import numpy as np
import fnmatch
def findFiles(starting_dir='.', pattern='Temp*h5'):
'''look for files to process'''
matches = []
for root, dirnames, filenames in os.walk(starting_dir):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
def calcStats(filelist, outputfilename='results.h5'):
''' open up the hdf5 files and grab the 'data' sets,
in memory...
and then try to run some calcs '''
try:
outputfile = h5py.File(outputfilename, 'w')
bigdata = None
i=0
for currFile in filelist:
f = h5py.File(currFile, 'r')
dataset = f['/data']
mydata = dataset[...]
if bigdata is None:
bigdata = np.zeros([mydata.shape[0], len(filelist)])
bigdata[:,i]=np.squeeze(mydata)
i+=1
print np.std(bigdata, axis=1)
print np.average(bigdata, axis=1)
print np.min(bigdata, axis=1)
print np.max(bigdata, axis=1)
outputfile.create_dataset('std-dev', data=np.std(bigdata, axis=1))
outputfile.create_dataset('average', data=np.average(bigdata, axis=1))
outputfile.create_dataset('min', data=np.min(bigdata, axis=1))
outputfile.create_dataset('max', data=np.max(bigdata, axis=1))
outputfile.close()
print "outputfile created [ok]: %s" % outputfilename
except Exception, e:
print e.message
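# Illustrative sketch (added commentary, not part of the original script): the resulting
# results.h5 contains four 1-D datasets ('std-dev', 'average', 'min', 'max'), each with one
# value per row of the input /data datasets, computed element-wise across all matched files.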
def main():
filelist = findFiles()
calcStats(filelist)
if __name__ == '__main__':
main()
|
victortey/VEGL-Portal
|
scripts/postproc-h5.py
|
Python
|
gpl-3.0
| 1,896
|
"""Support for AdGuard Home sensors."""
from __future__ import annotations
from datetime import timedelta
from adguardhome import AdGuardHome, AdGuardHomeConnectionError
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE, TIME_MILLISECONDS
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import AdGuardHomeDeviceEntity
from .const import DATA_ADGUARD_CLIENT, DATA_ADGUARD_VERSION, DOMAIN
SCAN_INTERVAL = timedelta(seconds=300)
PARALLEL_UPDATES = 4
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up AdGuard Home sensor based on a config entry."""
adguard = hass.data[DOMAIN][entry.entry_id][DATA_ADGUARD_CLIENT]
try:
version = await adguard.version()
except AdGuardHomeConnectionError as exception:
raise PlatformNotReady from exception
hass.data[DOMAIN][entry.entry_id][DATA_ADGUARD_VERSION] = version
sensors = [
AdGuardHomeDNSQueriesSensor(adguard, entry),
AdGuardHomeBlockedFilteringSensor(adguard, entry),
AdGuardHomePercentageBlockedSensor(adguard, entry),
AdGuardHomeReplacedParentalSensor(adguard, entry),
AdGuardHomeReplacedSafeBrowsingSensor(adguard, entry),
AdGuardHomeReplacedSafeSearchSensor(adguard, entry),
AdGuardHomeAverageProcessingTimeSensor(adguard, entry),
AdGuardHomeRulesCountSensor(adguard, entry),
]
async_add_entities(sensors, True)
class AdGuardHomeSensor(AdGuardHomeDeviceEntity, SensorEntity):
"""Defines a AdGuard Home sensor."""
def __init__(
self,
adguard: AdGuardHome,
entry: ConfigEntry,
name: str,
icon: str,
measurement: str,
unit_of_measurement: str,
enabled_default: bool = True,
) -> None:
"""Initialize AdGuard Home sensor."""
self._state: int | str | None = None
self._unit_of_measurement = unit_of_measurement
self.measurement = measurement
super().__init__(adguard, entry, name, icon, enabled_default)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return "_".join(
[
DOMAIN,
self.adguard.host,
str(self.adguard.port),
"sensor",
self.measurement,
]
)
@property
def native_value(self) -> int | str | None:
"""Return the state of the sensor."""
return self._state
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
class AdGuardHomeDNSQueriesSensor(AdGuardHomeSensor):
"""Defines a AdGuard Home DNS Queries sensor."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home sensor."""
super().__init__(
adguard,
entry,
"AdGuard DNS Queries",
"mdi:magnify",
"dns_queries",
"queries",
)
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.stats.dns_queries()
class AdGuardHomeBlockedFilteringSensor(AdGuardHomeSensor):
"""Defines a AdGuard Home blocked by filtering sensor."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home sensor."""
super().__init__(
adguard,
entry,
"AdGuard DNS Queries Blocked",
"mdi:magnify-close",
"blocked_filtering",
"queries",
enabled_default=False,
)
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.stats.blocked_filtering()
class AdGuardHomePercentageBlockedSensor(AdGuardHomeSensor):
"""Defines a AdGuard Home blocked percentage sensor."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home sensor."""
super().__init__(
adguard,
entry,
"AdGuard DNS Queries Blocked Ratio",
"mdi:magnify-close",
"blocked_percentage",
PERCENTAGE,
)
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
percentage = await self.adguard.stats.blocked_percentage()
self._state = f"{percentage:.2f}"
class AdGuardHomeReplacedParentalSensor(AdGuardHomeSensor):
"""Defines a AdGuard Home replaced by parental control sensor."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home sensor."""
super().__init__(
adguard,
entry,
"AdGuard Parental Control Blocked",
"mdi:human-male-girl",
"blocked_parental",
"requests",
)
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.stats.replaced_parental()
class AdGuardHomeReplacedSafeBrowsingSensor(AdGuardHomeSensor):
"""Defines a AdGuard Home replaced by safe browsing sensor."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home sensor."""
super().__init__(
adguard,
entry,
"AdGuard Safe Browsing Blocked",
"mdi:shield-half-full",
"blocked_safebrowsing",
"requests",
)
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.stats.replaced_safebrowsing()
class AdGuardHomeReplacedSafeSearchSensor(AdGuardHomeSensor):
"""Defines a AdGuard Home replaced by safe search sensor."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home sensor."""
super().__init__(
adguard,
entry,
"AdGuard Safe Searches Enforced",
"mdi:shield-search",
"enforced_safesearch",
"requests",
)
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.stats.replaced_safesearch()
class AdGuardHomeAverageProcessingTimeSensor(AdGuardHomeSensor):
"""Defines a AdGuard Home average processing time sensor."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home sensor."""
super().__init__(
adguard,
entry,
"AdGuard Average Processing Speed",
"mdi:speedometer",
"average_speed",
TIME_MILLISECONDS,
)
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
average = await self.adguard.stats.avg_processing_time()
self._state = f"{average:.2f}"
class AdGuardHomeRulesCountSensor(AdGuardHomeSensor):
"""Defines a AdGuard Home rules count sensor."""
def __init__(self, adguard: AdGuardHome, entry: ConfigEntry) -> None:
"""Initialize AdGuard Home sensor."""
super().__init__(
adguard,
entry,
"AdGuard Rules Count",
"mdi:counter",
"rules_count",
"rules",
enabled_default=False,
)
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.filtering.rules_count(allowlist=False)
|
jawilson/home-assistant
|
homeassistant/components/adguard/sensor.py
|
Python
|
apache-2.0
| 7,958
|
from numpy.testing import assert_array_equal, assert_equal, assert_raises
import numpy as np
from skimage._shared.testing import test_parallel
from skimage.draw import (set_color, line, line_aa, polygon, polygon_perimeter,
circle, circle_perimeter, circle_perimeter_aa,
ellipse, ellipse_perimeter,
_bezier_segment, bezier_curve)
def test_set_color():
img = np.zeros((10, 10))
rr, cc = line(0, 0, 0, 30)
set_color(img, (rr, cc), 1)
img_ = np.zeros((10, 10))
img_[0, :] = 1
assert_array_equal(img, img_)
def test_set_color_with_alpha():
img = np.zeros((10, 10))
rr, cc, alpha = line_aa(0, 0, 0, 30)
set_color(img, (rr, cc), 1, alpha=alpha)
# Wrong dimensionality color
assert_raises(ValueError, set_color, img, (rr, cc), (255, 0, 0), alpha=alpha)
img = np.zeros((10, 10, 3))
rr, cc, alpha = line_aa(0, 0, 0, 30)
set_color(img, (rr, cc), (1, 0, 0), alpha=alpha)
@test_parallel()
def test_line_horizontal():
img = np.zeros((10, 10))
rr, cc = line(0, 0, 0, 9)
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[0, :] = 1
assert_array_equal(img, img_)
def test_line_vertical():
img = np.zeros((10, 10))
rr, cc = line(0, 0, 9, 0)
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[:, 0] = 1
assert_array_equal(img, img_)
def test_line_reverse():
img = np.zeros((10, 10))
rr, cc = line(0, 9, 0, 0)
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[0, :] = 1
assert_array_equal(img, img_)
def test_line_diag():
img = np.zeros((5, 5))
rr, cc = line(0, 0, 4, 4)
img[rr, cc] = 1
img_ = np.eye(5)
assert_array_equal(img, img_)
def test_line_aa_horizontal():
img = np.zeros((10, 10))
rr, cc, val = line_aa(0, 0, 0, 9)
set_color(img, (rr, cc), 1, alpha=val)
img_ = np.zeros((10, 10))
img_[0, :] = 1
assert_array_equal(img, img_)
def test_line_aa_vertical():
img = np.zeros((10, 10))
rr, cc, val = line_aa(0, 0, 9, 0)
img[rr, cc] = val
img_ = np.zeros((10, 10))
img_[:, 0] = 1
assert_array_equal(img, img_)
def test_line_aa_diagonal():
img = np.zeros((10, 10))
rr, cc, val = line_aa(0, 0, 9, 6)
img[rr, cc] = 1
# Check that each pixel belonging to line,
# also belongs to line_aa
r, c = line(0, 0, 9, 6)
for x, y in zip(r, c):
        assert_equal(img[x, y], 1)
def test_line_equal_aliasing_horizontally_vertically():
img0 = np.zeros((25, 25))
img1 = np.zeros((25, 25))
# Near-horizontal line
rr, cc, val = line_aa(10, 2, 12, 20)
img0[rr, cc] = val
# Near-vertical (transpose of prior)
rr, cc, val = line_aa(2, 10, 20, 12)
img1[rr, cc] = val
# Difference - should be zero
assert_array_equal(img0, img1.T)
def test_polygon_rectangle():
img = np.zeros((10, 10), 'uint8')
rr, cc = polygon((1, 4, 4, 1, 1), (1, 1, 4, 4, 1))
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[1:4, 1:4] = 1
assert_array_equal(img, img_)
def test_polygon_rectangle_angular():
img = np.zeros((10, 10), 'uint8')
poly = np.array(((0, 3), (4, 7), (7, 4), (3, 0), (0, 3)))
rr, cc = polygon(poly[:, 0], poly[:, 1])
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_polygon_parallelogram():
img = np.zeros((10, 10), 'uint8')
poly = np.array(((1, 1), (5, 1), (7, 6), (3, 6), (1, 1)))
rr, cc = polygon(poly[:, 0], poly[:, 1])
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_polygon_exceed():
img = np.zeros((10, 10), 'uint8')
poly = np.array(((1, -1), (100, -1), (100, 100), (1, 100), (1, 1)))
rr, cc = polygon(poly[:, 0], poly[:, 1], img.shape)
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[1:, :] = 1
assert_array_equal(img, img_)
def test_circle():
img = np.zeros((15, 15), 'uint8')
rr, cc = circle(7, 7, 6)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_circle_perimeter_bresenham():
img = np.zeros((15, 15), 'uint8')
rr, cc = circle_perimeter(7, 7, 0, method='bresenham')
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[7][7] == 1)
img = np.zeros((17, 15), 'uint8')
rr, cc = circle_perimeter(7, 7, 7, method='bresenham')
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_circle_perimeter_bresenham_shape():
img = np.zeros((15, 20), 'uint8')
rr, cc = circle_perimeter(7, 10, 9, method='bresenham', shape=(15, 20))
img[rr, cc] = 1
shift = 5
img_ = np.zeros((15 + 2 * shift, 20), 'uint8')
rr, cc = circle_perimeter(7 + shift, 10, 9, method='bresenham', shape=None)
img_[rr, cc] = 1
assert_array_equal(img, img_[shift:-shift, :])
def test_circle_perimeter_andres():
img = np.zeros((15, 15), 'uint8')
rr, cc = circle_perimeter(7, 7, 0, method='andres')
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[7][7] == 1)
img = np.zeros((17, 15), 'uint8')
rr, cc = circle_perimeter(7, 7, 7, method='andres')
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_circle_perimeter_aa():
img = np.zeros((15, 15), 'uint8')
rr, cc, val = circle_perimeter_aa(7, 7, 0)
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[7][7] == 1)
img = np.zeros((17, 17), 'uint8')
rr, cc, val = circle_perimeter_aa(8, 8, 7)
img[rr, cc] = val * 255
img_ = np.array(
[[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 82, 180, 236, 255, 236, 180, 82, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 189, 172, 74, 18, 0, 18, 74, 172, 189, 0, 0, 0, 0],
[ 0, 0, 0, 229, 25, 0, 0, 0, 0, 0, 0, 0, 25, 229, 0, 0, 0],
[ 0, 0, 189, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 189, 0, 0],
[ 0, 82, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 172, 82, 0],
[ 0, 180, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 180, 0],
[ 0, 236, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 236, 0],
[ 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0],
[ 0, 236, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 236, 0],
[ 0, 180, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 180, 0],
[ 0, 82, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 172, 82, 0],
[ 0, 0, 189, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 189, 0, 0],
[ 0, 0, 0, 229, 25, 0, 0, 0, 0, 0, 0, 0, 25, 229, 0, 0, 0],
[ 0, 0, 0, 0, 189, 172, 74, 18, 0, 18, 74, 172, 189, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 82, 180, 236, 255, 236, 180, 82, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_circle_perimeter_aa_shape():
img = np.zeros((15, 20), 'uint8')
rr, cc, val = circle_perimeter_aa(7, 10, 9, shape=(15, 20))
img[rr, cc] = val * 255
shift = 5
img_ = np.zeros((15 + 2 * shift, 20), 'uint8')
rr, cc, val = circle_perimeter_aa(7 + shift, 10, 9, shape=None)
img_[rr, cc] = val * 255
assert_array_equal(img, img_[shift:-shift, :])
def test_ellipse_trivial():
img = np.zeros((2, 2), 'uint8')
rr, cc = ellipse(0.5, 0.5, 0.5, 0.5)
img[rr, cc] = 1
img_correct = np.array([
[0, 0],
[0, 0]
])
assert_array_equal(img, img_correct)
img = np.zeros((2, 2), 'uint8')
rr, cc = ellipse(0.5, 0.5, 1.1, 1.1)
img[rr, cc] = 1
img_correct = np.array([
[1, 1],
[1, 1],
])
assert_array_equal(img, img_correct)
img = np.zeros((3, 3), 'uint8')
rr, cc = ellipse(1, 1, 0.9, 0.9)
img[rr, cc] = 1
img_correct = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0],
])
assert_array_equal(img, img_correct)
img = np.zeros((3, 3), 'uint8')
rr, cc = ellipse(1, 1, 1.1, 1.1)
img[rr, cc] = 1
img_correct = np.array([
[0, 1, 0],
[1, 1, 1],
[0, 1, 0],
])
assert_array_equal(img, img_correct)
img = np.zeros((3, 3), 'uint8')
rr, cc = ellipse(1, 1, 1.5, 1.5)
img[rr, cc] = 1
img_correct = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
])
assert_array_equal(img, img_correct)
def test_ellipse_generic():
img = np.zeros((4, 4), 'uint8')
rr, cc = ellipse(1.5, 1.5, 1.1, 1.7)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 0, 0],
])
assert_array_equal(img, img_)
img = np.zeros((5, 5), 'uint8')
rr, cc = ellipse(2, 2, 1.7, 1.7)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
])
assert_array_equal(img, img_)
img = np.zeros((10, 10), 'uint8')
rr, cc = ellipse(5, 5, 3, 4)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
assert_array_equal(img, img_)
img = np.zeros((10, 10), 'uint8')
rr, cc = ellipse(4.5, 5, 3.5, 4)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
assert_array_equal(img, img_)
img = np.zeros((15, 15), 'uint8')
rr, cc = ellipse(7, 7, 3, 7)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
assert_array_equal(img, img_)
def test_ellipse_with_shape():
img = np.zeros((15, 15), 'uint8')
rr, cc = ellipse(7, 7, 3, 10, shape=img.shape)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_ellipse_negative():
rr, cc = ellipse(-3, -3, 1.7, 1.7)
rr_, cc_ = np.nonzero(np.array([
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
]))
assert_array_equal(rr, rr_ - 5)
assert_array_equal(cc, cc_ - 5)
def test_ellipse_perimeter_dot_zeroangle():
# dot, angle == 0
img = np.zeros((30, 15), 'uint8')
rr, cc = ellipse_perimeter(15, 7, 0, 0, 0)
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[15][7] == 1)
def test_ellipse_perimeter_dot_nzeroangle():
# dot, angle != 0
img = np.zeros((30, 15), 'uint8')
rr, cc = ellipse_perimeter(15, 7, 0, 0, 1)
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[15][7] == 1)
def test_ellipse_perimeter_flat_zeroangle():
# flat ellipse
img = np.zeros((20, 18), 'uint8')
img_ = np.zeros((20, 18), 'uint8')
rr, cc = ellipse_perimeter(6, 7, 0, 5, 0)
img[rr, cc] = 1
rr, cc = line(6, 2, 6, 12)
img_[rr, cc] = 1
assert_array_equal(img, img_)
def test_ellipse_perimeter_zeroangle():
# angle == 0
img = np.zeros((30, 15), 'uint8')
rr, cc = ellipse_perimeter(15, 7, 14, 6, 0)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_ellipse_perimeter_nzeroangle():
# angle != 0
img = np.zeros((30, 25), 'uint8')
rr, cc = ellipse_perimeter(15, 11, 12, 6, 1.1)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_ellipse_perimeter_shape():
img = np.zeros((15, 20), 'uint8')
rr, cc = ellipse_perimeter(7, 10, 9, 9, 0, shape=(15, 20))
img[rr, cc] = 1
shift = 5
img_ = np.zeros((15 + 2 * shift, 20), 'uint8')
rr, cc = ellipse_perimeter(7 + shift, 10, 9, 9, 0, shape=None)
img_[rr, cc] = 1
assert_array_equal(img, img_[shift:-shift, :])
def test_bezier_segment_straight():
image = np.zeros((200, 200), dtype=int)
x0 = 50
y0 = 50
x1 = 150
y1 = 50
x2 = 150
y2 = 150
rr, cc = _bezier_segment(x0, y0, x1, y1, x2, y2, 0)
image[rr, cc] = 1
image2 = np.zeros((200, 200), dtype=int)
rr, cc = line(x0, y0, x2, y2)
image2[rr, cc] = 1
assert_array_equal(image, image2)
def test_bezier_segment_curved():
img = np.zeros((25, 25), 'uint8')
x1, y1 = 20, 20
x2, y2 = 20, 2
x3, y3 = 2, 2
rr, cc = _bezier_segment(x1, y1, x2, y2, x3, y3, 1)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_equal(img[x1, y1], 1)
assert_equal(img[x3, y3], 1)
assert_array_equal(img, img_)
def test_bezier_curve_straight():
image = np.zeros((200, 200), dtype=int)
x0 = 50
y0 = 50
x1 = 150
y1 = 50
x2 = 150
y2 = 150
rr, cc = bezier_curve(x0, y0, x1, y1, x2, y2, 0)
    image[rr, cc] = 1
image2 = np.zeros((200, 200), dtype=int)
rr, cc = line(x0, y0, x2, y2)
    image2[rr, cc] = 1
assert_array_equal(image, image2)
def test_bezier_curved_weight_eq_1():
img = np.zeros((23, 8), 'uint8')
x1, y1 = (1, 1)
x2, y2 = (11, 11)
x3, y3 = (21, 1)
rr, cc = bezier_curve(x1, y1, x2, y2, x3, y3, 1)
img[rr, cc] = 1
assert_equal(img[x1, y1], 1)
assert_equal(img[x3, y3], 1)
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_equal(img, img_)
def test_bezier_curved_weight_neq_1():
img = np.zeros((23, 10), 'uint8')
x1, y1 = (1, 1)
x2, y2 = (11, 11)
x3, y3 = (21, 1)
rr, cc = bezier_curve(x1, y1, x2, y2, x3, y3, 2)
img[rr, cc] = 1
assert_equal(img[x1, y1], 1)
assert_equal(img[x3, y3], 1)
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_equal(img, img_)
def test_bezier_curve_shape():
img = np.zeros((15, 20), 'uint8')
x1, y1 = (1, 5)
x2, y2 = (6, 11)
x3, y3 = (1, 14)
rr, cc = bezier_curve(x1, y1, x2, y2, x3, y3, 2, shape=(15, 20))
img[rr, cc] = 1
shift = 5
img_ = np.zeros((15 + 2 * shift, 20), 'uint8')
x1, y1 = (1 + shift, 5)
x2, y2 = (6 + shift, 11)
x3, y3 = (1 + shift, 14)
rr, cc = bezier_curve(x1, y1, x2, y2, x3, y3, 2, shape=None)
img_[rr, cc] = 1
assert_array_equal(img, img_[shift:-shift, :])
def test_polygon_perimeter():
expected = np.array(
[[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]]
)
out = np.zeros_like(expected)
rr, cc = polygon_perimeter([0, 2, 2, 0],
[0, 0, 3, 3])
out[rr, cc] = 1
assert_array_equal(out, expected)
out = np.zeros_like(expected)
rr, cc = polygon_perimeter([-1, -1, 3, 3],
[-1, 4, 4, -1],
shape=out.shape, clip=True)
out[rr, cc] = 1
assert_array_equal(out, expected)
assert_raises(ValueError, polygon_perimeter, [0], [1], clip=True)
def test_polygon_perimeter_outside_image():
rr, cc = polygon_perimeter([-1, -1, 3, 3],
[-1, 4, 4, -1], shape=(3, 4))
assert_equal(len(rr), 0)
assert_equal(len(cc), 0)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
pratapvardhan/scikit-image
|
skimage/draw/tests/test_draw.py
|
Python
|
bsd-3-clause
| 29,321
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
FromHex,
ToHex,
hash256,
hex_str_to_bytes,
ser_uint256,
sha256,
uint256_from_str,
)
from .script import (
CScript,
CScriptNum,
CScriptOp,
OP_0,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
hash160,
)
from .util import assert_equal
from io import BytesIO
MAX_BLOCK_SIGOPS = 20000
# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1296688602
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
def create_block(hashprev, coinbase, ntime=None, *, version=1):
"""Create a block (with regtest difficulty)."""
block = CBlock()
block.nVersion = version
if ntime is None:
import time
block.nTime = int(time.time() + 600)
else:
block.nTime = ntime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
"""Add a witness commitment to the block's coinbase transaction.
According to BIP141, blocks with witness rules active must commit to the
hash of all in-block transactions including witness."""
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
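# Illustrative sketch (added commentary, not part of the original helper): per BIP141 the
# commitment above is hash256(witness merkle root || witness nonce), carried in an extra
# OP_RETURN coinbase output whose data starts with the 0xaa21a9ed header, while the nonce
# itself sits in the coinbase input witness stack. A typical (assumed) call site:
#   block = create_block(tip_hash, create_coinbase(height))  # tip_hash is hypothetical
#   add_witness_commitment(block)
#   block.solve()  # assumes CBlock.solve() from this framework to grind the regtest PoW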
def script_BIP34_coinbase_height(height):
if height <= 16:
res = CScriptOp.encode_op_n(height)
# Append dummy to increase scriptSig size above 2 (see bad-cb-length consensus rule)
return CScript([res, OP_1])
return CScript([CScriptNum(height)])
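# Illustrative sketch (added commentary, not part of the original helper): small heights use
# the single-byte OP_N encodings plus a dummy, larger heights a minimal CScriptNum push, e.g.
#   script_BIP34_coinbase_height(1)   -> CScript([OP_1, OP_1])
#   script_BIP34_coinbase_height(500) -> CScript([CScriptNum(500)])
# keeping the coinbase scriptSig at or above the 2-byte bad-cb-length minimum.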
def create_coinbase(height, pubkey=None):
"""Create a coinbase transaction, assuming no miner fees.
If pubkey is passed in, the coinbase output will be a P2PK output;
otherwise an anyone-can-spend output."""
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50 * COIN
halvings = int(height / 150) # regtest
coinbaseoutput.nValue >>= halvings
if (pubkey is not None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
coinbase.calc_sha256()
return coinbase
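# Illustrative sketch (added commentary, not part of the original helper): on regtest the
# subsidy halves every 150 blocks, so create_coinbase(1) pays 50 * COIN to an OP_TRUE
# (anyone-can-spend) output, while create_coinbase(300) would pay 12.5 * COIN.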
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
"""
tx = CTransaction()
assert n < len(prevtx.vout)
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
tx.vout.append(CTxOut(amount, script_pub_key))
tx.calc_sha256()
return tx
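# Illustrative sketch (added commentary, not part of the original helper): a typical
# (assumed) use is spending a matured coinbase back to an anyone-can-spend output, e.g.
#   tx = create_tx_with_script(coinbase_tx, 0, amount=49 * COIN)  # coinbase_tx hypothetical
# leaving the remaining value as fee.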
def create_transaction(node, txid, to_address, *, amount):
""" Return signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx)))
return tx
def create_raw_transaction(node, txid, to_address, *, amount):
""" Return raw signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
rawtx = node.createrawtransaction(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
signresult = node.signrawtransactionwithwallet(rawtx)
assert_equal(signresult["complete"], True)
return signresult['hex']
def get_legacy_sigopcount_block(block, accurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, accurate)
return count
def get_legacy_sigopcount_tx(tx, accurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(accurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(accurate)
return count
def witness_script(use_p2wsh, pubkey):
"""Create a scriptPubKey for a pay-to-witness TxOut.
This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
1-of-1 multisig for the given pubkey. Returns the hex encoding of the
scriptPubKey."""
if not use_p2wsh:
# P2WPKH instead
pubkeyhash = hash160(hex_str_to_bytes(pubkey))
pkscript = CScript([OP_0, pubkeyhash])
else:
# 1-of-1 multisig
witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
scripthash = sha256(witness_program)
pkscript = CScript([OP_0, scripthash])
return pkscript.hex()
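# Illustrative note (editorial addition): the two script shapes returned by
# witness_script() for a hypothetical pubkey P are:
#   P2WPKH: OP_0 <20-byte HASH160(P)>
#   P2WSH:  OP_0 <32-byte SHA256(1-of-1 CHECKMULTISIG script for P)>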
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
"""Return a transaction (in hex) that spends the given utxo to a segwit output.
Optionally wrap the segwit output using P2SH."""
if use_p2wsh:
program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
if not encode_p2sh:
assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
"""Create a transaction spending a given utxo to a segwit output.
The output corresponds to the given pubkey: use_p2wsh determines whether to
use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
sign=True will have the given node sign the transaction.
insert_redeem_script will be added to the scriptSig, if given."""
tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransactionwithwallet(tx_to_witness)
assert "errors" not in signed or len(["errors"]) == 0
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx = FromHex(CTransaction(), tx_to_witness)
tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
tx_to_witness = ToHex(tx)
return node.sendrawtransaction(tx_to_witness)
|
ahmedbodi/vertcoin
|
test/functional/test_framework/blocktools.py
|
Python
|
mit
| 8,126
|
#
# Copyright 2019 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
""" constants file """
# Kernel Namespace
KERNEL = 'kernel'
# I/O Signature (Symbols and constants)
IO_SIGNATURE = 'io_signature::'
SIGNATURE_LIST = ['makev', 'make3', 'make2', 'make']
MAKE = 'make'
MAKE2 = 'make2'
MAKE3 = 'make3'
MAKEV = 'makev'
# message ports id
MESSAGE_INPUT = 'message_port_register_in'
MESSAGE_OUTPUT = 'message_port_register_out'
# Symbols and constants required for parsing
GR = 'gr-'
UTILS = 'utils'
OPEN_BRACKET = '('
CLOSE_BRACKET = ')'
STRIP_SYMBOLS = ' ,:)'
EXCLAMATION = '!'
# Blocktool special comments
BLOCKTOOL = '! BlockTool'
END_BLOCKTOOL = 'EndTool !'
INPUT_SIG = 'input_signature'
OUTPUT_SIG = 'output_signature'
INPUT_MIN = 'input_min_streams'
INPUT_MAX = 'input_max_streams'
OUTPUT_MIN = 'output_min_streams'
OUTPUT_MAX = 'output_max_streams'
INPUT_MAKE_SIZE = 'input_sizeof_stream_item'
INPUT_MAKEV_SIZE = 'input_sizeof_stream_items'
INPUT_MAKE_SIZE1 = 'input_sizeof_stream_item1'
INPUT_MAKE_SIZE2 = 'input_sizeof_stream_item2'
INPUT_MAKE_SIZE3 = 'input_sizeof_stream_item3'
OUTPUT_MAKE_SIZE = 'output_sizeof_stream_item'
OUTPUT_MAKEV_SIZE = 'output_sizeof_stream_items'
OUTPUT_MAKE_SIZE1 = 'output_sizeof_stream_item1'
OUTPUT_MAKE_SIZE2 = 'output_sizeof_stream_item2'
OUTPUT_MAKE_SIZE3 = 'output_sizeof_stream_item3'
INPUT_PORT = 'message_input'
OUTPUT_PORT = 'message_output'
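# Hedged illustration (editorial addition, not part of the original constants
# module): one way the symbols above could be used to pull the arguments out of
# an "io_signature::make(...)" call. The sample line is made up.
def _example_split_signature(line='io_signature::make(1, 1, item_size)'):
    args = line.split(IO_SIGNATURE, 1)[1]
    args = args[args.index(OPEN_BRACKET) + 1:args.index(CLOSE_BRACKET)]
    return [arg.strip(STRIP_SYMBOLS) for arg in args.split(',')]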
|
jdemel/gnuradio
|
gr-utils/blocktool/core/Constants.py
|
Python
|
gpl-3.0
| 1,460
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale_timesheet.tests.common import TestCommonSaleTimesheetNoChart
from odoo.exceptions import UserError
class TestProjectBilling(TestCommonSaleTimesheetNoChart):
""" This test suite provide checks for miscellaneous small things. """
@classmethod
def setUpClass(cls):
super(TestProjectBilling, cls).setUpClass()
# set up
cls.setUpServiceProducts()
cls.setUpEmployees()
cls.employee_tde = cls.env['hr.employee'].create({
'name': 'Employee TDE',
'timesheet_cost': 42,
})
cls.partner_2 = cls.env['res.partner'].create({
'name': 'Customer from the South',
'email': 'customer.usd@south.com',
'customer': True,
'property_account_payable_id': cls.account_payable.id,
'property_account_receivable_id': cls.account_receivable.id,
})
# Sale Order 1, no project/task created, used to timesheet at employee rate
SaleOrder = cls.env['sale.order'].with_context(tracking_disable=True)
SaleOrderLine = cls.env['sale.order.line'].with_context(tracking_disable=True)
cls.sale_order_1 = SaleOrder.create({
'partner_id': cls.partner_customer_usd.id,
'partner_invoice_id': cls.partner_customer_usd.id,
'partner_shipping_id': cls.partner_customer_usd.id,
})
cls.so1_line_order_no_task = SaleOrderLine.create({
'name': cls.product_order_timesheet1.name,
'product_id': cls.product_order_timesheet1.id,
'product_uom_qty': 10,
'product_uom': cls.product_order_timesheet1.uom_id.id,
'price_unit': cls.product_order_timesheet1.list_price,
'order_id': cls.sale_order_1.id,
})
cls.so1_line_deliver_no_task = SaleOrderLine.create({
'name': cls.product_delivery_timesheet1.name,
'product_id': cls.product_delivery_timesheet1.id,
'product_uom_qty': 10,
'product_uom': cls.product_delivery_timesheet1.uom_id.id,
'price_unit': cls.product_delivery_timesheet1.list_price,
'order_id': cls.sale_order_1.id,
})
# Sale Order 2, creates 2 project billed at task rate
cls.sale_order_2 = SaleOrder.create({
'partner_id': cls.partner_2.id,
'partner_invoice_id': cls.partner_2.id,
'partner_shipping_id': cls.partner_2.id,
})
cls.so2_line_deliver_project_task = SaleOrderLine.create({
'order_id': cls.sale_order_2.id,
'name': cls.product_delivery_timesheet3.name,
'product_id': cls.product_delivery_timesheet3.id,
'product_uom_qty': 5,
'product_uom': cls.product_delivery_timesheet3.uom_id.id,
'price_unit': cls.product_delivery_timesheet3.list_price
})
cls.so2_line_deliver_project_template = SaleOrderLine.create({
'order_id': cls.sale_order_2.id,
'name': cls.product_delivery_timesheet5.name,
'product_id': cls.product_delivery_timesheet5.id,
'product_uom_qty': 7,
'product_uom': cls.product_delivery_timesheet5.uom_id.id,
'price_unit': cls.product_delivery_timesheet5.list_price
})
cls.sale_order_2.action_confirm()
# Projects: at least one per billable type
Project = cls.env['project.project'].with_context(tracking_disable=True)
cls.project_subtask = Project.create({
'name': "Sub Task Project (non billable)",
'allow_timesheets': True,
'billable_type': 'no',
'partner_id': False,
})
cls.project_non_billable = Project.create({
'name': "Non Billable Project",
'allow_timesheets': True,
'billable_type': 'no',
'partner_id': False,
'subtask_project_id': cls.project_subtask.id,
})
cls.project_task_rate = cls.env['project.project'].search([('sale_line_id', '=', cls.so2_line_deliver_project_task.id)], limit=1)
cls.project_task_rate2 = cls.env['project.project'].search([('sale_line_id', '=', cls.so2_line_deliver_project_template.id)], limit=1)
cls.project_employee_rate = Project.create({
'name': "Project billed at Employee Rate",
'allow_timesheets': True,
'billable_type': 'employee_rate',
'sale_order_id': cls.sale_order_1.id,
'partner_id': cls.sale_order_1.partner_id.id,
'subtask_project_id': cls.project_subtask.id,
})
cls.project_employee_rate_manager = cls.env['project.sale.line.employee.map'].create({
'project_id': cls.project_employee_rate.id,
'sale_line_id': cls.so1_line_order_no_task.id,
'employee_id': cls.employee_manager.id,
})
cls.project_employee_rate_user = cls.env['project.sale.line.employee.map'].create({
'project_id': cls.project_employee_rate.id,
'sale_line_id': cls.so1_line_deliver_no_task.id,
'employee_id': cls.employee_user.id,
})
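        # Fixture summary (editorial note, not part of the original setup):
        #   sale_order_1 -> two SOLs, no project/task; backs the employee-rate map
        #   sale_order_2 -> confirmed; its two delivered SOLs create the task-rate projects
        #   project_employee_rate map: manager -> so1 ordered line, user -> so1 delivered line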
def test_make_billable_at_task_rate(self):
""" Starting from a non billable project, make it billable at task rate, using the wizard """
Timesheet = self.env['account.analytic.line']
Task = self.env['project.task']
# set a customer on the project
self.project_non_billable.write({
'partner_id': self.partner_2.id
})
# create a task and 2 timesheets
task = Task.with_context(default_project_id=self.project_non_billable.id).create({
'name': 'first task',
'partner_id': self.project_non_billable.partner_id.id,
'planned_hours': 10,
})
timesheet1 = Timesheet.create({
'name': 'Test Line',
'project_id': task.project_id.id,
'task_id': task.id,
'unit_amount': 3,
'employee_id': self.employee_manager.id,
})
timesheet2 = Timesheet.create({
'name': 'Test Line tde',
'project_id': task.project_id.id,
'task_id': task.id,
'unit_amount': 2,
'employee_id': self.employee_tde.id,
})
# create wizard
wizard = self.env['project.create.sale.order'].with_context(active_id=self.project_non_billable.id, active_model='project.project').create({
'product_id': self.product_delivery_timesheet3.id, # product creates new T in new P
'price_unit': self.product_delivery_timesheet3.list_price,
'billable_type': 'project_rate',
})
self.assertEqual(self.project_non_billable.billable_type, 'no', "The project should still be non billable")
self.assertEqual(wizard.partner_id, self.project_non_billable.partner_id, "The wizard should have the same partner as the project")
# create the SO from the project
action = wizard.action_create_sale_order()
sale_order = self.env['sale.order'].browse(action['res_id'])
self.assertEqual(self.project_non_billable.billable_type, 'task_rate', "The project should be 'task rate' billable")
self.assertEqual(sale_order.partner_id, self.project_non_billable.partner_id, "The customer of the SO should be the same as the project")
self.assertEqual(len(sale_order.order_line), 1, "The SO should have 1 line")
self.assertEqual(sale_order.order_line.product_id, wizard.product_id, "The product of the only SOL should be the selected on the wizard")
self.assertEqual(sale_order.order_line.project_id, self.project_non_billable, "SOL should be linked to the project")
self.assertTrue(sale_order.order_line.task_id, "The SOL creates a task as they were no task already present in the project (system limitation)")
self.assertEqual(sale_order.order_line.task_id.project_id, self.project_non_billable, "The created task should be in the project")
        self.assertEqual(sale_order.order_line.qty_delivered, timesheet1.unit_amount + timesheet2.unit_amount, "The created SOL should have a delivered quantity equal to the sum of the tasks' timesheets")
def test_make_billable_at_employee_rate(self):
""" Starting from a non billable project, make it billable at employee rate, using the wizard """
Timesheet = self.env['account.analytic.line']
Task = self.env['project.task']
# set a customer on the project
self.project_non_billable.write({
'partner_id': self.partner_2.id
})
# create a task and 2 timesheets
task = Task.with_context(default_project_id=self.project_non_billable.id).create({
'name': 'first task',
'partner_id': self.project_non_billable.partner_id.id,
'planned_hours': 10,
})
timesheet1 = Timesheet.create({
'name': 'Test Line',
'project_id': task.project_id.id,
'task_id': task.id,
'unit_amount': 3,
'employee_id': self.employee_manager.id,
})
timesheet2 = Timesheet.create({
'name': 'Test Line tde',
'project_id': task.project_id.id,
'task_id': task.id,
'unit_amount': 2,
'employee_id': self.employee_user.id,
})
# create wizard
wizard = self.env['project.create.sale.order'].with_context(active_id=self.project_non_billable.id, active_model='project.project').create({
'billable_type': 'employee_rate',
'partner_id': self.partner_2.id,
'line_ids': [
(0, 0, {'product_id': self.product_delivery_timesheet1.id, 'price_unit': 15, 'employee_id': self.employee_tde.id}), # product creates no T
(0, 0, {'product_id': self.product_delivery_timesheet1.id, 'price_unit': 15, 'employee_id': self.employee_manager.id}), # product creates no T (same product than previous one)
(0, 0, {'product_id': self.product_delivery_timesheet3.id, 'price_unit': self.product_delivery_timesheet3.list_price, 'employee_id': self.employee_user.id}), # product creates new T in new P
]
})
self.assertEqual(self.project_non_billable.billable_type, 'no', "The project should still be non billable")
self.assertEqual(wizard.partner_id, self.project_non_billable.partner_id, "The wizard should have the same partner as the project")
self.assertEqual(wizard.project_id, self.project_non_billable, "The wizard'project should be the non billable project")
# create the SO from the project
action = wizard.action_create_sale_order()
sale_order = self.env['sale.order'].browse(action['res_id'])
self.assertEqual(self.project_non_billable.billable_type, 'employee_rate', "The project should be 'employee rate' billable")
self.assertEqual(sale_order.partner_id, self.project_non_billable.partner_id, "The customer of the SO should be the same as the project")
        self.assertEqual(len(sale_order.order_line), 2, "The SO should have 2 lines, as the wizard map contained the same product twice with the same price (for 2 different employees)")
        self.assertEqual(len(self.project_non_billable.sale_line_employee_ids), 3, "The project should have 3 lines in its map")
        self.assertEqual(self.project_non_billable.sale_line_id, sale_order.order_line[0], "The wizard sets the sale line fallback on the project to the first line of the list")
self.assertEqual(task.sale_line_id, sale_order.order_line[0], "The wizard sets sale line fallback on tasks")
self.assertEqual(task.partner_id, wizard.partner_id, "The wizard sets the customer on tasks to make SOL line field visible")
line1 = sale_order.order_line.filtered(lambda sol: sol.product_id == self.product_delivery_timesheet1)
line2 = sale_order.order_line.filtered(lambda sol: sol.product_id == self.product_delivery_timesheet3)
        self.assertTrue(line1, "Sale line 1 with product 1 should exist")
        self.assertTrue(line2, "Sale line 2 with product 3 should exist")
        self.assertFalse(line1.project_id, "Sale line 1 should not be linked to any project")
        self.assertEqual(line2.project_id, self.project_non_billable, "Sale line 2 should be linked to the 'non billable' project")
        self.assertEqual(line1.price_unit, 15, "The unit price of SOL 1 should be 15")
        self.assertEqual(line1.product_uom_qty, 0, "The ordered qty of SOL 1 should be zero")
        self.assertEqual(line2.product_uom_qty, 0, "The ordered qty of SOL 2 should be zero")
self.assertEqual(self.project_non_billable.sale_line_employee_ids.mapped('sale_line_id'), sale_order.order_line, "The SO lines of the map should be the same of the sales order")
        self.assertEqual(timesheet1.so_line, line1, "Timesheet1 should be linked to sale line 1, as the manager employee created the timesheet")
        self.assertEqual(timesheet2.so_line, line2, "Timesheet2 should be linked to sale line 2, as the user employee created the timesheet")
        self.assertEqual(timesheet1.unit_amount, line1.qty_delivered, "Sale line 1 should have a delivered qty equal to the sum of its linked timesheets")
        self.assertEqual(timesheet2.unit_amount, line2.qty_delivered, "Sale line 2 should have a delivered qty equal to the sum of its linked timesheets")
def test_billing_employee_rate(self):
""" Check task and subtask creation, and timesheeting in a project billed at 'employee rate'. Then move the task into a 'task rate' project. """
Task = self.env['project.task'].with_context(tracking_disable=True)
Timesheet = self.env['account.analytic.line']
# create a task
task = Task.with_context(default_project_id=self.project_employee_rate.id).create({
'name': 'first task',
'partner_id': self.partner_customer_usd.id,
})
self.assertEqual(task.billable_type, 'employee_rate', "Task in project 'employee rate' should be billed at employee rate")
self.assertFalse(task.sale_line_id, "Task created in a project billed on 'employee rate' should not be linked to a SOL")
self.assertEqual(task.partner_id, task.project_id.partner_id, "Task created in a project billed on 'employee rate' should have the same customer as the one from the project")
# log timesheet on task
timesheet1 = Timesheet.create({
'name': 'Test Line',
'project_id': task.project_id.id,
'task_id': task.id,
'unit_amount': 50,
'employee_id': self.employee_manager.id,
})
self.assertEqual(self.project_employee_rate_manager.sale_line_id, timesheet1.so_line, "The timesheet should be linked to the SOL associated to the Employee manager in the map")
self.assertEqual(self.project_employee_rate_manager.project_id, timesheet1.project_id, "The timesheet should be linked to the project of the map entry")
# create a subtask
subtask = Task.with_context(default_project_id=self.project_employee_rate.subtask_project_id.id).create({
'name': 'first subtask task',
'parent_id': task.id,
})
self.assertEqual(subtask.billable_type, 'no', "Subtask in non billable project should be non billable too")
self.assertEqual(subtask.project_id.billable_type, 'no', "The subtask project is non billable even if the subtask is")
        self.assertEqual(subtask.partner_id, subtask.parent_id.partner_id, "Subtask should have the same customer as its parent task")
# log timesheet on subtask
timesheet2 = Timesheet.create({
'name': 'Test Line on subtask',
'project_id': subtask.project_id.id,
'task_id': subtask.id,
'unit_amount': 50,
'employee_id': self.employee_user.id,
})
self.assertEqual(subtask.project_id, timesheet2.project_id, "The timesheet is in the subtask project")
self.assertNotEqual(self.project_employee_rate_user.project_id, timesheet2.project_id, "The timesheet should not be linked to the billing project for the map")
self.assertFalse(timesheet2.so_line, "The timesheet should not be linked to SOL as the task is in a non billable project")
# move task into task rate project
task.write({
'project_id': self.project_task_rate.id,
})
task._onchange_project()
self.assertEqual(task.billable_type, 'task_rate', "Task in project 'task rate' should be billed at task rate")
        self.assertEqual(task.sale_line_id, self.project_task_rate.sale_line_id, "Task moved into a task rate billable project should be linked to that project's SOL")
        self.assertEqual(task.partner_id, task.project_id.partner_id, "Task moved into a project billed on 'task rate' should have the same customer as the one from the project")
# move subtask into task rate project
subtask.write({
'project_id': self.project_task_rate2.id,
})
self.assertEqual(task.billable_type, 'task_rate', "Subtask should keep the billable type from its parent, even when they are moved into another project")
self.assertEqual(task.sale_line_id, self.project_task_rate.sale_line_id, "Subtask should keep the same sale order line than their mother, even when they are moved into another project")
# create a second task in employee rate project
task2 = Task.with_context(default_project_id=self.project_employee_rate.id).create({
'name': 'first task',
'partner_id': self.partner_customer_usd.id,
'sale_line_id': False
})
# log timesheet on task in 'employee rate' project without any fallback (no map, no SOL on task, no SOL on project)
timesheet3 = Timesheet.create({
'name': 'Test Line',
'project_id': task2.project_id.id,
'task_id': task2.id,
'unit_amount': 3,
'employee_id': self.employee_tde.id,
})
self.assertFalse(timesheet3.so_line, "The timesheet should not be linked to SOL as there is no fallback at all (no map, no SOL on task, no SOL on project)")
# add a SOL on the project as fallback
self.project_employee_rate.write({'sale_line_id': self.so1_line_deliver_no_task.id})
        # log timesheet on task in 'employee rate' project with the project fallback only (no map, no SOL on task, but SOL on project)
timesheet4 = Timesheet.create({
'name': 'Test Line ',
'project_id': task2.project_id.id,
'task_id': task2.id,
'unit_amount': 4,
'employee_id': self.employee_tde.id,
})
self.assertEquals(timesheet4.so_line, self.so1_line_deliver_no_task, "The timesheet should be linked to SOL on the project, as no entry for TDE in project map and no SOL on task")
def test_billing_task_rate(self):
""" Check task and subtask creation, and timesheeting in a project billed at 'task rate'. Then move the task into a 'employee rate' project then, 'non billable'. """
Task = self.env['project.task'].with_context(tracking_disable=True)
Timesheet = self.env['account.analytic.line']
# set subtask project on task rate project
self.project_task_rate.write({'subtask_project_id': self.project_subtask.id})
# create a task
task = Task.with_context(default_project_id=self.project_task_rate.id).create({
'name': 'first task',
'partner_id': self.partner_customer_usd.id,
})
task._onchange_project()
self.assertEqual(task.billable_type, 'task_rate', "Task in project 'task rate' should be billed at task rate")
self.assertEqual(task.sale_line_id, self.project_task_rate.sale_line_id, "Task created in a project billed on 'task rate' should be linked to a SOL of the project")
        self.assertEqual(task.partner_id, task.project_id.partner_id, "Task created in a project billed on 'task rate' should have the same customer as the one from the project")
# log timesheet on task
timesheet1 = Timesheet.create({
'name': 'Test Line',
'project_id': task.project_id.id,
'task_id': task.id,
'unit_amount': 50,
'employee_id': self.employee_manager.id,
})
        self.assertEqual(self.project_task_rate.sale_line_id, timesheet1.so_line, "The timesheet should be linked to the project's SOL, since the project is billed at task rate")
# create a subtask
subtask = Task.with_context(default_project_id=self.project_task_rate.subtask_project_id.id).create({
'name': 'first subtask task',
'parent_id': task.id,
})
self.assertEqual(subtask.billable_type, 'task_rate', "Subtask in a non billable project with a so line set is task rate billable")
self.assertEqual(subtask.project_id.billable_type, 'no', "The subtask project is non billable even if the subtask is")
        self.assertEqual(subtask.partner_id, subtask.parent_id.partner_id, "Subtask should have the same customer as its parent task")
# log timesheet on subtask
timesheet2 = Timesheet.create({
'name': 'Test Line on subtask',
'project_id': subtask.project_id.id,
'task_id': subtask.id,
'unit_amount': 50,
'employee_id': self.employee_user.id,
})
self.assertEqual(subtask.project_id, timesheet2.project_id, "The timesheet is in the subtask project")
        self.assertEqual(timesheet2.so_line, subtask.sale_line_id, "The timesheet should be linked to the task's SOL, even in a non billable project")
# move task and subtask into task rate project
task.write({
'project_id': self.project_employee_rate.id,
})
task._onchange_project()
subtask.write({
'project_id': self.project_employee_rate.id,
})
subtask._onchange_project()
self.assertEqual(task.billable_type, 'employee_rate', "Task moved in project 'employee rate' should be billed at employee rate")
        self.assertFalse(task.sale_line_id, "Task moved into an employee rate billable project should have an empty sale order line")
        self.assertEqual(task.partner_id, task.project_id.partner_id, "Task moved into a project billed on 'employee rate' should have the same customer as the one from the project")
        self.assertEqual(subtask.billable_type, 'employee_rate', "Subtask moved into project 'employee rate' should be billed at employee rate")
        self.assertFalse(subtask.sale_line_id, "Subtask moved into an employee rate billable project should have an empty sale order line")
        self.assertEqual(subtask.partner_id, task.project_id.partner_id, "Subtask moved into a project billed on 'employee rate' should have the same customer as the one from the project")
|
t3dev/odoo
|
addons/sale_timesheet/tests/test_project_billing.py
|
Python
|
gpl-3.0
| 23,191
|
#! /usr/bin/env python2
# Copyright (c) 2014 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006 The Regents of The University of Michigan
# Copyright (c) 2007,2011 The Hewlett-Packard Development Company
# Copyright (c) 2016 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Steve Reinhardt
import sys
import os
from os.path import join as joinpath
current_dir = os.path.dirname(__file__)
sys.path.insert(0, current_dir)
from style.verifiers import all_verifiers
from style.style import MercurialUI, check_ignores
from style.region import *
from mercurial import bdiff, mdiff, commands
def modified_regions(old_data, new_data):
regions = Regions()
beg = None
for pbeg, pend, fbeg, fend in bdiff.blocks(old_data, new_data):
if beg is not None and beg != fbeg:
regions.append(beg, fbeg)
beg = fend
return regions
def modregions(wctx, fname):
fctx = wctx.filectx(fname)
pctx = fctx.parents()
file_data = fctx.data()
lines = mdiff.splitnewlines(file_data)
if len(pctx) in (1, 2):
mod_regions = modified_regions(pctx[0].data(), file_data)
if len(pctx) == 2:
m2 = modified_regions(pctx[1].data(), file_data)
# only the lines that are new in both
mod_regions &= m2
else:
mod_regions = Regions()
mod_regions.append(0, len(lines))
return mod_regions
def _modified_regions(repo, patterns, **kwargs):
opt_all = kwargs.get('all', False)
opt_no_ignore = kwargs.get('no_ignore', False)
# Import the match (repository file name matching helper)
# function. Different versions of Mercurial keep it in different
# modules and implement them differently.
try:
from mercurial import scmutil
m = scmutil.match(repo[None], patterns, kwargs)
except ImportError:
from mercurial import cmdutil
m = cmdutil.match(repo, patterns, kwargs)
modified, added, removed, deleted, unknown, ignore, clean = \
repo.status(match=m, clean=opt_all)
if not opt_all:
try:
wctx = repo.workingctx()
except:
from mercurial import context
wctx = context.workingctx(repo)
files = [ (fn, all_regions) for fn in added ] + \
[ (fn, modregions(wctx, fn)) for fn in modified ]
else:
files = [ (fn, all_regions) for fn in added + modified + clean ]
for fname, mod_regions in files:
if opt_no_ignore or not check_ignores(fname):
yield fname, mod_regions
def do_check_style(hgui, repo, *pats, **opts):
"""check files for proper m5 style guidelines
Without an argument, checks all modified and added files for gem5
coding style violations. A list of files can be specified to limit
the checker to a subset of the repository. The style rules are
normally applied on a diff of the repository state (i.e., added
files are checked in their entirety while only modifications of
modified files are checked).
The --all option can be specified to include clean files and check
modified files in their entirety.
The --fix-<check>, --ignore-<check>, and --skip-<check> options
can be used to control individual style checks:
--fix-<check> will perform the check and automatically attempt to
    fix any style errors (printing a warning if unsuccessful)
--ignore-<check> will perform the check but ignore any errors
found (other than printing a message for each)
--skip-<check> will skip performing the check entirely
If none of these options are given, all checks will be performed
and the user will be prompted on how to handle each error.
--fix-all, --ignore-all, and --skip-all are equivalent to specifying
--fix-<check>, --ignore-<check>, or --skip-<check> for all checks,
respectively. However, option settings for specific checks take
precedence. Thus --skip-all --fix-white can be used to skip every
check other than whitespace errors, which will be checked and
automatically fixed.
The -v/--verbose flag will display the offending line(s) as well
as their location.
"""
ui = MercurialUI(hgui, verbose=hgui.verbose)
    # instantiate verifier objects
verifiers = [v(ui, opts, base=repo.root) for v in all_verifiers]
for fname, mod_regions in _modified_regions(repo, pats, **opts):
for verifier in verifiers:
if verifier.apply(joinpath(repo.root, fname), mod_regions):
return True
return False
def check_hook(hooktype):
if hooktype not in ('pretxncommit', 'pre-qrefresh'):
raise AttributeError, \
"This hook is not meant for %s" % hooktype
# This function provides a hook that is called before transaction
# commit and on qrefresh
def check_style(ui, repo, hooktype, **kwargs):
check_hook(hooktype)
args = {}
try:
return do_check_style(ui, repo, **args)
except Exception, e:
import traceback
traceback.print_exc()
return True
try:
from mercurial.i18n import _
except ImportError:
def _(arg):
return arg
_common_region_options = [
('a', 'all', False,
_("include clean files and unmodified parts of modified files")),
('', 'no-ignore', False, _("ignore the style ignore list")),
]
fix_opts = [('f', 'fix-all', False, _("fix all style errors"))] + \
[('', 'fix-' + v.opt_name, False,
_('fix errors in ' + v.test_name)) for v in all_verifiers]
ignore_opts = [('', 'ignore-all', False, _("ignore all style errors"))] + \
[('', 'ignore-' + v.opt_name, False,
_('ignore errors in ' + v.test_name)) for v in all_verifiers]
skip_opts = [('', 'skip-all', False, _("skip all style error checks"))] + \
[('', 'skip-' + v.opt_name, False,
_('skip checking for ' + v.test_name)) for v in all_verifiers]
all_opts = fix_opts + ignore_opts + skip_opts
cmdtable = {
'^m5style' : (
do_check_style, all_opts + _common_region_options + commands.walkopts,
_('hg m5style [-a] [FILE]...')),
}
if __name__ == '__main__':
print >> sys.stderr, "This file cannot be used from the command line. Use"
print >> sys.stderr, "style.py instead."
sys.exit(1)
|
HwisooSo/gemV-update
|
util/hgstyle.py
|
Python
|
bsd-3-clause
| 8,370
|
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal as assert_close
from numpy.testing import (assert_array_equal, assert_raises,
assert_almost_equal)
import skimage
from skimage import data
from skimage import exposure
from skimage.exposure.exposure import intensity_range
from skimage.color import rgb2gray
from skimage.util.dtype import dtype_range
from skimage._shared._warnings import expected_warnings
# Test integer histograms
# =======================
def test_negative_overflow():
im = np.array([-1, 127], dtype=np.int8)
frequencies, bin_centers = exposure.histogram(im)
assert_array_equal(bin_centers, np.arange(-1, 128))
assert frequencies[0] == 1
assert frequencies[-1] == 1
assert_array_equal(frequencies[1:-1], 0)
def test_all_negative_image():
im = np.array([-128, -1], dtype=np.int8)
frequencies, bin_centers = exposure.histogram(im)
assert_array_equal(bin_centers, np.arange(-128, 0))
assert frequencies[0] == 1
assert frequencies[-1] == 1
assert_array_equal(frequencies[1:-1], 0)
# Test histogram equalization
# ===========================
np.random.seed(0)
test_img_int = data.camera()
# squeeze image intensities to lower image contrast
test_img = skimage.img_as_float(test_img_int)
test_img = exposure.rescale_intensity(test_img / 5. + 100)
def test_equalize_uint8_approx():
"""Check integer bins used for uint8 images."""
img_eq0 = exposure.equalize_hist(test_img_int)
img_eq1 = exposure.equalize_hist(test_img_int, nbins=3)
np.testing.assert_allclose(img_eq0, img_eq1)
def test_equalize_ubyte():
with expected_warnings(['precision loss']):
img = skimage.img_as_ubyte(test_img)
img_eq = exposure.equalize_hist(img)
cdf, bin_edges = exposure.cumulative_distribution(img_eq)
check_cdf_slope(cdf)
def test_equalize_float():
img = skimage.img_as_float(test_img)
img_eq = exposure.equalize_hist(img)
cdf, bin_edges = exposure.cumulative_distribution(img_eq)
check_cdf_slope(cdf)
def test_equalize_masked():
img = skimage.img_as_float(test_img)
mask = np.zeros(test_img.shape)
mask[50:150, 50:250] = 1
img_mask_eq = exposure.equalize_hist(img, mask=mask)
img_eq = exposure.equalize_hist(img)
cdf, bin_edges = exposure.cumulative_distribution(img_mask_eq)
check_cdf_slope(cdf)
assert not (img_eq == img_mask_eq).all()
def check_cdf_slope(cdf):
"""Slope of cdf which should equal 1 for an equalized histogram."""
norm_intensity = np.linspace(0, 1, len(cdf))
slope, intercept = np.polyfit(norm_intensity, cdf, 1)
assert 0.9 < slope < 1.1
# Test intensity range
# ====================
def test_intensity_range_uint8():
image = np.array([0, 1], dtype=np.uint8)
input_and_expected = [('image', [0, 1]),
('dtype', [0, 255]),
((10, 20), [10, 20])]
for range_values, expected_values in input_and_expected:
out = intensity_range(image, range_values=range_values)
yield assert_array_equal, out, expected_values
def test_intensity_range_float():
image = np.array([0.1, 0.2], dtype=np.float64)
input_and_expected = [('image', [0.1, 0.2]),
('dtype', [-1, 1]),
((0.3, 0.4), [0.3, 0.4])]
for range_values, expected_values in input_and_expected:
out = intensity_range(image, range_values=range_values)
yield assert_array_equal, out, expected_values
def test_intensity_range_clipped_float():
image = np.array([0.1, 0.2], dtype=np.float64)
out = intensity_range(image, range_values='dtype', clip_negative=True)
assert_array_equal(out, (0, 1))
# Test rescale intensity
# ======================
uint10_max = 2**10 - 1
uint12_max = 2**12 - 1
uint14_max = 2**14 - 1
uint16_max = 2**16 - 1
def test_rescale_stretch():
image = np.array([51, 102, 153], dtype=np.uint8)
out = exposure.rescale_intensity(image)
assert out.dtype == np.uint8
assert_close(out, [0, 127, 255])
def test_rescale_shrink():
image = np.array([51., 102., 153.])
out = exposure.rescale_intensity(image)
assert_close(out, [0, 0.5, 1])
def test_rescale_in_range():
image = np.array([51., 102., 153.])
out = exposure.rescale_intensity(image, in_range=(0, 255))
assert_close(out, [0.2, 0.4, 0.6])
def test_rescale_in_range_clip():
image = np.array([51., 102., 153.])
out = exposure.rescale_intensity(image, in_range=(0, 102))
assert_close(out, [0.5, 1, 1])
def test_rescale_out_range():
image = np.array([-10, 0, 10], dtype=np.int8)
out = exposure.rescale_intensity(image, out_range=(0, 127))
assert out.dtype == np.int8
assert_close(out, [0, 63, 127])
def test_rescale_named_in_range():
image = np.array([0, uint10_max, uint10_max + 100], dtype=np.uint16)
out = exposure.rescale_intensity(image, in_range='uint10')
assert_close(out, [0, uint16_max, uint16_max])
def test_rescale_named_out_range():
image = np.array([0, uint16_max], dtype=np.uint16)
out = exposure.rescale_intensity(image, out_range='uint10')
assert_close(out, [0, uint10_max])
def test_rescale_uint12_limits():
image = np.array([0, uint16_max], dtype=np.uint16)
out = exposure.rescale_intensity(image, out_range='uint12')
assert_close(out, [0, uint12_max])
def test_rescale_uint14_limits():
image = np.array([0, uint16_max], dtype=np.uint16)
out = exposure.rescale_intensity(image, out_range='uint14')
assert_close(out, [0, uint14_max])
# Test adaptive histogram equalization
# ====================================
def test_adapthist_scalar():
"""Test a scalar uint8 image
"""
img = skimage.img_as_ubyte(data.moon())
adapted = exposure.equalize_adapthist(img, kernel_size=64, clip_limit=0.02)
assert adapted.min() == 0.0
assert adapted.max() == 1.0
assert img.shape == adapted.shape
full_scale = skimage.exposure.rescale_intensity(skimage.img_as_float(img))
assert_almost_equal = np.testing.assert_almost_equal
assert_almost_equal(peak_snr(full_scale, adapted), 102.066, 3)
assert_almost_equal(norm_brightness_err(full_scale, adapted),
0.038, 3)
return img, adapted
def test_adapthist_grayscale():
"""Test a grayscale float image
"""
img = skimage.img_as_float(data.astronaut())
img = rgb2gray(img)
img = np.dstack((img, img, img))
with expected_warnings(['precision loss|non-contiguous input',
'deprecated']):
adapted_old = exposure.equalize_adapthist(img, 10, 9, clip_limit=0.001,
nbins=128)
with expected_warnings(['precision loss|non-contiguous input']):
adapted = exposure.equalize_adapthist(img, kernel_size=(57, 51),
clip_limit=0.01, nbins=128)
assert img.shape == adapted.shape
assert_almost_equal(peak_snr(img, adapted), 102.078, 3)
assert_almost_equal(norm_brightness_err(img, adapted), 0.0529, 3)
return data, adapted
def test_adapthist_color():
"""Test an RGB color uint16 image
"""
img = skimage.img_as_uint(data.astronaut())
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
hist, bin_centers = exposure.histogram(img)
assert len(w) > 0
with expected_warnings(['precision loss']):
adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
assert_almost_equal = np.testing.assert_almost_equal
assert adapted.min() == 0
assert adapted.max() == 1.0
assert img.shape == adapted.shape
full_scale = skimage.exposure.rescale_intensity(img)
assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 1)
assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.02, 2)
return data, adapted
def test_adapthist_alpha():
"""Test an RGBA color image
"""
img = skimage.img_as_float(data.astronaut())
alpha = np.ones((img.shape[0], img.shape[1]), dtype=float)
img = np.dstack((img, alpha))
with expected_warnings(['precision loss']):
adapted = exposure.equalize_adapthist(img)
assert adapted.shape != img.shape
img = img[:, :, :3]
full_scale = skimage.exposure.rescale_intensity(img)
assert img.shape == adapted.shape
assert_almost_equal = np.testing.assert_almost_equal
assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 2)
assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0248, 3)
def peak_snr(img1, img2):
"""Peak signal to noise ratio of two images
Parameters
----------
img1 : array-like
img2 : array-like
Returns
-------
peak_snr : float
Peak signal to noise ratio
"""
if img1.ndim == 3:
img1, img2 = rgb2gray(img1.copy()), rgb2gray(img2.copy())
img1 = skimage.img_as_float(img1)
img2 = skimage.img_as_float(img2)
mse = 1. / img1.size * np.square(img1 - img2).sum()
_, max_ = dtype_range[img1.dtype.type]
return 20 * np.log(max_ / mse)
def norm_brightness_err(img1, img2):
"""Normalized Absolute Mean Brightness Error between two images
Parameters
----------
img1 : array-like
img2 : array-like
Returns
-------
norm_brightness_error : float
Normalized absolute mean brightness error
"""
if img1.ndim == 3:
img1, img2 = rgb2gray(img1), rgb2gray(img2)
ambe = np.abs(img1.mean() - img2.mean())
nbe = ambe / dtype_range[img1.dtype.type][1]
return nbe
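# Hedged usage sketch (editorial addition, not one of the original tests):
# comparing an image with a slightly brightened copy using the two metrics
# defined above. The values are arbitrary.
def _example_compare_metrics():
    img = np.linspace(0, 1, 64).reshape(8, 8)
    brighter = np.clip(img + 0.05, 0, 1)
    return peak_snr(img, brighter), norm_brightness_err(img, brighter)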
# Test Gamma Correction
# =====================
def test_adjust_gamma_one():
"""Same image should be returned for gamma equal to one"""
image = np.random.uniform(0, 255, (8, 8))
result = exposure.adjust_gamma(image, 1)
assert_array_equal(result, image)
def test_adjust_gamma_zero():
"""White image should be returned for gamma equal to zero"""
image = np.random.uniform(0, 255, (8, 8))
result = exposure.adjust_gamma(image, 0)
dtype = image.dtype.type
assert_array_equal(result, dtype_range[dtype][1])
def test_adjust_gamma_less_one():
"""Verifying the output with expected results for gamma
correction with gamma equal to half"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 0, 31, 45, 55, 63, 71, 78, 84],
[ 90, 95, 100, 105, 110, 115, 119, 123],
[127, 131, 135, 139, 142, 146, 149, 153],
[156, 159, 162, 165, 168, 171, 174, 177],
[180, 183, 186, 188, 191, 194, 196, 199],
[201, 204, 206, 209, 211, 214, 216, 218],
[221, 223, 225, 228, 230, 232, 234, 236],
[238, 241, 243, 245, 247, 249, 251, 253]], dtype=np.uint8)
result = exposure.adjust_gamma(image, 0.5)
assert_array_equal(result, expected)
def test_adjust_gamma_greater_one():
"""Verifying the output with expected results for gamma
correction with gamma equal to two"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 0, 0, 0, 0, 1, 1, 2, 3],
[ 4, 5, 6, 7, 9, 10, 12, 14],
[ 16, 18, 20, 22, 25, 27, 30, 33],
[ 36, 39, 42, 45, 49, 52, 56, 60],
[ 64, 68, 72, 76, 81, 85, 90, 95],
[100, 105, 110, 116, 121, 127, 132, 138],
[144, 150, 156, 163, 169, 176, 182, 189],
[196, 203, 211, 218, 225, 233, 241, 249]], dtype=np.uint8)
result = exposure.adjust_gamma(image, 2)
assert_array_equal(result, expected)
def test_adjust_gamma_negative():
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
assert_raises(ValueError, exposure.adjust_gamma, image, -1)
# Test Logarithmic Correction
# ===========================
def test_adjust_log():
"""Verifying the output with expected results for logarithmic
    correction with the multiplier constant equal to unity"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 0, 5, 11, 16, 22, 27, 33, 38],
[ 43, 48, 53, 58, 63, 68, 73, 77],
[ 82, 86, 91, 95, 100, 104, 109, 113],
[117, 121, 125, 129, 133, 137, 141, 145],
[149, 153, 157, 160, 164, 168, 172, 175],
[179, 182, 186, 189, 193, 196, 199, 203],
[206, 209, 213, 216, 219, 222, 225, 228],
[231, 234, 238, 241, 244, 246, 249, 252]], dtype=np.uint8)
result = exposure.adjust_log(image, 1)
assert_array_equal(result, expected)
def test_adjust_inv_log():
"""Verifying the output with expected results for inverse logarithmic
    correction with the multiplier constant equal to unity"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 0, 2, 5, 8, 11, 14, 17, 20],
[ 23, 26, 29, 32, 35, 38, 41, 45],
[ 48, 51, 55, 58, 61, 65, 68, 72],
[ 76, 79, 83, 87, 90, 94, 98, 102],
[106, 110, 114, 118, 122, 126, 130, 134],
[138, 143, 147, 151, 156, 160, 165, 170],
[174, 179, 184, 188, 193, 198, 203, 208],
[213, 218, 224, 229, 234, 239, 245, 250]], dtype=np.uint8)
result = exposure.adjust_log(image, 1, True)
assert_array_equal(result, expected)
# Test Sigmoid Correction
# =======================
def test_adjust_sigmoid_cutoff_one():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to one and gain of 5"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 1, 1, 1, 2, 2, 2, 2, 2],
[ 3, 3, 3, 4, 4, 4, 5, 5],
[ 5, 6, 6, 7, 7, 8, 9, 10],
[ 10, 11, 12, 13, 14, 15, 16, 18],
[ 19, 20, 22, 24, 25, 27, 29, 32],
[ 34, 36, 39, 41, 44, 47, 50, 54],
[ 57, 61, 64, 68, 72, 76, 80, 85],
[ 89, 94, 99, 104, 108, 113, 118, 123]], dtype=np.uint8)
result = exposure.adjust_sigmoid(image, 1, 5)
assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_zero():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to zero and gain of 10"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[127, 137, 147, 156, 166, 175, 183, 191],
[198, 205, 211, 216, 221, 225, 229, 232],
[235, 238, 240, 242, 244, 245, 247, 248],
[249, 250, 250, 251, 251, 252, 252, 253],
[253, 253, 253, 253, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254]], dtype=np.uint8)
result = exposure.adjust_sigmoid(image, 0, 10)
assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_half():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to half and gain of 10"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 1, 1, 2, 2, 3, 3, 4, 5],
[ 5, 6, 7, 9, 10, 12, 14, 16],
[ 19, 22, 25, 29, 34, 39, 44, 50],
[ 57, 64, 72, 80, 89, 99, 108, 118],
[128, 138, 148, 158, 167, 176, 184, 192],
[199, 205, 211, 217, 221, 226, 229, 233],
[236, 238, 240, 242, 244, 246, 247, 248],
[249, 250, 250, 251, 251, 252, 252, 253]], dtype=np.uint8)
result = exposure.adjust_sigmoid(image, 0.5, 10)
assert_array_equal(result, expected)
def test_adjust_inv_sigmoid_cutoff_half():
"""Verifying the output with expected results for inverse sigmoid
correction with cutoff equal to half and gain of 10"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[253, 253, 252, 252, 251, 251, 250, 249],
[249, 248, 247, 245, 244, 242, 240, 238],
[235, 232, 229, 225, 220, 215, 210, 204],
[197, 190, 182, 174, 165, 155, 146, 136],
[126, 116, 106, 96, 87, 78, 70, 62],
[ 55, 49, 43, 37, 33, 28, 25, 21],
[ 18, 16, 14, 12, 10, 8, 7, 6],
[ 5, 4, 4, 3, 3, 2, 2, 1]], dtype=np.uint8)
result = exposure.adjust_sigmoid(image, 0.5, 10, True)
assert_array_equal(result, expected)
def test_negative():
image = np.arange(-10, 245, 4).reshape(8, 8).astype(np.double)
assert_raises(ValueError, exposure.adjust_gamma, image)
def test_is_low_contrast():
image = np.linspace(0, 0.04, 100)
assert exposure.is_low_contrast(image)
image[-1] = 1
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
image = (image * 255).astype(np.uint8)
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
image = (image.astype(np.uint16)) * 2**8
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
|
ofgulban/scikit-image
|
skimage/exposure/tests/test_exposure.py
|
Python
|
bsd-3-clause
| 17,192
|
#!/usr/bin/env python
import os
import sys
import pytest
from openmc import Material
from openmc.data import NATURAL_ABUNDANCE, atomic_mass
def test_element_wo():
# This test doesn't require an OpenMC run. We just need to make sure the
# element.expand() method expands elements with the proper nuclide
# compositions.
h_am = (NATURAL_ABUNDANCE['H1'] * atomic_mass('H1') +
NATURAL_ABUNDANCE['H2'] * atomic_mass('H2'))
o_am = (NATURAL_ABUNDANCE['O17'] * atomic_mass('O17') +
(NATURAL_ABUNDANCE['O16'] + NATURAL_ABUNDANCE['O18'])
* atomic_mass('O16'))
water_am = 2 * h_am + o_am
water = Material()
water.add_element('O', o_am / water_am, 'wo')
water.add_element('H', 2 * h_am / water_am, 'wo')
densities = water.get_nuclide_densities()
for nuc in densities.keys():
assert nuc in ('H1', 'H2', 'O16', 'O17')
if nuc in ('H1', 'H2'):
val = 2 * NATURAL_ABUNDANCE[nuc] * atomic_mass(nuc) / water_am
assert densities[nuc][1] == pytest.approx(val)
if nuc == 'O16':
val = (NATURAL_ABUNDANCE[nuc] + NATURAL_ABUNDANCE['O18']) \
* atomic_mass(nuc) / water_am
assert densities[nuc][1] == pytest.approx(val)
if nuc == 'O17':
val = NATURAL_ABUNDANCE[nuc] * atomic_mass(nuc) / water_am
assert densities[nuc][1] == pytest.approx(val)
|
mit-crpg/openmc
|
tests/unit_tests/test_element_wo.py
|
Python
|
mit
| 1,427
|
"""
Fake Software Secure page for use in acceptance tests.
"""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic.base import View
from edxmako.shortcuts import render_to_response
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
class SoftwareSecureFakeView(View):
"""
Fake SoftwareSecure view for testing different photo verification statuses
and email functionality.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render a fake Software Secure page that will pick the most recent
attempt for a given user and pass it to the html page.
"""
context_dict = self.response_post_params(request.user)
return render_to_response("verify_student/test/fake_softwaresecure_response.html", context_dict)
@classmethod
def response_post_params(cls, user):
"""
Calculate the POST params we want to send back to the client.
"""
access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]
context = {
'receipt_id': None,
'authorization_code': 'SIS {}:0000'.format(access_key),
'results_callback': reverse('verify_student_results_callback')
}
try:
most_recent = SoftwareSecurePhotoVerification.objects.filter(user=user).order_by("-updated_at")[0]
context["receipt_id"] = most_recent.receipt_id
except: # pylint: disable=bare-except
pass
return context
|
BehavioralInsightsTeam/edx-platform
|
lms/djangoapps/verify_student/tests/fake_software_secure.py
|
Python
|
agpl-3.0
| 1,684
|
"""Config flow to configure the Ambient PWS component."""
from aioambient import Client
from aioambient.errors import AmbientError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import aiohttp_client
from .const import CONF_APP_KEY, DOMAIN # pylint: disable=unused-import
class AmbientStationFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle an Ambient PWS config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
def __init__(self):
"""Initialize the config flow."""
self.data_schema = vol.Schema(
{vol.Required(CONF_API_KEY): str, vol.Required(CONF_APP_KEY): str}
)
async def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user",
data_schema=self.data_schema,
errors=errors if errors else {},
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return await self._show_form()
await self.async_set_unique_id(user_input[CONF_APP_KEY])
self._abort_if_unique_id_configured()
session = aiohttp_client.async_get_clientsession(self.hass)
client = Client(
user_input[CONF_API_KEY], user_input[CONF_APP_KEY], session=session
)
try:
devices = await client.api.get_devices()
except AmbientError:
return await self._show_form({"base": "invalid_key"})
if not devices:
return await self._show_form({"base": "no_devices"})
# The Application Key (which identifies each config entry) is too long
# to show nicely in the UI, so we take the first 12 characters (similar
# to how GitHub does it):
return self.async_create_entry(
title=user_input[CONF_APP_KEY][:12], data=user_input
)
|
tchellomello/home-assistant
|
homeassistant/components/ambient_station/config_flow.py
|
Python
|
apache-2.0
| 2,215
|
# Copyright (C) 2011, CloudCaptive
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from constants import ACCOUNT_STATUS
from django.utils import simplejson
from entities import memcache_db
from entities.pending_create import Pending_Create
from google.appengine.api import mail
from google.appengine.ext import webapp
from google.appengine.ext.db import NotSavedError
from google.appengine.ext.webapp import template
from serverside.dao import accounts_dao, pending_create_dao
from serverside.entities.emails import Email
from tools import utils
from tools.xss import XssCleaner
import constants
import environment
import logging
import messages
import uuid
json = simplejson
class NewsLetterSignUp(webapp.RequestHandler):
def post(self):
clean = XssCleaner()
email = self.request.get('email')
email = clean.strip(email)
newemail = Email(email=email)
newemail.put()
ret = {"success":"true"}
ret = json.dumps(ret)
self.response.out.write(ret)
class SignUp(webapp.RequestHandler):
"""
get: is used to activate an account.
post: is used to handle sign up requests coming from web form.
"""
def get(self):
"""Account activation via email"""
values = {'error_message' : "Activation not successful.",
'error': True}
id = self.request.get("activate")
error_message = self.request.get("error_msg")
if id == None or id == "":
if error_message:
values['error_message'] = error_message
logging.error("Activation attempted without ID")
else:
"""Look up the account in pending creates table"""
try:
pending_entity = Pending_Create.get_by_key_name(id)
account = None
if pending_entity != None:
""" Look up corresponding account entity """
email = pending_entity.email
account = memcache_db.get_entity(email, "Accounts")
else:
logging.error("Pending entity could not be looked up.")
if account != None:
if account.isEnabled == ACCOUNT_STATUS.PENDING_CREATE:
update_fields = {"isEnabled" : ACCOUNT_STATUS.ENABLED }
memcache_db.update_fields(email, "Accounts", update_fields)
try:
""" remove item from pending creates """
pending_entity.delete()
except NotSavedError:
logging.error("Entity with id: " + id + " was not in data store...")
values = {'activation' : True}
else:
logging.error("Account status is not pending create")
except:
logging.error("Activation tried and failed with ID: " + id)
""" render with values set above """
self.response.out.write(template.render(constants.TEMPLATE_PATHS.CONSOLE_LOGIN, values))
def post(self):
email = self.request.get("email")
password = self.request.get("password")
repeat_password = self.request.get('repeat_password')
show_links = self.request.get("show_links")
if not utils.validEmail(email):
values = {"success" : False,
"message" : "ERROR: You need to provide a valid email address."}
if show_links == "yes":
values['givelinks'] = True
self.response.out.write(template.render(constants.TEMPLATE_PATHS.CONSOLE_SIGN_UP, values))
logging.error("Bad email %s"%email)
return
if password != repeat_password:
values = {"success" : False,
"message" : "ERROR: Passwords did not match."}
if show_links == "yes":
values['givelinks'] = True
logging.error("Bad passwords for email %s"%email)
self.response.out.write(template.render(constants.TEMPLATE_PATHS.CONSOLE_SIGN_UP, values))
return
ent_type = 'Accounts'
existing_account = memcache_db.get_entity(email, ent_type)
if existing_account != None:
logging.error('An account already exists with that email: ' + existing_account.email)
""" if the account is a test account, activate the account """
if email in constants.TEST_ACCOUNTS and environment.is_dev():
logging.debug("Account is a valid test account")
memcache_db.delete_entity(existing_account, email)
accounts_dao.create_account(email, password, True)
message = "Your test account has been activated!"
values = {"success" : True,
"message" : message}
if show_links == "yes":
values['givelinks'] = True
elif existing_account.isEnabled == ACCOUNT_STATUS.PENDING_CREATE:
""" REPEAT SIGN UP WITH UNACTIVATED ACCOUNT!!!!!!!!! """
""" send the email again with the same activation ID """
pc = pending_create_dao.get_id_by_email(email)
activate_url = get_activate_url(pc.id)
email_sent = send_email(email, activate_url)
logging.info("Repeat sign up for account that was not activated yet. An email will be sent to with same activation link. Email: " + email + ", activation link: " + activate_url)
message = ""
if email_sent:
message = "An email has been sent to you with a link to activate your account!"
else:
message = "There was an error during account creation. Please send an email to support@cloudcaptive.com"
values = {"success" : True,
"message" : message}
if show_links == "yes":
values['givelinks'] = True
else:
message = "ERROR: An account using this email address already exists. Contact support@cloudcaptive for support."
values = {"success" : False,
"message" : message}
if show_links == "yes":
values['givelinks'] = True
else:
"""create an account and send an email for validation"""
accounts_dao.create_account(email, password)
"""Add email to pending creates table"""
id = str(uuid.uuid4())
pending_create = Pending_Create(key_name=id, id=id, email=email)
pending_create.put()
"""send an email to user to complete set up, get arguments in the string will be email and cookie ID"""
activate_url = get_activate_url(id)
logging.info("Activation URL for account: " + email + " is " + activate_url)
email_sent = send_email(email, activate_url)
message = ""
if email_sent:
message = "Sign up was a success. An activation link has been sent to your email address."
else:
message = "There was an error during account creation. Please send an email to support@cloudcaptive.com"
values = {"success" : True,
"message" : message}
if show_links == "yes":
values['givelinks'] = True
""" Render result with whatever values were filled in above """
self.response.out.write(template.render(constants.TEMPLATE_PATHS.CONSOLE_SIGN_UP, values))
def get_activate_url(id):
return constants.WEB_SIGNUP_URLS.ACTIVATE_URL + "?activate=" + id
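# Illustrative example (hypothetical values, not taken from the real config): if
# ACTIVATE_URL were "https://console.example.com/activate" and the pending-create
# id were "1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed", get_activate_url(id) would return
# "https://console.example.com/activate?activate=1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed".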
def send_email(email, activate_url):
email_sent = False
try:
mail.send_mail(sender="UserInfuser <" + constants.APP_OWNER_EMAIL + ">",
to=email,
subject="Welcome to UserInfuser!",
body= messages.get_activation_email(activate_url))
email_sent = True
except:
email_sent = False
logging.error("Error sending account activation email to account: " + email + ", activation url was: " + activate_url)
return email_sent
|
rafasashi/userinfuser
|
serverside/signup.py
|
Python
|
gpl-3.0
| 8,144
|
import unittest
from test import test_support
import os
import socket
import StringIO
import urllib2
from urllib2 import Request, OpenerDirector
try:
import ssl
except ImportError:
ssl = None
# XXX
# Request
# CacheFTPHandler (hard to write)
# parse_keqv_list, parse_http_list, HTTPDigestAuthHandler
class TrivialTests(unittest.TestCase):
def test_trivial(self):
# A couple trivial tests
self.assertRaises(ValueError, urllib2.urlopen, 'bogus url')
# XXX Name hacking to get this to work on Windows.
fname = os.path.abspath(urllib2.__file__).replace('\\', '/')
# And more hacking to get it to work on MacOS. This assumes
# urllib.pathname2url works, unfortunately...
if os.name == 'riscos':
import string
fname = os.expand(fname)
fname = fname.translate(string.maketrans("/.", "./"))
if os.name == 'nt':
file_url = "file:///%s" % fname
else:
file_url = "file://%s" % fname
f = urllib2.urlopen(file_url)
buf = f.read()
f.close()
def test_parse_http_list(self):
tests = [('a,b,c', ['a', 'b', 'c']),
('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']),
('a, b, "c", "d", "e,f", g, h', ['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']),
('a="b\\"c", d="e\\,f", g="h\\\\i"', ['a="b"c"', 'd="e,f"', 'g="h\\i"'])]
for string, list in tests:
self.assertEqual(urllib2.parse_http_list(string), list)
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
with self.assertRaises(ValueError):
urllib2.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
def test_request_headers_dict():
"""
The Request.headers dictionary is not a documented interface. It should
stay that way, because the complete set of headers are only accessible
through the .get_header(), .has_header(), .header_items() interface.
However, .headers pre-dates those methods, and so real code will be using
the dictionary.
The introduction in 2.4 of those methods was a mistake for the same reason:
code that previously saw all (urllib2 user)-provided headers in .headers
now sees only a subset (and the function interface is ugly and incomplete).
A better change would have been to replace .headers dict with a dict
subclass (or UserDict.DictMixin instance?) that preserved the .headers
interface and also provided access to the "unredirected" headers. It's
probably too late to fix that, though.
Check .capitalize() case normalization:
>>> url = "http://example.com"
>>> Request(url, headers={"Spam-eggs": "blah"}).headers["Spam-eggs"]
'blah'
>>> Request(url, headers={"spam-EggS": "blah"}).headers["Spam-eggs"]
'blah'
Currently, Request(url, "Spam-eggs").headers["Spam-Eggs"] raises KeyError,
but that could be changed in future.
"""
def test_request_headers_methods():
"""
Note the case normalization of header names here, to .capitalize()-case.
This should be preserved for backwards-compatibility. (In the HTTP case,
normalization to .title()-case is done by urllib2 before sending headers to
httplib).
>>> url = "http://example.com"
>>> r = Request(url, headers={"Spam-eggs": "blah"})
>>> r.has_header("Spam-eggs")
True
>>> r.header_items()
[('Spam-eggs', 'blah')]
>>> r.add_header("Foo-Bar", "baz")
>>> items = r.header_items()
>>> items.sort()
>>> items
[('Foo-bar', 'baz'), ('Spam-eggs', 'blah')]
Note that e.g. r.has_header("spam-EggS") is currently False, and
r.get_header("spam-EggS") returns None, but that could be changed in
future.
>>> r.has_header("Not-there")
False
>>> print r.get_header("Not-there")
None
>>> r.get_header("Not-there", "default")
'default'
"""
def test_password_manager(self):
"""
>>> mgr = urllib2.HTTPPasswordMgr()
>>> add = mgr.add_password
>>> add("Some Realm", "http://example.com/", "joe", "password")
>>> add("Some Realm", "http://example.com/ni", "ni", "ni")
>>> add("c", "http://example.com/foo", "foo", "ni")
>>> add("c", "http://example.com/bar", "bar", "nini")
>>> add("b", "http://example.com/", "first", "blah")
>>> add("b", "http://example.com/", "second", "spam")
>>> add("a", "http://example.com", "1", "a")
>>> add("Some Realm", "http://c.example.com:3128", "3", "c")
>>> add("Some Realm", "d.example.com", "4", "d")
>>> add("Some Realm", "e.example.com:3128", "5", "e")
>>> mgr.find_user_password("Some Realm", "example.com")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/spam")
('joe', 'password')
>>> mgr.find_user_password("Some Realm", "http://example.com/spam/spam")
('joe', 'password')
>>> mgr.find_user_password("c", "http://example.com/foo")
('foo', 'ni')
>>> mgr.find_user_password("c", "http://example.com/bar")
('bar', 'nini')
Actually, this is really undefined ATM
## Currently, we use the highest-level path where more than one match:
## >>> mgr.find_user_password("Some Realm", "http://example.com/ni")
## ('joe', 'password')
Use latest add_password() in case of conflict:
>>> mgr.find_user_password("b", "http://example.com/")
('second', 'spam')
No special relationship between a.example.com and example.com:
>>> mgr.find_user_password("a", "http://example.com/")
('1', 'a')
>>> mgr.find_user_password("a", "http://a.example.com/")
(None, None)
Ports:
>>> mgr.find_user_password("Some Realm", "c.example.com")
(None, None)
>>> mgr.find_user_password("Some Realm", "c.example.com:3128")
('3', 'c')
>>> mgr.find_user_password("Some Realm", "http://c.example.com:3128")
('3', 'c')
>>> mgr.find_user_password("Some Realm", "d.example.com")
('4', 'd')
>>> mgr.find_user_password("Some Realm", "e.example.com:3128")
('5', 'e')
"""
pass
def test_password_manager_default_port(self):
"""
>>> mgr = urllib2.HTTPPasswordMgr()
>>> add = mgr.add_password
The point to note here is that we can't guess the default port if there's
no scheme. This applies to both add_password and find_user_password.
>>> add("f", "http://g.example.com:80", "10", "j")
>>> add("g", "http://h.example.com", "11", "k")
>>> add("h", "i.example.com:80", "12", "l")
>>> add("i", "j.example.com", "13", "m")
>>> mgr.find_user_password("f", "g.example.com:100")
(None, None)
>>> mgr.find_user_password("f", "g.example.com:80")
('10', 'j')
>>> mgr.find_user_password("f", "g.example.com")
(None, None)
>>> mgr.find_user_password("f", "http://g.example.com:100")
(None, None)
>>> mgr.find_user_password("f", "http://g.example.com:80")
('10', 'j')
>>> mgr.find_user_password("f", "http://g.example.com")
('10', 'j')
>>> mgr.find_user_password("g", "h.example.com")
('11', 'k')
>>> mgr.find_user_password("g", "h.example.com:80")
('11', 'k')
>>> mgr.find_user_password("g", "http://h.example.com:80")
('11', 'k')
>>> mgr.find_user_password("h", "i.example.com")
(None, None)
>>> mgr.find_user_password("h", "i.example.com:80")
('12', 'l')
>>> mgr.find_user_password("h", "http://i.example.com:80")
('12', 'l')
>>> mgr.find_user_password("i", "j.example.com")
('13', 'm')
>>> mgr.find_user_password("i", "j.example.com:80")
(None, None)
>>> mgr.find_user_password("i", "http://j.example.com")
('13', 'm')
>>> mgr.find_user_password("i", "http://j.example.com:80")
(None, None)
"""
class MockOpener:
addheaders = []
def open(self, req, data=None,timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.req, self.data, self.timeout = req, data, timeout
def error(self, proto, *args):
self.proto, self.args = proto, args
class MockFile:
def read(self, count=None): pass
def readline(self, count=None): pass
def close(self): pass
class MockHeaders(dict):
def getheaders(self, name):
return self.values()
class MockResponse(StringIO.StringIO):
def __init__(self, code, msg, headers, data, url=None):
StringIO.StringIO.__init__(self, data)
self.code, self.msg, self.headers, self.url = code, msg, headers, url
def info(self):
return self.headers
def geturl(self):
return self.url
class MockCookieJar:
def add_cookie_header(self, request):
self.ach_req = request
def extract_cookies(self, response, request):
self.ec_req, self.ec_r = request, response
class FakeMethod:
def __init__(self, meth_name, action, handle):
self.meth_name = meth_name
self.handle = handle
self.action = action
def __call__(self, *args):
return self.handle(self.meth_name, self.action, *args)
class MockHTTPResponse:
def __init__(self, fp, msg, status, reason):
self.fp = fp
self.msg = msg
self.status = status
self.reason = reason
def read(self):
return ''
class MockHTTPClass:
def __init__(self):
self.req_headers = []
self.data = None
self.raise_on_endheaders = False
self._tunnel_headers = {}
def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.timeout = timeout
return self
def set_debuglevel(self, level):
self.level = level
def set_tunnel(self, host, port=None, headers=None):
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def request(self, method, url, body=None, headers=None):
self.method = method
self.selector = url
if headers is not None:
self.req_headers += headers.items()
self.req_headers.sort()
if body:
self.data = body
if self.raise_on_endheaders:
import socket
raise socket.error()
def getresponse(self):
return MockHTTPResponse(MockFile(), {}, 200, "OK")
def close(self):
pass
class MockHandler:
# useful for testing handler machinery
# see add_ordered_mock_handlers() docstring
handler_order = 500
def __init__(self, methods):
self._define_methods(methods)
def _define_methods(self, methods):
for spec in methods:
if len(spec) == 2: name, action = spec
else: name, action = spec, None
meth = FakeMethod(name, action, self.handle)
setattr(self.__class__, name, meth)
def handle(self, fn_name, action, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if action is None:
return None
elif action == "return self":
return self
elif action == "return response":
res = MockResponse(200, "OK", {}, "")
return res
elif action == "return request":
return Request("http://blah/")
elif action.startswith("error"):
code = action[action.rfind(" ")+1:]
try:
code = int(code)
except ValueError:
pass
res = MockResponse(200, "OK", {}, "")
return self.parent.error("http", args[0], res, code, "", {})
elif action == "raise":
raise urllib2.URLError("blah")
assert False
def close(self): pass
def add_parent(self, parent):
self.parent = parent
self.parent.calls = []
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# No handler_order, leave in original order. Yuck.
return True
return self.handler_order < other.handler_order
def add_ordered_mock_handlers(opener, meth_spec):
"""Create MockHandlers and add them to an OpenerDirector.
meth_spec: list of lists of tuples and strings defining methods to define
on handlers. eg:
[["http_error", "ftp_open"], ["http_open"]]
defines methods .http_error() and .ftp_open() on one handler, and
.http_open() on another. These methods just record their arguments and
return None. Using a tuple instead of a string causes the method to
perform some action (see MockHandler.handle()), eg:
[["http_error"], [("http_open", "return request")]]
defines .http_error() on one handler (which simply returns None), and
.http_open() on another handler, which returns a Request object.
"""
handlers = []
count = 0
for meths in meth_spec:
class MockHandlerSubclass(MockHandler): pass
h = MockHandlerSubclass(meths)
h.handler_order += count
h.add_parent(opener)
count = count + 1
handlers.append(h)
opener.add_handler(h)
return handlers
def build_test_opener(*handler_instances):
opener = OpenerDirector()
for h in handler_instances:
opener.add_handler(h)
return opener
class MockHTTPHandler(urllib2.BaseHandler):
# useful for testing redirections and auth
# sends supplied headers and code as first response
# sends 200 OK as second response
def __init__(self, code, headers):
self.code = code
self.headers = headers
self.reset()
def reset(self):
self._count = 0
self.requests = []
def http_open(self, req):
import mimetools, httplib, copy
from StringIO import StringIO
self.requests.append(copy.deepcopy(req))
if self._count == 0:
self._count = self._count + 1
name = httplib.responses[self.code]
msg = mimetools.Message(StringIO(self.headers))
return self.parent.error(
"http", req, MockFile(), self.code, name, msg)
else:
self.req = req
msg = mimetools.Message(StringIO("\r\n\r\n"))
return MockResponse(200, "OK", msg, "", req.get_full_url())
class MockHTTPSHandler(urllib2.AbstractHTTPHandler):
# Useful for testing the Proxy-Authorization request by verifying the
# properties of httpcon
def __init__(self):
urllib2.AbstractHTTPHandler.__init__(self)
self.httpconn = MockHTTPClass()
def https_open(self, req):
return self.do_open(self.httpconn, req)
class MockPasswordManager:
def add_password(self, realm, uri, user, password):
self.realm = realm
self.url = uri
self.user = user
self.password = password
def find_user_password(self, realm, authuri):
self.target_realm = realm
self.target_url = authuri
return self.user, self.password
class OpenerDirectorTests(unittest.TestCase):
def test_add_non_handler(self):
class NonHandler(object):
pass
self.assertRaises(TypeError,
OpenerDirector().add_handler, NonHandler())
def test_badly_named_methods(self):
# test work-around for three methods that accidentally follow the
# naming conventions for handler methods
# (*_open() / *_request() / *_response())
# These used to call the accidentally-named methods, causing a
# TypeError in real code; here, returning self from these mock
# methods would either cause no exception, or AttributeError.
from urllib2 import URLError
o = OpenerDirector()
meth_spec = [
[("do_open", "return self"), ("proxy_open", "return self")],
[("redirect_request", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
o.add_handler(urllib2.UnknownHandler())
for scheme in "do", "proxy", "redirect":
self.assertRaises(URLError, o.open, scheme+"://example.com/")
def test_handled(self):
# handler returning non-None means no more handlers will be called
o = OpenerDirector()
meth_spec = [
["http_open", "ftp_open", "http_error_302"],
["ftp_open"],
[("http_open", "return self")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# Second .http_open() gets called, third doesn't, since second returned
# non-None. Handlers without .http_open() never get any methods called
# on them.
# In fact, second mock handler defining .http_open() returns self
# (instead of response), which becomes the OpenerDirector's return
# value.
self.assertEqual(r, handlers[2])
calls = [(handlers[0], "http_open"), (handlers[2], "http_open")]
for expected, got in zip(calls, o.calls):
handler, name, args, kwds = got
self.assertEqual((handler, name), expected)
self.assertEqual(args, (req,))
def test_handler_order(self):
o = OpenerDirector()
handlers = []
for meths, handler_order in [
([("http_open", "return self")], 500),
(["http_open"], 0),
]:
class MockHandlerSubclass(MockHandler): pass
h = MockHandlerSubclass(meths)
h.handler_order = handler_order
handlers.append(h)
o.add_handler(h)
r = o.open("http://example.com/")
# handlers called in reverse order, thanks to their sort order
self.assertEqual(o.calls[0][0], handlers[1])
self.assertEqual(o.calls[1][0], handlers[0])
def test_raise(self):
# raising URLError stops processing of request
o = OpenerDirector()
meth_spec = [
[("http_open", "raise")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
self.assertRaises(urllib2.URLError, o.open, req)
self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})])
## def test_error(self):
## # XXX this doesn't actually seem to be used in standard library,
## # but should really be tested anyway...
def test_http_error(self):
# XXX http_error_default
# http errors are a special case
o = OpenerDirector()
meth_spec = [
[("http_open", "error 302")],
[("http_error_400", "raise"), "http_open"],
[("http_error_302", "return response"), "http_error_303",
"http_error"],
[("http_error_302")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
class Unknown:
def __eq__(self, other): return True
req = Request("http://example.com/")
r = o.open(req)
assert len(o.calls) == 2
calls = [(handlers[0], "http_open", (req,)),
(handlers[2], "http_error_302",
(req, Unknown(), 302, "", {}))]
for expected, got in zip(calls, o.calls):
handler, method_name, args = expected
self.assertEqual((handler, method_name), got[:2])
self.assertEqual(args, got[2])
def test_processors(self):
# *_request / *_response methods get called appropriately
o = OpenerDirector()
meth_spec = [
[("http_request", "return request"),
("http_response", "return response")],
[("http_request", "return request"),
("http_response", "return response")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# processor methods are called on *all* handlers that define them,
# not just the first handler that handles the request
calls = [
(handlers[0], "http_request"), (handlers[1], "http_request"),
(handlers[0], "http_response"), (handlers[1], "http_response")]
for i, (handler, name, args, kwds) in enumerate(o.calls):
if i < 2:
# *_request
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 1)
self.assertIsInstance(args[0], Request)
else:
# *_response
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], Request)
# response from opener.open is None, because there's no
# handler that defines http_open to handle it
if args[1] is not None:
self.assertIsInstance(args[1], MockResponse)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class HandlerTests(unittest.TestCase):
def test_ftp(self):
class MockFTPWrapper:
def __init__(self, data): self.data = data
def retrfile(self, filename, filetype):
self.filename, self.filetype = filename, filetype
return StringIO.StringIO(self.data), len(self.data)
def close(self): pass
class NullFTPHandler(urllib2.FTPHandler):
def __init__(self, data): self.data = data
def connect_ftp(self, user, passwd, host, port, dirs,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.user, self.passwd = user, passwd
self.host, self.port = host, port
self.dirs = dirs
self.ftpwrapper = MockFTPWrapper(self.data)
return self.ftpwrapper
import ftplib
data = "rheum rhaponicum"
h = NullFTPHandler(data)
o = h.parent = MockOpener()
for url, host, port, user, passwd, type_, dirs, filename, mimetype in [
("ftp://localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://%25parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "%parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://%2542parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "%42parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://localhost:80/foo/bar/",
"localhost", 80, "", "", "D",
["foo", "bar"], "", None),
("ftp://localhost/baz.gif;type=a",
"localhost", ftplib.FTP_PORT, "", "", "A",
[], "baz.gif", None), # XXX really this should guess image/gif
]:
req = Request(url)
req.timeout = None
r = h.ftp_open(req)
# ftp authentication not yet implemented by FTPHandler
self.assertEqual(h.user, user)
self.assertEqual(h.passwd, passwd)
self.assertEqual(h.host, socket.gethostbyname(host))
self.assertEqual(h.port, port)
self.assertEqual(h.dirs, dirs)
self.assertEqual(h.ftpwrapper.filename, filename)
self.assertEqual(h.ftpwrapper.filetype, type_)
headers = r.info()
self.assertEqual(headers.get("Content-type"), mimetype)
self.assertEqual(int(headers["Content-length"]), len(data))
def test_file(self):
import rfc822, socket
h = urllib2.FileHandler()
o = h.parent = MockOpener()
TESTFN = test_support.TESTFN
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
urls = [
"file://localhost%s" % urlpath,
"file://%s" % urlpath,
"file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
]
try:
localaddr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
localaddr = ''
if localaddr:
urls.append("file://%s%s" % (localaddr, urlpath))
for url in urls:
f = open(TESTFN, "wb")
try:
try:
f.write(towrite)
finally:
f.close()
r = h.file_open(Request(url))
try:
data = r.read()
headers = r.info()
respurl = r.geturl()
finally:
r.close()
stats = os.stat(TESTFN)
modified = rfc822.formatdate(stats.st_mtime)
finally:
os.remove(TESTFN)
self.assertEqual(data, towrite)
self.assertEqual(headers["Content-type"], "text/plain")
self.assertEqual(headers["Content-length"], "13")
self.assertEqual(headers["Last-modified"], modified)
self.assertEqual(respurl, url)
for url in [
"file://localhost:80%s" % urlpath,
"file:///file_does_not_exist.txt",
"file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
os.getcwd(), TESTFN),
"file://somerandomhost.ontheinternet.com%s/%s" %
(os.getcwd(), TESTFN),
]:
try:
f = open(TESTFN, "wb")
try:
f.write(towrite)
finally:
f.close()
self.assertRaises(urllib2.URLError,
h.file_open, Request(url))
finally:
os.remove(TESTFN)
h = urllib2.FileHandler()
o = h.parent = MockOpener()
# XXXX why does // mean ftp (and /// mean not ftp!), and where
# is file: scheme specified? I think this is really a bug, and
# what was intended was to distinguish between URLs like:
# file:/blah.txt (a file)
# file://localhost/blah.txt (a file)
# file:///blah.txt (a file)
# file://ftp.example.com/blah.txt (an ftp URL)
for url, ftp in [
("file://ftp.example.com//foo.txt", True),
("file://ftp.example.com///foo.txt", False),
# XXXX bug: fails with OSError, should be URLError
("file://ftp.example.com/foo.txt", False),
("file://somehost//foo/something.txt", True),
("file://localhost//foo/something.txt", False),
]:
req = Request(url)
try:
h.file_open(req)
# XXXX remove OSError when bug fixed
except (urllib2.URLError, OSError):
self.assertTrue(not ftp)
else:
self.assertTrue(o.req is req)
self.assertEqual(req.type, "ftp")
self.assertEqual(req.type == "ftp", ftp)
def test_http(self):
h = urllib2.AbstractHTTPHandler()
o = h.parent = MockOpener()
url = "http://example.com/"
for method, data in [("GET", None), ("POST", "blah")]:
req = Request(url, data, {"Foo": "bar"})
req.timeout = None
req.add_unredirected_header("Spam", "eggs")
http = MockHTTPClass()
r = h.do_open(http, req)
# result attributes
r.read; r.readline # wrapped MockFile methods
r.info; r.geturl # addinfourl methods
r.code, r.msg == 200, "OK" # added from MockHTTPClass.getreply()
hdrs = r.info()
hdrs.get; hdrs.has_key # r.info() gives dict from .getreply()
self.assertEqual(r.geturl(), url)
self.assertEqual(http.host, "example.com")
self.assertEqual(http.level, 0)
self.assertEqual(http.method, method)
self.assertEqual(http.selector, "/")
self.assertEqual(http.req_headers,
[("Connection", "close"),
("Foo", "bar"), ("Spam", "eggs")])
self.assertEqual(http.data, data)
# check socket.error converted to URLError
http.raise_on_endheaders = True
self.assertRaises(urllib2.URLError, h.do_open, http, req)
# check adding of standard headers
o.addheaders = [("Spam", "eggs")]
for data in "", None: # POST, GET
req = Request("http://example.com/", data)
r = MockResponse(200, "OK", {}, "")
newreq = h.do_request_(req)
if data is None: # GET
self.assertNotIn("Content-length", req.unredirected_hdrs)
self.assertNotIn("Content-type", req.unredirected_hdrs)
else: # POST
self.assertEqual(req.unredirected_hdrs["Content-length"], "0")
self.assertEqual(req.unredirected_hdrs["Content-type"],
"application/x-www-form-urlencoded")
# XXX the details of Host could be better tested
self.assertEqual(req.unredirected_hdrs["Host"], "example.com")
self.assertEqual(req.unredirected_hdrs["Spam"], "eggs")
# don't clobber existing headers
req.add_unredirected_header("Content-length", "foo")
req.add_unredirected_header("Content-type", "bar")
req.add_unredirected_header("Host", "baz")
req.add_unredirected_header("Spam", "foo")
newreq = h.do_request_(req)
self.assertEqual(req.unredirected_hdrs["Content-length"], "foo")
self.assertEqual(req.unredirected_hdrs["Content-type"], "bar")
self.assertEqual(req.unredirected_hdrs["Host"], "baz")
self.assertEqual(req.unredirected_hdrs["Spam"], "foo")
def test_http_doubleslash(self):
# Checks that the presence of an unnecessary double slash in a url doesn't break anything
# Previously, a double slash directly after the host could cause incorrect parsing of the url
h = urllib2.AbstractHTTPHandler()
o = h.parent = MockOpener()
data = ""
ds_urls = [
"http://example.com/foo/bar/baz.html",
"http://example.com//foo/bar/baz.html",
"http://example.com/foo//bar/baz.html",
"http://example.com/foo/bar//baz.html",
]
for ds_url in ds_urls:
ds_req = Request(ds_url, data)
# Check whether host is determined correctly if there is no proxy
np_ds_req = h.do_request_(ds_req)
self.assertEqual(np_ds_req.unredirected_hdrs["Host"],"example.com")
# Check whether host is determined correctly if there is a proxy
ds_req.set_proxy("someproxy:3128",None)
p_ds_req = h.do_request_(ds_req)
self.assertEqual(p_ds_req.unredirected_hdrs["Host"],"example.com")
def test_fixpath_in_weirdurls(self):
# Issue 4493: urllib2 should supply '/' for URLs whose path does not
# start with '/'
h = urllib2.AbstractHTTPHandler()
o = h.parent = MockOpener()
weird_url = 'http://www.python.org?getspam'
req = Request(weird_url)
newreq = h.do_request_(req)
self.assertEqual(newreq.get_host(),'www.python.org')
self.assertEqual(newreq.get_selector(),'/?getspam')
url_without_path = 'http://www.python.org'
req = Request(url_without_path)
newreq = h.do_request_(req)
self.assertEqual(newreq.get_host(),'www.python.org')
self.assertEqual(newreq.get_selector(),'')
def test_errors(self):
h = urllib2.HTTPErrorProcessor()
o = h.parent = MockOpener()
url = "http://example.com/"
req = Request(url)
# all 2xx are passed through
r = MockResponse(200, "OK", {}, "", url)
newr = h.http_response(req, r)
self.assertTrue(r is newr)
self.assertTrue(not hasattr(o, "proto")) # o.error not called
r = MockResponse(202, "Accepted", {}, "", url)
newr = h.http_response(req, r)
self.assertTrue(r is newr)
self.assertTrue(not hasattr(o, "proto")) # o.error not called
r = MockResponse(206, "Partial content", {}, "", url)
newr = h.http_response(req, r)
self.assertTrue(r is newr)
self.assertTrue(not hasattr(o, "proto")) # o.error not called
# anything else calls o.error (and MockOpener returns None, here)
r = MockResponse(502, "Bad gateway", {}, "", url)
self.assertTrue(h.http_response(req, r) is None)
self.assertEqual(o.proto, "http") # o.error called
self.assertEqual(o.args, (req, r, 502, "Bad gateway", {}))
def test_cookies(self):
cj = MockCookieJar()
h = urllib2.HTTPCookieProcessor(cj)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = MockResponse(200, "OK", {}, "")
newreq = h.http_request(req)
self.assertTrue(cj.ach_req is req is newreq)
self.assertEqual(req.get_origin_req_host(), "example.com")
self.assertTrue(not req.is_unverifiable())
newr = h.http_response(req, r)
self.assertTrue(cj.ec_req is req)
self.assertTrue(cj.ec_r is r is newr)
def test_redirect(self):
from_url = "http://example.com/a.html"
to_url = "http://example.com/b.html"
h = urllib2.HTTPRedirectHandler()
o = h.parent = MockOpener()
# ordinary redirect behaviour
for code in 301, 302, 303, 307:
for data in None, "blah\nblah\n":
method = getattr(h, "http_error_%s" % code)
req = Request(from_url, data)
req.add_header("Nonsense", "viking=withhold")
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
if data is not None:
req.add_header("Content-Length", str(len(data)))
req.add_unredirected_header("Spam", "spam")
try:
method(req, MockFile(), code, "Blah",
MockHeaders({"location": to_url}))
except urllib2.HTTPError:
# 307 in response to POST requires user OK
self.assertEqual(code, 307)
self.assertIsNotNone(data)
self.assertEqual(o.req.get_full_url(), to_url)
try:
self.assertEqual(o.req.get_method(), "GET")
except AttributeError:
self.assertTrue(not o.req.has_data())
# now it's a GET, there should not be headers regarding content
# (possibly dragged from before being a POST)
headers = [x.lower() for x in o.req.headers]
self.assertNotIn("content-length", headers)
self.assertNotIn("content-type", headers)
self.assertEqual(o.req.headers["Nonsense"],
"viking=withhold")
self.assertNotIn("Spam", o.req.headers)
self.assertNotIn("Spam", o.req.unredirected_hdrs)
# loop detection
req = Request(from_url)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
def redirect(h, req, url=to_url):
h.http_error_302(req, MockFile(), 302, "Blah",
MockHeaders({"location": url}))
# Note that the *original* request shares the same record of
# redirections with the sub-requests caused by the redirections.
# detect infinite loop redirect of a URL to itself
req = Request(from_url, origin_req_host="example.com")
count = 0
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
try:
while 1:
redirect(h, req, "http://example.com/")
count = count + 1
except urllib2.HTTPError:
# don't stop until max_repeats, because cookies may introduce state
self.assertEqual(count, urllib2.HTTPRedirectHandler.max_repeats)
# detect endless non-repeating chain of redirects
req = Request(from_url, origin_req_host="example.com")
count = 0
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
try:
while 1:
redirect(h, req, "http://example.com/%d" % count)
count = count + 1
except urllib2.HTTPError:
self.assertEqual(count,
urllib2.HTTPRedirectHandler.max_redirections)
def test_invalid_redirect(self):
from_url = "http://example.com/a.html"
valid_schemes = ['http', 'https', 'ftp']
invalid_schemes = ['file', 'imap', 'ldap']
schemeless_url = "example.com/b.html"
h = urllib2.HTTPRedirectHandler()
o = h.parent = MockOpener()
req = Request(from_url)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
for scheme in invalid_schemes:
invalid_url = scheme + '://' + schemeless_url
self.assertRaises(urllib2.HTTPError, h.http_error_302,
req, MockFile(), 302, "Security Loophole",
MockHeaders({"location": invalid_url}))
for scheme in valid_schemes:
valid_url = scheme + '://' + schemeless_url
h.http_error_302(req, MockFile(), 302, "That's fine",
MockHeaders({"location": valid_url}))
self.assertEqual(o.req.get_full_url(), valid_url)
def test_cookie_redirect(self):
# cookies shouldn't leak into redirected requests
from cookielib import CookieJar
from test.test_cookielib import interact_netscape
cj = CookieJar()
interact_netscape(cj, "http://www.example.com/", "spam=eggs")
hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
hdeh = urllib2.HTTPDefaultErrorHandler()
hrh = urllib2.HTTPRedirectHandler()
cp = urllib2.HTTPCookieProcessor(cj)
o = build_test_opener(hh, hdeh, hrh, cp)
o.open("http://www.example.com/")
self.assertTrue(not hh.req.has_header("Cookie"))
def test_redirect_fragment(self):
redirected_url = 'http://www.example.com/index.html#OK\r\n\r\n'
hh = MockHTTPHandler(302, 'Location: ' + redirected_url)
hdeh = urllib2.HTTPDefaultErrorHandler()
hrh = urllib2.HTTPRedirectHandler()
o = build_test_opener(hh, hdeh, hrh)
fp = o.open('http://www.example.com')
self.assertEqual(fp.geturl(), redirected_url.strip())
def test_proxy(self):
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://acme.example.com/")
self.assertEqual(req.get_host(), "acme.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_no_proxy(self):
os.environ['no_proxy'] = 'python.org'
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(http="proxy.example.com"))
o.add_handler(ph)
req = Request("http://www.perl.org/")
self.assertEqual(req.get_host(), "www.perl.org")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com")
req = Request("http://www.python.org")
self.assertEqual(req.get_host(), "www.python.org")
r = o.open(req)
self.assertEqual(req.get_host(), "www.python.org")
del os.environ['no_proxy']
def test_proxy_https(self):
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(https='proxy.example.com:3128'))
o.add_handler(ph)
meth_spec = [
[("https_open","return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("https://www.example.com/")
self.assertEqual(req.get_host(), "www.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "https_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_https_proxy_authorization(self):
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(https='proxy.example.com:3128'))
o.add_handler(ph)
https_handler = MockHTTPSHandler()
o.add_handler(https_handler)
req = Request("https://www.example.com/")
req.add_header("Proxy-Authorization","FooBar")
req.add_header("User-Agent","Grail")
self.assertEqual(req.get_host(), "www.example.com")
self.assertIsNone(req._tunnel_host)
r = o.open(req)
# Verify Proxy-Authorization gets tunneled to request.
# httpsconn req_headers do not have the Proxy-Authorization header but
# the req will have.
self.assertNotIn(("Proxy-Authorization","FooBar"),
https_handler.httpconn.req_headers)
self.assertIn(("User-Agent","Grail"),
https_handler.httpconn.req_headers)
self.assertIsNotNone(req._tunnel_host)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual(req.get_header("Proxy-authorization"),"FooBar")
def test_basic_auth(self, quote_char='"'):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' %
(quote_char, realm, quote_char) )
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected"
)
def test_basic_auth_with_single_quoted_realm(self):
self.test_basic_auth(quote_char="'")
def test_basic_auth_with_unquoted_realm(self):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
msg = "Basic Auth Realm was unquoted"
with test_support.check_warnings((msg, UserWarning)):
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected"
)
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
opener.add_handler(ph)
password_manager = MockPasswordManager()
auth_handler = urllib2.ProxyBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
realm, http_handler, password_manager,
"http://acme.example.com:3128/protected",
"proxy.example.com:3128",
)
def test_basic_and_digest_auth_handlers(self):
# HTTPDigestAuthHandler raised an exception if it couldn't handle a 40*
# response (http://python.org/sf/1479302), where it should instead
# return None to allow another handler (especially
# HTTPBasicAuthHandler) to handle the response.
# Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must
# try digest first (since it's the strongest auth scheme), so we record
# order of calls here to check digest comes first:
class RecordingOpenerDirector(OpenerDirector):
def __init__(self):
OpenerDirector.__init__(self)
self.recorded = []
def record(self, info):
self.recorded.append(info)
class TestDigestAuthHandler(urllib2.HTTPDigestAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("digest")
urllib2.HTTPDigestAuthHandler.http_error_401(self,
*args, **kwds)
class TestBasicAuthHandler(urllib2.HTTPBasicAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("basic")
urllib2.HTTPBasicAuthHandler.http_error_401(self,
*args, **kwds)
opener = RecordingOpenerDirector()
password_manager = MockPasswordManager()
digest_handler = TestDigestAuthHandler(password_manager)
basic_handler = TestBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(basic_handler)
opener.add_handler(digest_handler)
opener.add_handler(http_handler)
# check basic auth isn't blocked by digest handler failing
self._test_basic_auth(opener, basic_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
# check digest was tried before basic (twice, because
# _test_basic_auth called .open() twice)
self.assertEqual(opener.recorded, ["digest", "basic"]*2)
def _test_basic_auth(self, opener, auth_handler, auth_header,
realm, http_handler, password_manager,
request_url, protected_url):
import base64
user, password = "wile", "coyote"
# .add_password() fed through to password manager
auth_handler.add_password(realm, request_url, user, password)
self.assertEqual(realm, password_manager.realm)
self.assertEqual(request_url, password_manager.url)
self.assertEqual(user, password_manager.user)
self.assertEqual(password, password_manager.password)
r = opener.open(request_url)
# should have asked the password manager for the username/password
self.assertEqual(password_manager.target_realm, realm)
self.assertEqual(password_manager.target_url, protected_url)
# expect one request without authorization, then one with
self.assertEqual(len(http_handler.requests), 2)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
userpass = '%s:%s' % (user, password)
auth_hdr_value = 'Basic '+base64.encodestring(userpass).strip()
self.assertEqual(http_handler.requests[1].get_header(auth_header),
auth_hdr_value)
self.assertEqual(http_handler.requests[1].unredirected_hdrs[auth_header],
auth_hdr_value)
# if the password manager can't find a password, the handler won't
# handle the HTTP auth error
password_manager.user = password_manager.password = None
http_handler.reset()
r = opener.open(request_url)
self.assertEqual(len(http_handler.requests), 1)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
class MiscTests(unittest.TestCase):
def test_build_opener(self):
class MyHTTPHandler(urllib2.HTTPHandler): pass
class FooHandler(urllib2.BaseHandler):
def foo_open(self): pass
class BarHandler(urllib2.BaseHandler):
def bar_open(self): pass
build_opener = urllib2.build_opener
o = build_opener(FooHandler, BarHandler)
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# can take a mix of classes and instances
o = build_opener(FooHandler, BarHandler())
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# subclasses of default handlers override default handlers
o = build_opener(MyHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
# a particular case of overriding: default handlers can be passed
# in explicitly
o = build_opener()
self.opener_has_handler(o, urllib2.HTTPHandler)
o = build_opener(urllib2.HTTPHandler)
self.opener_has_handler(o, urllib2.HTTPHandler)
o = build_opener(urllib2.HTTPHandler())
self.opener_has_handler(o, urllib2.HTTPHandler)
# Issue2670: multiple handlers sharing the same base class
class MyOtherHTTPHandler(urllib2.HTTPHandler): pass
o = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
self.opener_has_handler(o, MyOtherHTTPHandler)
def opener_has_handler(self, opener, handler_class):
for h in opener.handlers:
if h.__class__ == handler_class:
break
else:
self.assertTrue(False)
class RequestTests(unittest.TestCase):
def setUp(self):
self.get = urllib2.Request("http://www.python.org/~jeremy/")
self.post = urllib2.Request("http://www.python.org/~jeremy/",
"data",
headers={"X-Test": "test"})
def test_method(self):
self.assertEqual("POST", self.post.get_method())
self.assertEqual("GET", self.get.get_method())
def test_add_data(self):
self.assertTrue(not self.get.has_data())
self.assertEqual("GET", self.get.get_method())
self.get.add_data("spam")
self.assertTrue(self.get.has_data())
self.assertEqual("POST", self.get.get_method())
def test_get_full_url(self):
self.assertEqual("http://www.python.org/~jeremy/",
self.get.get_full_url())
def test_selector(self):
self.assertEqual("/~jeremy/", self.get.get_selector())
req = urllib2.Request("http://www.python.org/")
self.assertEqual("/", req.get_selector())
def test_get_type(self):
self.assertEqual("http", self.get.get_type())
def test_get_host(self):
self.assertEqual("www.python.org", self.get.get_host())
def test_get_host_unquote(self):
req = urllib2.Request("http://www.%70ython.org/")
self.assertEqual("www.python.org", req.get_host())
def test_proxy(self):
self.assertTrue(not self.get.has_proxy())
self.get.set_proxy("www.perl.org", "http")
self.assertTrue(self.get.has_proxy())
self.assertEqual("www.python.org", self.get.get_origin_req_host())
self.assertEqual("www.perl.org", self.get.get_host())
def test_wrapped_url(self):
req = Request("<URL:http://www.python.org>")
self.assertEqual("www.python.org", req.get_host())
def test_url_fragment(self):
req = Request("http://www.python.org/?qs=query#fragment=true")
self.assertEqual("/?qs=query", req.get_selector())
req = Request("http://www.python.org/#fun=true")
self.assertEqual("/", req.get_selector())
# Issue 11703: geturl() omits fragment in the original URL.
url = 'http://docs.python.org/library/urllib2.html#OK'
req = Request(url)
self.assertEqual(req.get_full_url(), url)
def test_HTTPError_interface(self):
"""
Issue 13211 reveals that HTTPError didn't implement the URLError
interface even though HTTPError is a subclass of URLError.
>>> err = urllib2.HTTPError(msg='something bad happened', url=None, code=None, hdrs=None, fp=None)
>>> assert hasattr(err, 'reason')
>>> err.reason
'something bad happened'
"""
def test_HTTPError_interface_call(self):
"""
Issue 15701 - HTTPError interface has info method available from URLError.
"""
err = urllib2.HTTPError(msg='something bad happened', url=None,
code=None, hdrs='Content-Length:42', fp=None)
self.assertTrue(hasattr(err, 'reason'))
assert hasattr(err, 'reason')
assert hasattr(err, 'info')
assert callable(err.info)
try:
err.info()
except AttributeError:
self.fail("err.info() failed")
self.assertEqual(err.info(), "Content-Length:42")
def test_main(verbose=None):
from test import test_urllib2
test_support.run_doctest(test_urllib2, verbose)
test_support.run_doctest(urllib2, verbose)
tests = (TrivialTests,
OpenerDirectorTests,
HandlerTests,
MiscTests,
RequestTests)
test_support.run_unittest(*tests)
if __name__ == "__main__":
test_main(verbose=True)
|
jlspyaozhongkai/Uter
|
third_party_build/Python-2.7.9/lib/python2.7/test/test_urllib2.py
|
Python
|
gpl-3.0
| 54,618
|
"""SCons.Tool.javah
Tool-specific initialization for javah.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/javah.py 3842 2008/12/20 22:59:52 scons"
import os.path
import string
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Tool.javac
import SCons.Util
def emit_java_headers(target, source, env):
"""Create and return lists of Java stub header files that will
be created from a set of class files.
"""
class_suffix = env.get('JAVACLASSSUFFIX', '.class')
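# For example, a class file whose java_classname is "com.example.Foo" maps to the
# stub header "com_example_Foo.h" (the '.' -> '_' replacement happens when the
# target list is built further below).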
classdir = env.get('JAVACLASSDIR')
if not classdir:
try:
s = source[0]
except IndexError:
classdir = '.'
else:
try:
classdir = s.attributes.java_classdir
except AttributeError:
classdir = '.'
classdir = env.Dir(classdir).rdir()
if str(classdir) == '.':
c_ = None
else:
c_ = str(classdir) + os.sep
slist = []
for src in source:
try:
classname = src.attributes.java_classname
except AttributeError:
classname = str(src)
if c_ and classname[:len(c_)] == c_:
classname = classname[len(c_):]
if class_suffix and classname[-len(class_suffix):] == class_suffix:
classname = classname[:-len(class_suffix)]
classname = SCons.Tool.javac.classname(classname)
s = src.rfile()
s.attributes.java_classname = classname
slist.append(s)
s = source[0].rfile()
if not hasattr(s.attributes, 'java_classdir'):
s.attributes.java_classdir = classdir
if target[0].__class__ is SCons.Node.FS.File:
tlist = target
else:
if not isinstance(target[0], SCons.Node.FS.Dir):
target[0].__class__ = SCons.Node.FS.Dir
target[0]._morph()
tlist = []
for s in source:
fname = string.replace(s.attributes.java_classname, '.', '_') + '.h'
t = target[0].File(fname)
t.attributes.java_lookupdir = target[0]
tlist.append(t)
return tlist, source
def JavaHOutFlagGenerator(target, source, env, for_signature):
try:
t = target[0]
except (AttributeError, TypeError):
t = target
try:
return '-d ' + str(t.attributes.java_lookupdir)
except AttributeError:
return '-o ' + str(t)
def getJavaHClassPath(env,target, source, for_signature):
path = "${SOURCE.attributes.java_classdir}"
if env.has_key('JAVACLASSPATH') and env['JAVACLASSPATH']:
path = SCons.Util.AppendPath(path, env['JAVACLASSPATH'])
return "-classpath %s" % (path)
def generate(env):
"""Add Builders and construction variables for javah to an Environment."""
java_javah = SCons.Tool.CreateJavaHBuilder(env)
java_javah.emitter = emit_java_headers
env['_JAVAHOUTFLAG'] = JavaHOutFlagGenerator
env['JAVAH'] = 'javah'
env['JAVAHFLAGS'] = SCons.Util.CLVar('')
env['_JAVAHCLASSPATH'] = getJavaHClassPath
env['JAVAHCOM'] = '$JAVAH $JAVAHFLAGS $_JAVAHOUTFLAG $_JAVAHCLASSPATH ${SOURCES.attributes.java_classname}'
env['JAVACLASSSUFFIX'] = '.class'
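# A minimal usage sketch (hypothetical SConstruct; paths and package layout are
# assumptions, and the javac tool must also be loaded so the class-file nodes
# carry their java_classname attributes):
#   env = Environment(tools=['javac', 'javah'])
#   classes = env.Java(target='build/classes', source='src')
#   env.JavaH(target='build/jni', source=classes)
# The JavaH builder then invokes $JAVAHCOM on the class names, emitting one C
# header per class as computed by emit_java_headers above.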
def exists(env):
return env.Detect('javah')
|
carlos-lopez-garces/mapnik-trunk
|
scons/scons-local-1.2.0/SCons/Tool/javah.py
|
Python
|
lgpl-2.1
| 4,559
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.backend.jvm.tasks.jvm_dependency_analyzer import JvmDependencyAnalyzer
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.java.distribution.distribution import DistributionLocator
from pants.task.task import Task
class JvmDependencyCheck(Task):
"""Checks true dependencies of a JVM target and ensures that they are consistent with BUILD files."""
@classmethod
def register_options(cls, register):
super(JvmDependencyCheck, cls).register_options(register)
register('--missing-deps', choices=['off', 'warn', 'fatal'], default='off',
fingerprint=True,
help='Check for missing dependencies in compiled code. Reports actual '
'dependencies A -> B where there is no transitive BUILD file dependency path '
'from A to B. If fatal, missing deps are treated as a build error.')
register('--missing-direct-deps', choices=['off', 'warn', 'fatal'],
default='off',
fingerprint=True,
help='Check for missing direct dependencies in compiled code. Reports actual '
'dependencies A -> B where there is no direct BUILD file dependency path from '
'A to B. This is a very strict check; In practice it is common to rely on '
'transitive, indirect dependencies, e.g., due to type inference or when the main '
'target in a BUILD file is modified to depend on other targets in the same BUILD '
'file, as an implementation detail. However it may still be useful to use this '
'on occasion. ')
register('--missing-deps-whitelist', type=list, default=[],
fingerprint=True,
help="Don't report these targets even if they have missing deps.")
register('--unnecessary-deps', choices=['off', 'warn', 'fatal'], default='off',
fingerprint=True,
help='Check for declared dependencies in compiled code that are not needed. '
'This is a very strict check. For example, generated code will often '
'legitimately have BUILD dependencies that are unused in practice.')
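# Hedged illustration of the distinction above (hypothetical targets A, B, C): if
# A's compiled code uses a class from C, and A declares a BUILD dependency only on
# B while B depends on C, then --missing-direct-deps reports A -> C (no direct
# BUILD edge), whereas --missing-deps does not (a transitive path A -> B -> C exists).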
@classmethod
def skip(cls, options):
"""Return true if the task should be entirely skipped, and thus have no product requirements."""
values = [options.missing_deps, options.missing_direct_deps, options.unnecessary_deps]
return all(v == 'off' for v in values)
@classmethod
def prepare(cls, options, round_manager):
super(JvmDependencyCheck, cls).prepare(options, round_manager)
if not cls.skip(options):
round_manager.require_data('runtime_classpath')
round_manager.require_data('product_deps_by_src')
def __init__(self, *args, **kwargs):
super(JvmDependencyCheck, self).__init__(*args, **kwargs)
# Set up dep checking if needed.
def munge_flag(flag):
flag_value = self.get_options().get(flag, None)
return None if flag_value == 'off' else flag_value
self._check_missing_deps = munge_flag('missing_deps')
self._check_missing_direct_deps = munge_flag('missing_direct_deps')
self._check_unnecessary_deps = munge_flag('unnecessary_deps')
self._target_whitelist = self.get_options().missing_deps_whitelist
@property
def cache_target_dirs(self):
return True
def execute(self):
if self.skip(self.get_options()):
return
with self.invalidated(self.context.targets(),
invalidate_dependents=True) as invalidation_check:
for vt in invalidation_check.invalid_vts:
product_deps_by_src = self.context.products.get_data('product_deps_by_src').get(vt.target)
if product_deps_by_src is not None:
self.check(vt.target, product_deps_by_src)
def check(self, src_tgt, actual_deps):
"""Check for missing deps.
See docstring for _compute_missing_deps for details.
"""
if self._check_missing_deps or self._check_missing_direct_deps or self._check_unnecessary_deps:
missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps = \
self._compute_missing_deps(src_tgt, actual_deps)
buildroot = get_buildroot()
def shorten(path): # Make the output easier to read.
if path.startswith(buildroot):
return os.path.relpath(path, buildroot)
return path
def filter_whitelisted(missing_deps):
# Removing any targets that exist in the whitelist from the list of dependency issues.
return [(tgt_pair, evidence) for (tgt_pair, evidence) in missing_deps
if tgt_pair[0].address.reference() not in self._target_whitelist]
missing_tgt_deps = filter_whitelisted(missing_tgt_deps)
if self._check_missing_deps and (missing_file_deps or missing_tgt_deps):
log_fn = (self.context.log.error if self._check_missing_deps == 'fatal'
else self.context.log.warn)
for (tgt_pair, evidence) in missing_tgt_deps:
evidence_str = '\n'.join([' {} uses {}'.format(shorten(e[0]), shorten(e[1]))
for e in evidence])
log_fn('Missing BUILD dependency {} -> {} because:\n{}'
.format(tgt_pair[0].address.reference(), tgt_pair[1].address.reference(),
evidence_str))
for (src_tgt, dep) in missing_file_deps:
log_fn('Missing BUILD dependency {} -> {}'
.format(src_tgt.address.reference(), shorten(dep)))
if self._check_missing_deps == 'fatal':
raise TaskError('Missing deps.')
missing_direct_tgt_deps = filter_whitelisted(missing_direct_tgt_deps)
if self._check_missing_direct_deps and missing_direct_tgt_deps:
log_fn = (self.context.log.error if self._check_missing_direct_deps == 'fatal'
else self.context.log.warn)
for (tgt_pair, evidence) in missing_direct_tgt_deps:
evidence_str = '\n'.join([' {} uses {}'.format(shorten(e[0]), shorten(e[1]))
for e in evidence])
log_fn('Missing direct BUILD dependency {} -> {} because:\n{}'
.format(tgt_pair[0].address, tgt_pair[1].address, evidence_str))
if self._check_missing_direct_deps == 'fatal':
raise TaskError('Missing direct deps.')
if self._check_unnecessary_deps:
raise TaskError('Unnecessary dep warnings not implemented yet.')
def _compute_missing_deps(self, src_tgt, actual_deps):
"""Computes deps that are used by the compiler but not specified in a BUILD file.
These deps are bugs waiting to happen: the code may happen to compile because the dep was
brought in some other way (e.g., by some other root target), but that is obviously fragile.
Note that in practice we're OK with reliance on indirect deps that are only brought in
transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
cases aren't as fragile as a completely missing dependency. It's still a good idea to have
explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
easy to find and reason about.
- actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
compiler.
Returns a triple (missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps) where:
- missing_file_deps: a list of dep_files where src_tgt requires dep_file, and we're unable
to map to a target (because its target isn't in the total set of targets in play,
and we don't want to parse every BUILD file in the workspace just to find it).
- missing_tgt_deps: a list of dep_tgt where src_tgt is missing a necessary transitive
dependency on dep_tgt.
- missing_direct_tgt_deps: a list of dep_tgts where src_tgt is missing a direct dependency
on dep_tgt but has a transitive dep on it.
All paths in the input and output are absolute.
"""
analyzer = JvmDependencyAnalyzer(get_buildroot(),
self.context.products.get_data('runtime_classpath'),
self.context.products.get_data('product_deps_by_src'))
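# Illustrative shape of the returned triple (hypothetical targets and paths, shown
# only to make the docstring above concrete):
#   missing_file_deps: [(src_tgt, '/abs/path/to/Dep.class'), ...]
#   missing_tgt_deps: [((src_tgt, dep_tgt), [(src_file, dep_file), ...]), ...]
#   missing_direct_tgt_deps: same shape as missing_tgt_deps, but for direct deps.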
def must_be_explicit_dep(dep):
# We don't require explicit deps on the java runtime, so we shouldn't consider that
# a missing dep.
return (dep not in analyzer.bootstrap_jar_classfiles
and not dep.startswith(DistributionLocator.cached().real_home))
def target_or_java_dep_in_targets(target, targets):
# We want to check if the target is in the targets collection
#
# However, for the special case of scala_library that has a java_sources
# reference we're ok if that exists in targets even if the scala_library does not.
if target in targets:
return True
elif target.is_scala:
return any(t in targets for t in target.java_sources)
else:
return False
# TODO: If recomputing these every time becomes a performance issue, memoize for
# already-seen targets and incrementally compute for new targets not seen in a previous
# partition, in this or a previous chunk.
transitive_deps_by_target = analyzer.compute_transitive_deps_by_target(self.context.targets())
# Find deps that are actual but not specified.
missing_file_deps = OrderedSet() # (src, src).
missing_tgt_deps_map = defaultdict(list) # (tgt, tgt) -> a list of (src, src) as evidence.
missing_direct_tgt_deps_map = defaultdict(list) # The same, but for direct deps.
targets_by_file = analyzer.targets_by_file(self.context.targets())
buildroot = get_buildroot()
abs_srcs = [os.path.join(buildroot, src) for src in src_tgt.sources_relative_to_buildroot()]
for src in abs_srcs:
for actual_dep in filter(must_be_explicit_dep, actual_deps.get(src, [])):
actual_dep_tgts = targets_by_file.get(actual_dep)
# actual_dep_tgts is usually a singleton. If it's not, we only need one of these
# to be in our declared deps to be OK.
if actual_dep_tgts is None:
missing_file_deps.add((src_tgt, actual_dep))
elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
# Obviously intra-target deps are fine.
canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
if actual_dep_tgts.isdisjoint(transitive_deps_by_target.get(src_tgt, [])):
missing_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append((src, actual_dep))
elif canonical_actual_dep_tgt not in src_tgt.dependencies:
# The canonical dep is the only one a direct dependency makes sense on.
missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
(src, actual_dep))
return (list(missing_file_deps),
missing_tgt_deps_map.items(),
missing_direct_tgt_deps_map.items())
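    # Illustrative shape of the returned triple, using hypothetical targets and
    # paths (src_tgt/dep_tgt stand for Target objects):
    #   missing_file_deps:       [(src_tgt, '/abs/path/to/some/Dep.class')]
    #   missing_tgt_deps:        [((src_tgt, dep_tgt), [('/abs/src/Foo.java', '/abs/path/to/some/Dep.class')])]
    #   missing_direct_tgt_deps: same shape as missing_tgt_deps, but for deps that
    #                            are only reachable transitively.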
|
pombredanne/pants
|
src/python/pants/backend/jvm/tasks/jvm_dependency_check.py
|
Python
|
apache-2.0
| 11,454
|
from .provider import S3Provider # noqa
|
TomBaxter/waterbutler
|
waterbutler/providers/s3/__init__.py
|
Python
|
apache-2.0
| 41
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import BigInteger, Column, MetaData, Table
from sqlalchemy.types import NullType
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = meta.tables.keys()
meta.bind = migrate_engine
for table_name in table_names:
if table_name.startswith('shadow'):
continue
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
column_copy = None
# NOTE(boris-42): BigInteger is not supported by sqlite, so
# after copy it will have NullType, other
# types that are used in Nova are supported by
# sqlite.
if isinstance(column.type, NullType):
column_copy = Column(column.name, BigInteger(), default=0)
else:
column_copy = column.copy()
columns.append(column_copy)
shadow_table_name = 'shadow_' + table_name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
raise
def downgrade(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = meta.tables.keys()
meta.bind = migrate_engine
for table_name in table_names:
if table_name.startswith('shadow'):
continue
shadow_table_name = 'shadow_' + table_name
shadow_table = Table(shadow_table_name, meta, autoload=True)
try:
shadow_table.drop()
except Exception:
LOG.error(_("table '%s' not dropped") % shadow_table_name)
|
sridevikoushik31/openstack
|
nova/db/sqlalchemy/migrate_repo/versions/154_add_shadow_tables.py
|
Python
|
apache-2.0
| 2,666
|
import re
from docutils import nodes, utils
from docutils.parsers.rst import roles
pubmed_uri_pattern = "http://www.ncbi.nlm.nih.gov/pubmed/%i"
doi_uri_pattern = "http://dx.doi.org/%s"
def pmid_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
pmid = int(text)
if pmid <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'pmid number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
ref = pubmed_uri_pattern % pmid
nodelist = []
nodelist.append(nodes.inline(text='PMID:'))
nodelist.append(nodes.reference(rawtext, utils.unescape(text), refuri=ref,
**options))
return nodelist, []
def doi_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
ref = doi_uri_pattern % text
nodelist = []
nodelist.append(nodes.inline(text='doi:'))
nodelist.append(nodes.reference(rawtext, utils.unescape(text), refuri=ref,
**options))
return nodelist, []
def setup(app):
app.add_role('pmid', pmid_reference_role)
app.add_role('doi', doi_reference_role)
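# Example reStructuredText usage of the roles registered above (assuming this
# extension is listed in a Sphinx project's conf.py):
#
#   See the original report :pmid:`12345678` and the follow-up :doi:`10.1000/xyz123`.
#
# Each role emits an inline "PMID:"/"doi:" label followed by a reference node
# whose refuri is built from pubmed_uri_pattern/doi_uri_pattern.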
|
sorgerlab/belpy
|
doc/ext/citations.py
|
Python
|
mit
| 1,373
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: nxos_nxapi
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage NXAPI configuration on an NXOS device.
description:
- Configures the NXAPI feature on devices running Cisco NXOS. The
NXAPI feature is absent from the configuration by default. Since
this module manages the NXAPI feature it only supports the use
of the C(Cli) transport.
extends_documentation_fragment: nxos
options:
http_port:
description:
- Configure the port with which the HTTP server will listen on
for requests. By default, NXAPI will bind the HTTP service
to the standard HTTP port 80. This argument accepts valid
port values in the range of 1 to 65535.
required: false
default: 80
http:
description:
- Controls the operating state of the HTTP protocol as one of the
underlying transports for NXAPI. By default, NXAPI will enable
the HTTP transport when the feature is first configured. To
disable the use of the HTTP transport, set the value of this
argument to False.
required: false
default: yes
choices: ['yes', 'no']
aliases: ['enable_http']
https_port:
description:
- Configure the port with which the HTTPS server will listen on
for requests. By default, NXAPI will bind the HTTPS service
to the standard HTTPS port 443. This argument accepts valid
port values in the range of 1 to 65535.
required: false
default: 443
https:
description:
- Controls the operating state of the HTTPS protocol as one of the
underlying transports for NXAPI. By default, NXAPI will disable
the HTTPS transport when the feature is first configured. To
enable the use of the HTTPS transport, set the value of this
argument to True.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_https']
sandbox:
description:
- The NXAPI feature provides a web base UI for developers for
entering commands. This feature is initially disabled when
the NXAPI feature is configured for the first time. When the
C(sandbox) argument is set to True, the developer sandbox URL
will accept requests and when the value is set to False, the
sandbox URL is unavailable.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_sandbox']
config:
description:
- The C(config) argument provides an optional argument to
specify the device running-config to used as the basis for
configuring the remote system. The C(config) argument accepts
a string value that represents the device configuration.
required: false
default: null
version_added: "2.2"
state:
description:
- The C(state) argument controls whether or not the NXAPI
feature is configured on the remote device. When the value
is C(present) the NXAPI feature configuration is present in
the device running-config. When the values is C(absent) the
feature configuration is removed from the running-config.
choices: ['present', 'absent']
required: false
default: present
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
- name: Enable NXAPI access with default configuration
nxos_nxapi:
provider: {{ cli }}
- name: Enable NXAPI with no HTTP, HTTPS at port 9443 and sandbox disabled
nxos_nxapi:
enable_http: false
https_port: 9443
https: yes
enable_sandbox: no
provider: {{ cli }}
- name: remove NXAPI configuration
nxos_nxapi:
state: absent
provider: {{ cli }}
"""
RETURN = """
updates:
description:
- Returns the list of commands that need to be pushed into the remote
device to satisfy the arguments
returned: always
type: list
sample: ['no feature nxapi']
"""
import re
import time
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.nxos import NetworkModule, NetworkError
from ansible.module_utils.basic import get_exception
PRIVATE_KEYS_RE = re.compile('__.+__')
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_instance(module):
instance = dict(state='absent')
try:
resp = module.cli('show nxapi', 'json')
except NetworkError:
return instance
instance['state'] = 'present'
instance['http'] = 'http_port' in resp[0]
instance['http_port'] = resp[0].get('http_port') or 80
instance['https'] = 'https_port' in resp[0]
instance['https_port'] = resp[0].get('https_port') or 443
instance['sandbox'] = resp[0]['sandbox_status']
return instance
def present(module, instance, commands):
commands.append('feature nxapi')
setters = set()
for key, value in module.argument_spec.iteritems():
setter = value.get('setter') or 'set_%s' % key
if setter not in setters:
setters.add(setter)
if module.params[key] is not None:
invoke(setter, module, instance, commands)
def absent(module, instance, commands):
if instance['state'] != 'absent':
commands.append('no feature nxapi')
def set_http(module, instance, commands):
port = module.params['http_port']
    if not 1 <= port <= 65535:
module.fail_json(msg='http_port must be between 1 and 65535')
elif module.params['http'] is True:
commands.append('nxapi http port %s' % port)
elif module.params['http'] is False:
commands.append('no nxapi http')
def set_https(module, instance, commands):
port = module.params['https_port']
    if not 1 <= port <= 65535:
module.fail_json(msg='https_port must be between 1 and 65535')
elif module.params['https'] is True:
commands.append('nxapi https port %s' % port)
elif module.params['https'] is False:
commands.append('no nxapi https')
def set_sandbox(module, instance, commands):
if module.params['sandbox'] is True:
commands.append('nxapi sandbox')
elif module.params['sandbox'] is False:
commands.append('no nxapi sandbox')
def get_config(module):
contents = module.params['config']
if not contents:
try:
contents = module.cli(['show running-config nxapi all'])[0]
except NetworkError:
contents = None
config = NetworkConfig(indent=2)
if contents:
config.load(contents)
return config
def load_checkpoint(module, result):
try:
checkpoint = result['__checkpoint__']
module.cli(['rollback running-config checkpoint %s' % checkpoint,
'no checkpoint %s' % checkpoint], output='text')
except KeyError:
module.fail_json(msg='unable to rollback, checkpoint not found')
except NetworkError:
exc = get_exception()
msg = 'unable to rollback configuration'
module.fail_json(msg=msg, checkpoint=checkpoint, **exc.kwargs)
def load_config(module, commands, result):
# create a config checkpoint
checkpoint = 'ansible_%s' % int(time.time())
module.cli(['checkpoint %s' % checkpoint], output='text')
result['__checkpoint__'] = checkpoint
# load the config into the device
module.config.load_config(commands)
    # load was successful, remove the config checkpoint
module.cli(['no checkpoint %s' % checkpoint])
def load(module, commands, result):
candidate = NetworkConfig(indent=2, contents='\n'.join(commands))
config = get_config(module)
configobjs = candidate.difference(config)
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
result['updates'] = commands
if not module.check_mode:
load_config(module, commands, result)
result['changed'] = True
def clean_result(result):
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
def main():
""" main entry point for module execution
"""
argument_spec = dict(
http=dict(aliases=['enable_http'], default=True, type='bool', setter='set_http'),
http_port=dict(default=80, type='int', setter='set_http'),
https=dict(aliases=['enable_https'], default=False, type='bool', setter='set_https'),
https_port=dict(default=443, type='int', setter='set_https'),
sandbox=dict(aliases=['enable_sandbox'], default=False, type='bool'),
# Only allow configuration of NXAPI using cli transport
transport=dict(required=True, choices=['cli']),
config=dict(),
        # Support for started and stopped is for backwards compatibility only and
# will be removed in a future version
state=dict(default='present', choices=['started', 'stopped', 'present', 'absent'])
)
module = NetworkModule(argument_spec=argument_spec,
connect_on_load=False,
supports_check_mode=True)
state = module.params['state']
warnings = list()
result = dict(changed=False, warnings=warnings)
if state == 'started':
state = 'present'
        warnings.append('state=started is deprecated and will be removed in '
                        'a future release. Please use state=present instead')
elif state == 'stopped':
state = 'absent'
        warnings.append('state=stopped is deprecated and will be removed in '
                        'a future release. Please use state=absent instead')
commands = list()
instance = get_instance(module)
invoke(state, module, instance, commands)
try:
load(module, commands, result)
except (ValueError, NetworkError):
load_checkpoint(module, result)
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
clean_result(result)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
jtyr/ansible-modules-core
|
network/nxos/nxos_nxapi.py
|
Python
|
gpl-3.0
| 10,922
|
#!/bin/sh
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
""":"
work_dir=$(dirname $0)
base_name=$(basename $0)
cd $work_dir
if [ $HOD_PYTHON_HOME ]; then
exec $HOD_PYTHON_HOME -OO -u $base_name ${1+"$@"}
elif [ -e /usr/bin/python ]; then
exec /usr/bin/python -OO -u $base_name ${1+"$@"}
elif [ -e /usr/local/bin/python ]; then
exec /usr/local/bin/python -OO -u $base_name ${1+"$@"}
else
exec python -OO -u $base_name ${1+"$@"}
fi
":"""
from os import popen3
import os, sys
import re
import time
from datetime import datetime
from optparse import OptionParser
myName = os.path.basename(sys.argv[0])
myName = re.sub(".*/", "", myName)
reVersion = re.compile(".*(\d+_\d+).*")
VERSION = '$HeadURL: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20/src/contrib/hod/support/logcondense.py $'
reMatch = reVersion.match(VERSION)
if reMatch:
VERSION = reMatch.group(1)
VERSION = re.sub("_", ".", VERSION)
else:
VERSION = 'DEV'
options = ( {'short' : "-p",
'long' : "--package",
'type' : "string",
'action' : "store",
'dest' : "package",
'metavar' : " ",
'default' : 'hadoop',
'help' : "Bin file for hadoop"},
{'short' : "-d",
'long' : "--days",
'type' : "int",
'action' : "store",
'dest' : "days",
'metavar' : " ",
'default' : 7,
'help' : "Number of days before logs are deleted"},
{'short' : "-c",
'long' : "--config",
'type' : "string",
'action' : "store",
'dest' : "config",
'metavar' : " ",
'default' : None,
'help' : "config directory for hadoop"},
{'short' : "-l",
'long' : "--logs",
'type' : "string",
'action' : "store",
'dest' : "log",
'metavar' : " ",
'default' : "/user",
'help' : "directory prefix under which logs are stored per user"},
{'short' : "-n",
'long' : "--dynamicdfs",
'type' : "string",
'action' : "store",
'dest' : "dynamicdfs",
'metavar' : " ",
'default' : "false",
'help' : "'true', if the cluster is used to bring up dynamic dfs clusters, 'false' otherwise"}
)
def getDfsCommand(options, args):
if (options.config == None):
cmd = options.package + " " + "dfs " + args
else:
cmd = options.package + " " + "--config " + options.config + " dfs " + args
return cmd
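# For example, getDfsCommand(options, "-lsr /tmp") yields
#   "hadoop dfs -lsr /tmp"                        when no --config is given, and
#   "hadoop --config /etc/hadoop dfs -lsr /tmp"   when --config /etc/hadoop is passed
# ("hadoop" being whatever --package points at; the paths here are hypothetical).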
def runcondense():
import shutil
options = process_args()
# if the cluster is used to bring up dynamic dfs, we must leave NameNode and JobTracker logs,
# otherwise only JobTracker logs. Likewise, in case of dynamic dfs, we must also look for
# deleting datanode logs
filteredNames = ['jobtracker']
deletedNamePrefixes = ['*-tasktracker-*']
if options.dynamicdfs == 'true':
filteredNames.append('namenode')
deletedNamePrefixes.append('*-datanode-*')
filepath = '%s/\*/hod-logs/' % (options.log)
cmd = getDfsCommand(options, "-lsr " + filepath)
(stdin, stdout, stderr) = popen3(cmd)
lastjobid = 'none'
toPurge = { }
for line in stdout:
try:
m = re.match("^.*\s(.*)\n$", line)
filename = m.group(1)
# file name format: <prefix>/<user>/hod-logs/<jobid>/[0-9]*-[jobtracker|tasktracker|datanode|namenode|]-hostname-YYYYMMDDtime-random.tar.gz
# first strip prefix:
if filename.startswith(options.log):
        filename = filename[len(options.log):]
if not filename.startswith('/'):
filename = '/' + filename
else:
continue
# Now get other details from filename.
k = re.match("/(.*)/hod-logs/(.*)/.*-.*-([0-9][0-9][0-9][0-9])([0-9][0-9])([0-9][0-9]).*$", filename)
if k:
username = k.group(1)
jobid = k.group(2)
datetimefile = datetime(int(k.group(3)), int(k.group(4)), int(k.group(5)))
datetimenow = datetime.utcnow()
diff = datetimenow - datetimefile
filedate = k.group(3) + k.group(4) + k.group(5)
newdate = datetimenow.strftime("%Y%m%d")
print "%s %s %s %d" % (filename, filedate, newdate, diff.days)
# if the cluster is used to bring up dynamic dfs, we must also leave NameNode logs.
foundFilteredName = False
for name in filteredNames:
if filename.find(name) >= 0:
foundFilteredName = True
break
if foundFilteredName:
continue
if (diff.days > options.days):
desttodel = filename
if not toPurge.has_key(jobid):
toPurge[jobid] = options.log.rstrip("/") + "/" + username + "/hod-logs/" + jobid
except Exception, e:
print >> sys.stderr, e
for job in toPurge.keys():
try:
for prefix in deletedNamePrefixes:
cmd = getDfsCommand(options, "-rm " + toPurge[job] + '/' + prefix)
print cmd
ret = 0
ret = os.system(cmd)
if (ret != 0):
print >> sys.stderr, "Command failed to delete file " + cmd
except Exception, e:
print >> sys.stderr, e
def process_args():
global options, myName, VERSION
usage = "usage: %s <ARGS>" % (myName)
version = "%s %s" % (myName, VERSION)
argParser = OptionParser(usage=usage, version=VERSION)
for option_element in options:
argParser.add_option(option_element['short'], option_element['long'],
type=option_element['type'], action=option_element['action'],
dest=option_element['dest'], default=option_element['default'],
metavar=option_element['metavar'], help=option_element['help'])
(parsedOptions, args) = argParser.parse_args()
if not os.path.exists(parsedOptions.package):
argParser.error("Could not find path to hadoop binary: %s" % parsedOptions.package)
  if parsedOptions.config and not os.path.exists(parsedOptions.config):
argParser.error("Could not find config: %s" % parsedOptions.config)
if parsedOptions.days <= 0:
argParser.error("Invalid number of days specified, must be > 0: %s" % parsedOptions.config)
if parsedOptions.dynamicdfs!='true' and parsedOptions.dynamicdfs!='false':
argParser.error("Invalid option for dynamicdfs, must be true or false: %s" % parsedOptions.dynamicdfs)
return parsedOptions
if __name__ == '__main__':
runcondense()
|
hanhlh/hadoop-0.20.2_FatBTree
|
src/contrib/hod/support/logcondense.py
|
Python
|
apache-2.0
| 7,097
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class BillionuploadsCom(DeadHoster):
__name__ = "BillionuploadsCom"
__type__ = "hoster"
__version__ = "0.07"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?billionuploads\.com/\w{12}'
__config__ = [] #@TODO: Remove in 0.4.10
__description__ = """Billionuploads.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
getInfo = create_getInfo(BillionuploadsCom)
|
LePastis/pyload
|
module/plugins/hoster/BillionuploadsCom.py
|
Python
|
gpl-3.0
| 567
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Computed lazily by _GetSdkBaseDir(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_base_dir = None
def __init__(self, spec):
self.spec = spec
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.app'
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
    if self.spec['type'] in ('shared_library',):
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def _GetSdkBaseDir(self):
"""Returns the root of the 'Developer' directory. On Xcode 4.2 and prior,
this is usually just /Developer. Xcode 4.3 moved that folder into the Xcode
bundle."""
if not XcodeSettings._sdk_base_dir:
import subprocess
job = subprocess.Popen(['xcode-select', '-print-path'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
print out
raise Exception('Error %d running xcode-select' % job.returncode)
# The Developer folder moved in Xcode 4.3.
xcode43_sdk_path = os.path.join(
out.rstrip(), 'Platforms/MacOSX.platform/Developer/SDKs')
if os.path.isdir(xcode43_sdk_path):
XcodeSettings._sdk_base_dir = xcode43_sdk_path
else:
XcodeSettings._sdk_base_dir = os.path.join(out.rstrip(), 'SDKs')
return XcodeSettings._sdk_base_dir
def _SdkPath(self):
sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx10.5')
if sdk_root.startswith('macosx'):
return os.path.join(self._GetSdkBaseDir(),
'MacOSX' + sdk_root[len('macosx'):] + '.sdk')
return sdk_root
def GetCflags(self, configname):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
    # This function (and the similar ones below) does not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings():
cflags.append('-isysroot %s' % sdk_root)
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._Appendf(cflags, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._Appendf(
ldflags, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'SDKROOT' in self._Settings():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name:
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))
self.configname = None
return ldflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = self.xcode_settings[configname].get(setting, None)
first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _AdjustLibrary(self, library):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
return l.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [ self._AdjustLibrary(library) for library in libraries]
return libraries
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def GetInclude(self, lang):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self.compiled_headers[lang]
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self.compiled_headers[lang] + '.gch'
def GetObjDependencies(self, sources, objs):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |compilable[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang)))
return result
def GetPchBuildCommands(self):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c'), '-x c-header', 'c', self.header),
(self._Gch('cc'), '-x c++-header', 'cc', self.header),
(self._Gch('m'), '-x objective-c-header', 'm', self.header),
(self._Gch('mm'), '-x objective-c++-header', 'mm', self.header),
]
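# Sketch of how a generator might wire MacPrefixHeader into its build rules; the
# emit_pch_rule/add_order_only_dep helpers and the config/sources/objs variables
# below are hypothetical stand-ins for generator-specific code:
#
#   prefix = MacPrefixHeader(xcode_settings, gyp_to_build_path, gyp_to_build_output)
#   cflags_c = xcode_settings.GetCflagsC(config) + [prefix.GetInclude('c')]
#   for gch, lang_flag, lang, header in prefix.GetPchBuildCommands():
#     emit_pch_rule(output=gch, input=header, extra_flags=lang_flag)
#   for source, obj, gch in prefix.GetObjDependencies(sources, objs):
#     add_order_only_dep(obj, gch)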
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules do not just produce a binary; they also
  package resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = output[0:-3] + 'nib'
yield output, res
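# For instance, with a Foo.app bundle a resource 'app/res/en.lproj/Localizable.strings'
# maps to '<product_dir>/Foo.app/Contents/Resources/en.lproj/Localizable.strings',
# and 'app/MainMenu.xib' maps to '.../Resources/MainMenu.nib' (paths hypothetical).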
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
  * |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
gyp_to_build_path: A function that converts paths relative to the
        current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on a as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
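# Example of the normalization above:
#   _NormalizeEnvVarReferences('$FOO/$(BAR)/${BAZ}') -> '${FOO}/${BAR}/${BAZ}'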
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise Exception(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
|
boudewijnrempt/breakpad
|
src/tools/gyp/pylib/gyp/xcode_emulation.py
|
Python
|
bsd-3-clause
| 41,719
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_topology_record
short_description: Manages GTM Topology Records
description:
- Manages GTM Topology Records. Once created, only topology record C(weight) can be modified.
version_added: 2.8
options:
source:
description:
- Specifies the origination of an incoming DNS request.
suboptions:
negate:
description:
          - When set to C(yes), the system selects this topology record when the request source does not match.
type: bool
default: no
subnet:
description:
- An IP address and network mask in the CIDR format.
region:
description:
- Specifies the name of region already defined in the configuration.
continent:
description:
- Specifies one of the seven continents, along with the C(Unknown) setting.
- Specifying C(Unknown) forces the system to use a default resolution
if the system cannot determine the location of the local DNS making the request.
- Full continent names and their abbreviated versions are supported.
country:
description:
- Specifies a country.
- In addition to the country full names, you may also specify their abbreviated
form, such as C(US) instead of C(United States).
- Valid country codes can be found here https://countrycode.org/.
state:
description:
- Specifies a state in a given country.
          - This parameter requires the C(country) option to be provided.
isp:
description:
- Specifies an Internet service provider.
choices:
- AOL
- BeijingCNC
- CNC
- ChinaEducationNetwork
- ChinaMobilNetwork
- ChinaRailwayTelcom
- ChinaTelecom
- ChinaUnicom
- Comcast
- Earthlink
- ShanghaiCNC
- ShanghaiTelecom
geo_isp:
description:
          - Specifies a geolocation ISP.
required: True
destination:
description:
- Specifies where the system directs the incoming DNS request.
suboptions:
negate:
description:
          - When set to C(yes), the system selects this topology record when the request destination does not match.
type: bool
default: no
subnet:
description:
- An IP address and network mask in the CIDR format.
region:
description:
- Specifies the name of region already defined in the configuration.
continent:
description:
- Specifies one of the seven continents, along with the C(Unknown) setting.
- Specifying C(Unknown) forces the system to use a default resolution
if the system cannot determine the location of the local DNS making the request.
- Full continent names and their abbreviated versions are supported.
country:
description:
- Specifies a country.
          - Full country names and their abbreviated versions are supported.
state:
description:
- Specifies a state in a given country.
          - This parameter requires the C(country) option to be provided.
pool:
description:
- Specifies the name of GTM pool already defined in the configuration.
datacenter:
description:
- Specifies the name of GTM data center already defined in the configuration.
isp:
description:
- Specifies an Internet service provider.
choices:
- AOL
- BeijingCNC
- CNC
- ChinaEducationNetwork
- ChinaMobilNetwork
- ChinaRailwayTelcom
- ChinaTelecom
- ChinaUnicom
- Comcast
- Earthlink
- ShanghaiCNC
- ShanghaiTelecom
geo_isp:
description:
          - Specifies a geolocation ISP.
required: True
weight:
description:
- Specifies the weight of the topology record.
- The system finds the weight of the first topology record that matches the server object (pool or pool member)
and the local DNS. The system then assigns that weight as the topology score for that server object.
- The system load balances to the server object with the highest topology score.
- If the system finds no topology record that matches both the server object and the local DNS,
then the system assigns that server object a zero score.
      - If the option is not specified when the record is created, the system will set it to a default value of C(1).
      - Valid range is 0 - 4294967295.
type: int
partition:
description:
- Device partition to manage resources on.
- Partition parameter is taken into account when used in conjunction with C(pool), C(data_center),
        and C(region) parameters; it is ignored otherwise.
default: Common
state:
description:
- When C(state) is C(present), ensures that the record exists.
- When C(state) is C(absent), ensures that the record is removed.
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create an IP Subnet and an ISP based topology record
bigip_gtm_topology_record:
source:
- subnet: 192.168.1.0/24
destination:
- isp: AOL
weight: 10
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a region and a pool based topology record
bigip_gtm_topology_record:
source:
- region: Foo
destination:
- pool: FooPool
partition: FooBar
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a negative region and a negative data center based topology record
bigip_gtm_topology_record:
source:
- region: Baz
- negate: yes
destination:
- datacenter: Baz-DC
- negate: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
weight:
description: The weight of the topology record.
returned: changed
type: int
sample: 20
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.ipaddress import is_valid_ip_network
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.ipaddress import is_valid_ip_network
class Parameters(AnsibleF5Parameters):
api_map = {
'score': 'weight',
}
api_attributes = [
'score',
]
returnables = [
'weight',
'name'
]
updatables = [
'weight',
]
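    # Only 'weight' (exposed by the API as 'score') can change once a topology
    # record exists; everything else is encoded in the record's name.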
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
countries = {
'Afghanistan': 'AF',
'Aland Islands': 'AX',
'Albania': 'AL',
'Algeria': 'DZ',
'American Samoa': 'AS',
'Andorra': 'AD',
'Angola': 'AO',
'Anguilla': 'AI',
'Antarctica': 'AQ',
'Antigua and Barbuda': 'AG',
'Argentina': 'AR',
'Armenia': 'AM',
'Aruba': 'AW',
'Australia': 'AU',
'Austria': 'AT',
'Azerbaijan': 'AZ',
'Bahamas': 'BS',
'Bahrain': 'BH',
'Bangladesh': 'BD',
'Barbados': 'BB',
'Belarus': 'BY',
'Belgium': 'BE',
'Belize': 'BZ',
'Benin': 'BJ',
'Bermuda': 'BM',
'Bhutan': 'BT',
'Bolivia': 'BO',
'Bonaire, Sint Eustatius and Saba': 'BQ',
'Bosnia and Herzegovina': 'BA',
'Botswana': 'BW',
'Bouvet Island': 'BV',
'Brazil': 'BR',
'British Indian Ocean Territory': 'IO',
'Brunei Darussalam': 'BN',
'Bulgaria': 'BG',
'Burkina Faso': 'BF',
'Burundi': 'BI',
'Cape Verde': 'CV',
'Cambodia': 'KH',
'Cameroon': 'CM',
'Canada': 'CA',
'Cayman Islands': 'KY',
'Central African Republic': 'CF',
'Chad': 'TD',
'Chile': 'CL',
'China': 'CN',
'Christmas Island': 'CX',
'Cocos (Keeling) Islands': 'CC',
'Colombia': 'CO',
'Comoros': 'KM',
'Congo': 'CG',
'Congo, The Democratic Republic of the': 'CD',
'Cook Islands': 'CK',
'Costa Rica': 'CR',
"Cote D'Ivoire": 'CI',
'Croatia': 'HR',
'Cuba': 'CU',
'Curaçao': 'CW',
'Cyprus': 'CY',
'Czech Republic': 'CZ',
'Denmark': 'DK',
'Djibouti': 'DJ',
'Dominica': 'DM',
'Dominican Republic': 'DO',
'Ecuador': 'EC',
'Egypt': 'EG',
'El Salvador': 'SV',
'Equatorial Guinea': 'GQ',
'Eritrea': 'ER',
'Estonia': 'EE',
'Ethiopia': 'ET',
'Falkland Islands (Malvinas)': 'FK',
'Faroe Islands': 'FO',
'Fiji': 'FJ',
'Finland': 'FI',
'France': 'FR',
'French Guiana': 'GF',
'French Polynesia': 'PF',
'French Southern Territories': 'TF',
'Gabon': 'GA',
'Gambia': 'GM',
'Georgia': 'GE',
'Germany': 'DE',
'Ghana': 'GH',
'Gibraltar': 'GI',
'Greece': 'GR',
'Greenland': 'GL',
'Grenada': 'GD',
'Guadeloupe': 'GP',
'Guam': 'GU',
'Guatemala': 'GT',
'Guernsey': 'GG',
'Guinea': 'GN',
'Guinea-Bissau': 'GW',
'Guyana': 'GY',
'Haiti': 'HT',
'Heard Island and McDonald Islands': 'HM',
'Holy See (Vatican City State)': 'VA',
'Honduras': 'HN',
'Hong Kong': 'HK',
'Hungary': 'HU',
'Iceland': 'IS',
'India': 'IN',
'Indonesia': 'ID',
'Iran, Islamic Republic of': 'IR',
'Iraq': 'IQ',
'Ireland': 'IE',
'Isle of Man': 'IM',
'Israel': 'IL',
'Italy': 'IT',
'Jamaica': 'JM',
'Japan': 'JP',
'Jersey': 'JE',
'Jordan': 'JO',
'Kazakhstan': 'KZ',
'Kenya': 'KE',
'Kiribati': 'KI',
"Korea, Democratic People's Republic of": 'KP',
'Korea, Republic of': 'KR',
'Kuwait': 'KW',
'Kyrgyzstan': 'KG',
"Lao People's Democratic Republic": 'LA',
'Latvia': 'LV',
'Lebanon': 'LB',
'Lesotho': 'LS',
'Liberia': 'LR',
'Libyan Arab Jamahiriya': 'LY',
'Liechtenstein': 'LI',
'Lithuania': 'LT',
'Luxembourg': 'LU',
'Macau': 'MO',
'Macedonia': 'MK',
'Madagascar': 'MG',
'Malawi': 'MW',
'Malaysia': 'MY',
'Maldives': 'MV',
'Mali': 'ML',
'Malta': 'MT',
'Marshall Islands': 'MH',
'Martinique': 'MQ',
'Mauritania': 'MR',
'Mauritius': 'MU',
'Mayotte': 'YT',
'Mexico': 'MX',
'Micronesia, Federated States of': 'FM',
'Moldova, Republic of': 'MD',
'Monaco': 'MC',
'Mongolia': 'MN',
'Montenegro': 'ME',
'Montserrat': 'MS',
'Morocco': 'MA',
'Mozambique': 'MZ',
'Myanmar': 'MM',
'Namibia': 'NA',
'Nauru': 'NR',
'Nepal': 'NP',
'Netherlands': 'NL',
'New Caledonia': 'NC',
'New Zealand': 'NZ',
'Nicaragua': 'NI',
'Niger': 'NE',
'Nigeria': 'NG',
'Niue': 'NU',
'Norfolk Island': 'NF',
'Northern Mariana Islands': 'MP',
'Norway': 'NO',
'Oman': 'OM',
'Pakistan': 'PK',
'Palau': 'PW',
'Palestinian Territory': 'PS',
'Panama': 'PA',
'Papua New Guinea': 'PG',
'Paraguay': 'PY',
'Peru': 'PE',
'Philippines': 'PH',
'Pitcairn Islands': 'PN',
'Poland': 'PL',
'Portugal': 'PT',
'Puerto Rico': 'PR',
'Qatar': 'QA',
'Reunion': 'RE',
'Romania': 'RO',
'Russian Federation': 'RU',
'Rwanda': 'RW',
'Saint Barthelemy': 'BL',
'Saint Helena': 'SH',
'Saint Kitts and Nevis': 'KN',
'Saint Lucia': 'LC',
'Saint Martin': 'MF',
'Saint Pierre and Miquelon': 'PM',
'Saint Vincent and the Grenadines': 'VC',
'Samoa': 'WS',
'San Marino': 'SM',
'Sao Tome and Principe': 'ST',
'Saudi Arabia': 'SA',
'Senegal': 'SN',
'Serbia': 'RS',
'Seychelles': 'SC',
'Sierra Leone': 'SL',
'Singapore': 'SG',
'Sint Maarten (Dutch part)': 'SX',
'Slovakia': 'SK',
'Slovenia': 'SI',
'Solomon Islands': 'SB',
'Somalia': 'SO',
'South Africa': 'ZA',
'South Georgia and the South Sandwich Islands': 'GS',
'South Sudan': 'SS',
'Spain': 'ES',
'Sri Lanka': 'LK',
'Sudan': 'SD',
'Suriname': 'SR',
'Svalbard and Jan Mayen': 'SJ',
'Swaziland': 'SZ',
'Sweden': 'SE',
'Switzerland': 'CH',
'Syrian Arab Republic': 'SY',
'Taiwan': 'TW',
'Tajikistan': 'TJ',
'Tanzania, United Republic of': 'TZ',
'Thailand': 'TH',
'Timor-Leste': 'TL',
'Togo': 'TG',
'Tokelau': 'TK',
'Tonga': 'TO',
'Trinidad and Tobago': 'TT',
'Tunisia': 'TN',
'Turkey': 'TR',
'Turkmenistan': 'TM',
'Turks and Caicos Islands': 'TC',
'Tuvalu': 'TV',
'Uganda': 'UG',
'Ukraine': 'UA',
'United Arab Emirates': 'AE',
'United Kingdom': 'GB',
'United States': 'US',
'United States Minor Outlying Islands': 'UM',
'Uruguay': 'UY',
'Uzbekistan': 'UZ',
'Vanuatu': 'VU',
'Venezuela': 'VE',
'Vietnam': 'VN',
'Virgin Islands, British': 'VG',
'Virgin Islands, U.S.': 'VI',
'Wallis and Futuna': 'WF',
'Western Sahara': 'EH',
'Yemen': 'YE',
'Zambia': 'ZM',
'Zimbabwe': 'ZW',
'Unrecognized': 'N/A',
'Asia/Pacific Region': 'AP',
'Europe': 'EU',
'Netherlands Antilles': 'AN',
'France, Metropolitan': 'FX',
'Anonymous Proxy': 'A1',
'Satellite Provider': 'A2',
'Other': 'O1',
}
continents = {
'Antarctica': 'AN',
'Asia': 'AS',
'Africa': 'AF',
'Europe': 'EU',
'North America': 'NA',
'South America': 'SA',
'Oceania': 'OC',
'Unknown': '--',
}
@property
def src_negate(self):
src_negate = self._values['source'].get('negate', None)
result = flatten_boolean(src_negate)
if result == 'yes':
return 'not'
return None
@property
def src_subnet(self):
src_subnet = self._values['source'].get('subnet', None)
if src_subnet is None:
return None
if is_valid_ip_network(src_subnet):
return src_subnet
raise F5ModuleError(
"Specified 'subnet' is not a valid subnet."
)
@property
def src_region(self):
src_region = self._values['source'].get('region', None)
if src_region is None:
return None
return fq_name(self.partition, src_region)
@property
def src_continent(self):
src_continent = self._values['source'].get('continent', None)
if src_continent is None:
return None
result = self.continents.get(src_continent, src_continent)
return result
@property
def src_country(self):
src_country = self._values['source'].get('country', None)
if src_country is None:
return None
result = self.countries.get(src_country, src_country)
return result
@property
def src_state(self):
src_country = self._values['source'].get('country', None)
src_state = self._values['source'].get('state', None)
if src_state is None:
return None
if src_country is None:
raise F5ModuleError(
'Country needs to be provided when specifying state'
)
result = '{0}/{1}'.format(src_country, src_state)
return result
@property
def src_isp(self):
src_isp = self._values['source'].get('isp', None)
if src_isp is None:
return None
return fq_name('Common', src_isp)
@property
def src_geo_isp(self):
src_geo_isp = self._values['source'].get('geo_isp', None)
return src_geo_isp
@property
def dst_negate(self):
dst_negate = self._values['destination'].get('negate', None)
result = flatten_boolean(dst_negate)
if result == 'yes':
return 'not'
return None
@property
def dst_subnet(self):
dst_subnet = self._values['destination'].get('subnet', None)
if dst_subnet is None:
return None
if is_valid_ip_network(dst_subnet):
return dst_subnet
raise F5ModuleError(
"Specified 'subnet' is not a valid subnet."
)
@property
def dst_region(self):
dst_region = self._values['destination'].get('region', None)
if dst_region is None:
return None
return fq_name(self.partition, dst_region)
@property
def dst_continent(self):
dst_continent = self._values['destination'].get('continent', None)
if dst_continent is None:
return None
result = self.continents.get(dst_continent, dst_continent)
return result
@property
def dst_country(self):
dst_country = self._values['destination'].get('country', None)
if dst_country is None:
return None
result = self.countries.get(dst_country, dst_country)
return result
@property
def dst_state(self):
dst_country = self.dst_country
dst_state = self._values['destination'].get('state', None)
if dst_state is None:
return None
if dst_country is None:
raise F5ModuleError(
'Country needs to be provided when specifying state'
)
result = '{0}/{1}'.format(dst_country, dst_state)
return result
@property
def dst_isp(self):
dst_isp = self._values['destination'].get('isp', None)
if dst_isp is None:
return None
return fq_name('Common', dst_isp)
@property
def dst_geo_isp(self):
dst_geo_isp = self._values['destination'].get('geo_isp', None)
return dst_geo_isp
@property
def dst_pool(self):
dst_pool = self._values['destination'].get('pool', None)
if dst_pool is None:
return None
return fq_name(self.partition, dst_pool)
@property
def dst_datacenter(self):
dst_datacenter = self._values['destination'].get('datacenter', None)
if dst_datacenter is None:
return None
return fq_name(self.partition, dst_datacenter)
@property
def source(self):
options = {
'negate': self.src_negate,
'subnet': self.src_subnet,
'region': self.src_region,
'continent': self.src_continent,
'country': self.src_country,
'state': self.src_state,
'isp': self.src_isp,
'geoip-isp': self.src_geo_isp,
}
result = 'ldns: {0}'.format(self._format_options(options))
return result
@property
def destination(self):
options = {
'negate': self.dst_negate,
'subnet': self.dst_subnet,
'region': self.dst_region,
'continent': self.dst_continent,
'country': self.dst_country,
'state': self.dst_state,
'datacenter': self.dst_datacenter,
'pool': self.dst_pool,
'isp': self.dst_isp,
'geoip-isp': self.dst_geo_isp,
}
result = 'server: {0}'.format(self._format_options(options))
return result
@property
def name(self):
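        # Illustrative sketch (values assumed, not from the original source):
        # with source={'subnet': '192.168.1.0/24'}, destination={'pool': 'FooPool'}
        # and partition Common, this evaluates to
        #   'ldns: subnet 192.168.1.0/24 server: pool /Common/FooPool'
        # which is the name format the BIG-IP topology API expects.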
result = '{0} {1}'.format(self.source, self.destination)
return result
def _format_options(self, options):
negate = None
cleaned = dict((k, v) for k, v in iteritems(options) if v is not None)
        if 'country' in cleaned and 'state' in cleaned:
            # 'state' already carries the country prefix, so a separate country
            # key would duplicate it in the record name.
del cleaned['country']
if 'negate' in cleaned.keys():
negate = cleaned['negate']
del cleaned['negate']
name, value = cleaned.popitem()
if negate:
result = '{0} {1} {2}'.format(negate, name, value)
return result
result = '{0} {1}'.format(name, value)
return result
@property
def weight(self):
weight = self._values['weight']
if weight is None:
return None
if 0 <= weight <= 4294967295:
return weight
raise F5ModuleError(
"Valid weight must be in range 0 - 4294967295"
)
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
name = self.want.name
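        # The record name contains spaces and '/' characters; encode them as
        # '%20' and '~' so the name can be used as a URI path segment.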
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
name.replace(' ', '%20').replace('/', '~')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
name = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
name.replace(' ', '%20').replace('/', '~')
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
name = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
name.replace(' ', '%20').replace('/', '~')
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
name = self.want.name
uri = "https://{0}:{1}/mgmt/tm/gtm/topology/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
name.replace(' ', '%20').replace('/', '~')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.choices = [
'AOL', 'BeijingCNC', 'CNC', 'ChinaEducationNetwork',
'ChinaMobilNetwork', 'ChinaRailwayTelcom', 'ChinaTelecom',
'ChinaUnicom', 'Comcast', 'Earthlink', 'ShanghaiCNC',
'ShanghaiTelecom',
]
argument_spec = dict(
source=dict(
required=True,
type='dict',
options=dict(
subnet=dict(),
region=dict(),
continent=dict(),
country=dict(),
state=dict(),
isp=dict(
choices=self.choices
),
geo_isp=dict(),
negate=dict(
type='bool',
default='no'
),
),
mutually_exclusive=[
['subnet', 'region', 'continent', 'country', 'isp', 'geo_isp']
]
),
destination=dict(
required=True,
type='dict',
options=dict(
subnet=dict(),
region=dict(),
continent=dict(),
country=dict(),
state=dict(),
pool=dict(),
datacenter=dict(),
isp=dict(
choices=self.choices
),
geo_isp=dict(),
negate=dict(
type='bool',
default='no'
),
),
mutually_exclusive=[
['subnet', 'region', 'continent', 'country', 'pool', 'datacenter', 'isp', 'geo_isp']
]
),
weight=dict(type='int'),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
Jorge-Rodriguez/ansible
|
lib/ansible/modules/network/f5/bigip_gtm_topology_record.py
|
Python
|
gpl-3.0
| 33,022
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
import IECoreNuke
import IECore
import nuke
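# These tests exercise the conversion between Nuke's '#'-padded frame number
# notation and the printf-style '%0Nd' notation used by IECore file sequences,
# in both directions.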
class StringUtilTest( IECoreNuke.TestCase ) :
def testNukeFileSequence( self ) :
self.assertEqual( IECoreNuke.nukeFileSequence( "/tmp/test.dpx" ), "/tmp/test.dpx" )
self.assertEqual( IECoreNuke.nukeFileSequence( "/tmp/test.#.dpx" ), "/tmp/test.%d.dpx" )
self.assertEqual( IECoreNuke.nukeFileSequence( "/tmp/test.##.dpx" ), "/tmp/test.%02d.dpx" )
self.assertEqual( IECoreNuke.nukeFileSequence( "/tmp/test_#########_test.dpx" ), "/tmp/test_%09d_test.dpx" )
def testIeCoreFileSequence( self ) :
self.assertEqual( IECoreNuke.ieCoreFileSequence( "/tmp/test.dpx" ), "/tmp/test.dpx" )
self.assertEqual( IECoreNuke.ieCoreFileSequence( "/tmp/test.%d.dpx" ), "/tmp/test.#.dpx" )
self.assertEqual( IECoreNuke.ieCoreFileSequence( "/tmp/test.%02d.dpx" ), "/tmp/test.##.dpx" )
self.assertEqual( IECoreNuke.ieCoreFileSequence( "/tmp/test_%09d_test.dpx" ), "/tmp/test_#########_test.dpx" )
self.assertRaises( RuntimeError, IECoreNuke.ieCoreFileSequence, "/tmp/test.%2d.dpx" )
if __name__ == "__main__":
unittest.main()
|
code-google-com/cortex-vfx
|
test/IECoreNuke/StringUtilTest.py
|
Python
|
bsd-3-clause
| 2,890
|
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.utils.encoding import force_text
class AccessMixin:
"""
Abstract CBV mixin that gives access mixins the same customizable
functionality.
"""
login_url = None
permission_denied_message = ''
raise_exception = False
redirect_field_name = REDIRECT_FIELD_NAME
def get_login_url(self):
"""
Override this method to override the login_url attribute.
"""
login_url = self.login_url or settings.LOGIN_URL
if not login_url:
raise ImproperlyConfigured(
'{0} is missing the login_url attribute. Define {0}.login_url, settings.LOGIN_URL, or override '
'{0}.get_login_url().'.format(self.__class__.__name__)
)
return force_text(login_url)
def get_permission_denied_message(self):
"""
Override this method to override the permission_denied_message attribute.
"""
return self.permission_denied_message
def get_redirect_field_name(self):
"""
Override this method to override the redirect_field_name attribute.
"""
return self.redirect_field_name
def handle_no_permission(self):
if self.raise_exception:
raise PermissionDenied(self.get_permission_denied_message())
return redirect_to_login(self.request.get_full_path(), self.get_login_url(), self.get_redirect_field_name())
class LoginRequiredMixin(AccessMixin):
"""Verify that the current user is authenticated."""
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
class PermissionRequiredMixin(AccessMixin):
"""Verify that the current user has all specified permissions."""
permission_required = None
def get_permission_required(self):
"""
Override this method to override the permission_required attribute.
Must return an iterable.
"""
if self.permission_required is None:
raise ImproperlyConfigured(
'{0} is missing the permission_required attribute. Define {0}.permission_required, or override '
'{0}.get_permission_required().'.format(self.__class__.__name__)
)
if isinstance(self.permission_required, str):
perms = (self.permission_required, )
else:
perms = self.permission_required
return perms
def has_permission(self):
"""
Override this method to customize the way permissions are checked.
"""
perms = self.get_permission_required()
return self.request.user.has_perms(perms)
def dispatch(self, request, *args, **kwargs):
if not self.has_permission():
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
class UserPassesTestMixin(AccessMixin):
"""
Deny a request with a permission error if the test_func() method returns
False.
"""
def test_func(self):
raise NotImplementedError(
'{0} is missing the implementation of the test_func() method.'.format(self.__class__.__name__)
)
def get_test_func(self):
"""
Override this method to use a different test_func method.
"""
return self.test_func
def dispatch(self, request, *args, **kwargs):
user_test_result = self.get_test_func()()
if not user_test_result:
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
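# Illustrative usage sketch (view and permission names are assumed, not part of
# this module):
#
#   from django.views.generic import TemplateView
#
#   class ReportView(LoginRequiredMixin, PermissionRequiredMixin, TemplateView):
#       permission_required = ('reports.view_report',)
#       raise_exception = True          # raise 403 instead of redirecting to login
#       template_name = 'reports/index.html'
#
# The mixins must appear before the view class in the bases so that their
# dispatch() runs first and can call handle_no_permission() for unauthorized
# requests.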
|
camilonova/django
|
django/contrib/auth/mixins.py
|
Python
|
bsd-3-clause
| 3,861
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions used by Arista ML2 Mechanism Driver."""
from neutron.common import exceptions
class AristaRpcError(exceptions.NeutronException):
message = _('%(msg)s')
class AristaConfigError(exceptions.NeutronException):
message = _('%(msg)s')
class AristaServicePluginRpcError(exceptions.NeutronException):
message = _('%(msg)s')
class AristaSevicePluginConfigError(exceptions.NeutronException):
message = _('%(msg)s')
|
samsu/neutron
|
plugins/ml2/drivers/arista/exceptions.py
|
Python
|
apache-2.0
| 1,035
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from erpnext.regional.united_arab_emirates.setup import make_custom_fields, add_print_formats
from erpnext.setup.setup_wizard.operations.taxes_setup import create_sales_tax
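# Saudi Arabia re-uses the United Arab Emirates regional setup: the same custom
# fields and print formats are installed, and a default sales tax is created for
# the company when one is given.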
def setup(company=None, patch=True):
make_custom_fields()
add_print_formats()
if company:
create_sales_tax(company)
|
Zlash65/erpnext
|
erpnext/regional/saudi_arabia/setup.py
|
Python
|
gpl-3.0
| 464
|
import unittest
import json
import logging
from mock import Mock, patch
from webob.multidict import MultiDict
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
from xmodule.tests import get_test_system, get_test_descriptor_system
from xmodule.tests.test_util_open_ended import DummyModulestore
from xmodule.open_ended_grading_classes.peer_grading_service import MockPeerGradingService
from xmodule.peer_grading_module import PeerGradingModule, PeerGradingDescriptor, MAX_ALLOWED_FEEDBACK_LENGTH
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
log = logging.getLogger(__name__)
class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingSample")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
calibrated_dict = {'location': "blah"}
coe_dict = {'location': coe_location.to_deprecated_string()}
save_dict = MultiDict({
'location': "blah",
'submission_id': 1,
'submission_key': "",
'score': 1,
'feedback': "",
'submission_flagged': False,
'answer_unknown': False,
})
save_dict.extend(('rubric_scores[]', val) for val in (0, 1))
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
super(PeerGradingModuleTest, self).setUp()
self.setup_modulestore(self.course_id.course)
self.peer_grading = self.get_module_from_location(self.problem_location)
self.coe = self.get_module_from_location(self.coe_location)
def test_module_closed(self):
"""
Test if peer grading is closed
@return:
"""
closed = self.peer_grading.closed()
self.assertFalse(closed)
def test_get_html(self):
"""
Test to see if the module can be rendered
@return:
"""
_html = self.peer_grading.get_html()
def test_get_data(self):
"""
Try getting data from the external grading service
@return:
"""
success, _data = self.peer_grading.query_data_for_location(self.problem_location)
self.assertTrue(success)
def test_get_score_none(self):
"""
Test getting the score.
"""
score = self.peer_grading.get_score()
# Score should be None.
self.assertIsNone(score['score'])
def test_get_max_score(self):
"""
Test getting the max score
@return:
"""
max_score = self.peer_grading.max_score()
self.assertEquals(max_score, None)
def get_next_submission(self):
"""
Test to see if we can get the next mock submission
@return:
"""
success, _next_submission = self.peer_grading.get_next_submission({'location': 'blah'})
self.assertEqual(success, True)
def test_save_grade(self):
"""
Test if we can save the grade
@return:
"""
response = self.peer_grading.save_grade(self.save_dict)
self.assertEqual(response['success'], True)
def test_is_student_calibrated(self):
"""
Check to see if the student has calibrated yet
@return:
"""
response = self.peer_grading.is_student_calibrated(self.calibrated_dict)
self.assertTrue(response['success'])
def test_show_calibration_essay(self):
"""
Test showing the calibration essay
@return:
"""
response = self.peer_grading.show_calibration_essay(self.calibrated_dict)
self.assertTrue(response['success'])
def test_save_calibration_essay(self):
"""
Test saving the calibration essay
@return:
"""
response = self.peer_grading.save_calibration_essay(self.save_dict)
self.assertTrue(response['success'])
def test_peer_grading_problem(self):
"""
See if we can render a single problem
@return:
"""
response = self.peer_grading.peer_grading_problem(self.coe_dict)
self.assertTrue(response['success'])
def test___find_corresponding_module_for_location_exceptions(self):
"""
Unit test for the exception cases of __find_corresponding_module_for_location
Mainly for diff coverage
@return:
"""
# pylint: disable=protected-access
with self.assertRaises(ItemNotFoundError):
self.peer_grading._find_corresponding_module_for_location(
Location('org', 'course', 'run', 'category', 'name', 'revision')
)
def test_get_instance_state(self):
"""
Get the instance state dict
@return:
"""
self.peer_grading.get_instance_state()
def test_save_grade_with_long_feedback(self):
"""
        Test that save_grade() returns an error message when the feedback is too long.
"""
feedback_fragment = "This is very long feedback."
self.save_dict["feedback"] = feedback_fragment * (
(MAX_ALLOWED_FEEDBACK_LENGTH / len(feedback_fragment) + 1)
)
response = self.peer_grading.save_grade(self.save_dict)
# Should not succeed.
self.assertEqual(response['success'], False)
self.assertEqual(
response['error'],
"Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
)
def test_get_score_success_fails(self):
"""
        Test that the score is None when query_data_for_location does not succeed.
"""
score_dict = self.get_score(False, 0, 0)
# Score dict should be None.
self.assertIsNone(score_dict)
def test_get_score(self):
"""
        Test that if the student has graded the required number of submissions,
        their score is 1.0.
"""
score_dict = self.get_score(True, 3, 3)
# Score should be 1.0.
self.assertEqual(score_dict["score"], 1.0)
# Testing score after data is stored in student_data_for_location in xmodule.
_score_dict = self.peer_grading.get_score()
# Score should be 1.0.
self.assertEqual(_score_dict["score"], 1.0)
def test_get_score_zero(self):
"""
        Test that if the student has graded fewer than the required number of
        submissions, their score is 0.0.
"""
score_dict = self.get_score(True, 2, 3)
# Score should be 0.0.
self.assertEqual(score_dict["score"], 0.0)
def get_score(self, success, count_graded, count_required):
self.peer_grading.use_for_single_location_local = True
self.peer_grading.graded = True
# Patch for external grading service.
with patch('xmodule.peer_grading_module.PeerGradingModule.query_data_for_location') as mock_query_data_for_location:
mock_query_data_for_location.return_value = (
success,
{"count_graded": count_graded, "count_required": count_required}
)
# Returning score dict.
return self.peer_grading.get_score()
class MockPeerGradingServiceProblemList(MockPeerGradingService):
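    # Stand-in for the external peer grading service that reports a single
    # gradable problem, so that peer_grading() has an entry to render in the
    # problem-list test below.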
def get_problem_list(self, course_id, grader_id):
return {'success': True,
'problem_list': [
{
"num_graded": 3,
"num_pending": 681,
"num_required": 3,
"location": course_id.make_usage_key('combinedopenended', 'SampleQuestion'),
"problem_name": "Peer-Graded Essay"
},
]}
class PeerGradingModuleScoredTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingScored")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
super(PeerGradingModuleScoredTest, self).setUp()
self.setup_modulestore(self.course_id.course)
def test_metadata_load(self):
peer_grading = self.get_module_from_location(self.problem_location)
self.assertFalse(peer_grading.closed())
def test_problem_list(self):
"""
Test to see if a peer grading problem list can be correctly initialized.
"""
# Initialize peer grading module.
peer_grading = self.get_module_from_location(self.problem_location)
# Ensure that it cannot find any peer grading.
html = peer_grading.peer_grading()
self.assertNotIn("Peer-Graded", html)
# Swap for our mock class, which will find peer grading.
peer_grading.peer_gs = MockPeerGradingServiceProblemList()
html = peer_grading.peer_grading()
self.assertIn("Peer-Graded", html)
class PeerGradingModuleLinkedTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading that is linked to an open ended module.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingLinked")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system.
"""
super(PeerGradingModuleLinkedTest, self).setUp()
self.setup_modulestore(self.course_id.course)
@property
def field_data(self):
"""
Setup the proper field data for a peer grading module.
"""
return DictFieldData({
'data': '<peergrading/>',
'location': self.problem_location,
'use_for_single_location': True,
'link_to_location': self.coe_location.to_deprecated_string(),
'graded': True,
})
@property
def scope_ids(self):
"""
Return the proper scope ids for the peer grading module.
"""
return ScopeIds(None, None, self.problem_location, self.problem_location)
def _create_peer_grading_descriptor_with_linked_problem(self):
# Initialize the peer grading module.
system = get_test_descriptor_system()
return system.construct_xblock_from_class(
PeerGradingDescriptor,
field_data=self.field_data,
scope_ids=self.scope_ids
)
def _create_peer_grading_with_linked_problem(self, location, valid_linked_descriptor=True):
"""
Create a peer grading problem with a linked location.
"""
# Mock the linked problem descriptor.
linked_descriptor = Mock()
linked_descriptor.location = location
# Mock the peer grading descriptor.
pg_descriptor = Mock()
pg_descriptor.location = self.problem_location
if valid_linked_descriptor:
pg_descriptor.get_required_module_descriptors = lambda: [linked_descriptor, ]
else:
pg_descriptor.get_required_module_descriptors = lambda: []
test_system = self.get_module_system(pg_descriptor)
# Initialize the peer grading module.
peer_grading = PeerGradingModule(
pg_descriptor,
test_system,
self.field_data,
self.scope_ids,
)
return peer_grading
def _get_descriptor_with_invalid_link(self, exception_to_raise):
"""
Ensure that a peer grading descriptor with an invalid link will return an empty list.
"""
# Create a descriptor, and make loading an item throw an error.
descriptor = self._create_peer_grading_descriptor_with_linked_problem()
descriptor.system.load_item = Mock(side_effect=exception_to_raise)
# Ensure that modules is a list of length 0.
modules = descriptor.get_required_module_descriptors()
self.assertIsInstance(modules, list)
self.assertEqual(len(modules), 0)
def test_descriptor_with_nopath(self):
"""
Test to see if a descriptor with a NoPathToItem error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(NoPathToItem)
def test_descriptor_with_item_not_found(self):
"""
Test to see if a descriptor with an ItemNotFound error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(ItemNotFoundError)
def test_invalid_link(self):
"""
Ensure that a peer grading problem with no linked locations stays in panel mode.
"""
# Setup the peer grading module with no linked locations.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False)
self.assertFalse(peer_grading.use_for_single_location_local)
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_problem(self):
"""
Ensure that a peer grading problem with a linked location loads properly.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# Ensure that it is properly setup.
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_ajax(self):
"""
Ensure that a peer grading problem with a linked location responds to ajax calls.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# If we specify a location, it will render the problem for that location.
data = peer_grading.handle_ajax('problem', {'location': self.coe_location.to_deprecated_string()})
self.assertTrue(json.loads(data)['success'])
# If we don't specify a location, it should use the linked location.
data = peer_grading.handle_ajax('problem', {})
self.assertTrue(json.loads(data)['success'])
def test_linked_score(self):
"""
Ensure that a peer grading problem with a linked location is properly scored.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
score_dict = peer_grading.get_score()
self.assertEqual(score_dict['score'], 1)
self.assertEqual(score_dict['total'], 1)
def test_get_next_submission(self):
"""
Ensure that a peer grading problem with a linked location can get a submission to score.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
data = peer_grading.handle_ajax('get_next_submission', {'location': self.coe_location})
self.assertEqual(json.loads(data)['submission_id'], 1)
|
beni55/edx-platform
|
common/lib/xmodule/xmodule/tests/test_peer_grading.py
|
Python
|
agpl-3.0
| 16,222
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Stock Transfer Split Multi module for Odoo
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import Warning
class StockTransferSplitMulti(models.TransientModel):
_name = "stock.transfer.split.multi"
_description = "Split by multi units on stock transfer wizard"
split_qty = fields.Float(
string="Quantity to Extract",
digits=dp.get_precision('Product Unit of Measure'), required=True)
@api.multi
def split_multi_quantities(self):
self.ensure_one()
assert self.env.context.get('active_model') == \
'stock.transfer_details_items', 'Wrong underlying model'
trf_line = self.env['stock.transfer_details_items'].browse(
self.env.context['active_id'])
split_qty = self[0].split_qty
if split_qty > 0:
if split_qty >= trf_line.quantity:
raise Warning(
_("The Quantity to extract (%s) cannot be superior or "
"equal to the quantity of the line (%s)")
% (split_qty, trf_line.quantity))
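            # Copy the transfer line for the extracted quantity, detach it from
            # any existing pack operation, and reduce the original line.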
new_line = trf_line.copy()
new_line.write({'quantity': split_qty, 'packop_id': False})
trf_line.quantity -= split_qty
action = trf_line.transfer_id.wizard_view()
return action
@api.multi
def cancel(self):
"""We have to re-call the wizard when the user clicks on Cancel"""
self.ensure_one()
assert self.env.context.get('active_model') == \
'stock.transfer_details_items', 'Wrong underlying model'
trf_line = self.env['stock.transfer_details_items'].browse(
self.env.context['active_id'])
action = trf_line.transfer_id.wizard_view()
return action
|
vrenaville/stock-logistics-workflow
|
stock_transfer_split_multi/wizard/stock_transfer_split_multi.py
|
Python
|
agpl-3.0
| 2,813
|
# Copyright 2011 Google Inc. All Rights Reserved.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credentials themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import errno
import fcntl
import logging
import os
import threading
from anyjson import simplejson
from client import Storage as BaseStorage
from client import Credentials
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
_multistores = {}
_multistores_lock = threading.Lock()
class Error(Exception):
"""Base error for this module."""
pass
class NewerCredentialStoreError(Error):
"""The credential store is a newer version that supported."""
pass
def get_credential_storage(filename, client_id, user_agent, scope,
warn_on_readonly=True):
"""Get a Storage instance for a credential.
Args:
filename: The JSON file storing a set of credentials
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: string or list of strings, Scope(s) being requested
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
filename = os.path.realpath(os.path.expanduser(filename))
_multistores_lock.acquire()
try:
multistore = _multistores.setdefault(
filename, _MultiStore(filename, warn_on_readonly))
finally:
_multistores_lock.release()
if type(scope) is list:
scope = ' '.join(scope)
return multistore._get_storage(client_id, user_agent, scope)
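# Illustrative usage of get_credential_storage() above (the file path, client id
# and scope are assumed values, not part of this module):
#
#   storage = get_credential_storage(
#       '~/.my_tool/credentials.json',
#       client_id='000000000000.apps.googleusercontent.com',
#       user_agent='my-tool/1.0',
#       scope='https://www.googleapis.com/auth/userinfo.email')
#   credentials = storage.get()            # None if nothing matches the key
#   if credentials is not None:
#       storage.put(credentials)           # write a refreshed credential back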
class _MultiStore(object):
"""A file backed store for multiple credentials."""
def __init__(self, filename, warn_on_readonly=True):
"""Initialize the class.
This will create the file if necessary.
"""
self._filename = filename
self._thread_lock = threading.Lock()
self._file_handle = None
self._read_only = False
self._warn_on_readonly = warn_on_readonly
self._create_file_if_needed()
# Cache of deserialized store. This is only valid after the
# _MultiStore is locked or _refresh_data_cache is called. This is
# of the form of:
#
# (client_id, user_agent, scope) -> OAuth2Credential
#
# If this is None, then the store hasn't been read yet.
self._data = None
class _Storage(BaseStorage):
"""A Storage object that knows how to read/write a single credential."""
def __init__(self, multistore, client_id, user_agent, scope):
self._multistore = multistore
self._client_id = client_id
self._user_agent = user_agent
self._scope = scope
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
self._multistore._lock()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._multistore._unlock()
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
credential = self._multistore._get_credential(
self._client_id, self._user_agent, self._scope)
if credential:
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self._multistore._update_credential(credentials, self._scope)
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._filename):
old_umask = os.umask(0177)
try:
open(self._filename, 'a+b').close()
finally:
os.umask(old_umask)
def _lock(self):
"""Lock the entire multistore."""
self._thread_lock.acquire()
# Check to see if the file is writeable.
try:
self._file_handle = open(self._filename, 'r+b')
fcntl.lockf(self._file_handle.fileno(), fcntl.LOCK_EX)
except IOError, e:
if e.errno != errno.EACCES:
raise e
self._file_handle = open(self._filename, 'rb')
self._read_only = True
if self._warn_on_readonly:
logger.warn('The credentials file (%s) is not writable. Opening in '
'read-only mode. Any refreshed credentials will only be '
'valid for this run.' % self._filename)
if os.path.getsize(self._filename) == 0:
logger.debug('Initializing empty multistore file')
# The multistore is empty so write out an empty file.
self._data = {}
self._write()
elif not self._read_only or self._data is None:
# Only refresh the data if we are read/write or we haven't
      # cached the data yet. If we are readonly, we assume it isn't
# changing out from under us and that we only have to read it
# once. This prevents us from whacking any new access keys that
# we have cached in memory but were unable to write out.
self._refresh_data_cache()
def _unlock(self):
"""Release the lock on the multistore."""
if not self._read_only:
fcntl.lockf(self._file_handle.fileno(), fcntl.LOCK_UN)
self._file_handle.close()
self._thread_lock.release()
def _locked_json_read(self):
"""Get the raw content of the multistore file.
The multistore must be locked when this is called.
Returns:
The contents of the multistore decoded as JSON.
"""
assert self._thread_lock.locked()
self._file_handle.seek(0)
return simplejson.load(self._file_handle)
def _locked_json_write(self, data):
"""Write a JSON serializable data structure to the multistore.
The multistore must be locked when this is called.
Args:
data: The data to be serialized and written.
"""
assert self._thread_lock.locked()
if self._read_only:
return
self._file_handle.seek(0)
simplejson.dump(data, self._file_handle, sort_keys=True, indent=2)
self._file_handle.truncate()
def _refresh_data_cache(self):
"""Refresh the contents of the multistore.
The multistore must be locked when this is called.
Raises:
NewerCredentialStoreError: Raised when a newer client has written the
store.
"""
self._data = {}
try:
raw_data = self._locked_json_read()
except Exception:
logger.warn('Credential data store could not be loaded. '
'Will ignore and overwrite.')
return
version = 0
try:
version = raw_data['file_version']
except Exception:
logger.warn('Missing version for credential data store. It may be '
'corrupt or an old version. Overwriting.')
if version > 1:
raise NewerCredentialStoreError(
'Credential file has file_version of %d. '
'Only file_version of 1 is supported.' % version)
credentials = []
try:
credentials = raw_data['data']
except (TypeError, KeyError):
pass
for cred_entry in credentials:
try:
(key, credential) = self._decode_credential_from_json(cred_entry)
self._data[key] = credential
except:
# If something goes wrong loading a credential, just ignore it
logger.info('Error decoding credential, skipping', exc_info=True)
def _decode_credential_from_json(self, cred_entry):
"""Load a credential from our JSON serialization.
Args:
cred_entry: A dict entry from the data member of our format
Returns:
(key, cred) where the key is the key tuple and the cred is the
OAuth2Credential object.
"""
raw_key = cred_entry['key']
client_id = raw_key['clientId']
user_agent = raw_key['userAgent']
scope = raw_key['scope']
key = (client_id, user_agent, scope)
    credential = Credentials.new_from_json(
        simplejson.dumps(cred_entry['credential']))
return (key, credential)
def _write(self):
"""Write the cached data back out.
The multistore must be locked.
"""
raw_data = {'file_version': 1}
raw_creds = []
raw_data['data'] = raw_creds
for (cred_key, cred) in self._data.items():
raw_key = {
'clientId': cred_key[0],
'userAgent': cred_key[1],
'scope': cred_key[2]
}
raw_cred = simplejson.loads(cred.to_json())
raw_creds.append({'key': raw_key, 'credential': raw_cred})
self._locked_json_write(raw_data)
def _get_credential(self, client_id, user_agent, scope):
"""Get a credential from the multistore.
The multistore must be locked.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: A string for the scope(s) being requested
Returns:
The credential specified or None if not present
"""
key = (client_id, user_agent, scope)
return self._data.get(key, None)
def _update_credential(self, cred, scope):
"""Update a credential and write the multistore.
This must be called when the multistore is locked.
Args:
cred: The OAuth2Credential to update/set
scope: The scope(s) that this credential covers
"""
key = (cred.client_id, cred.user_agent, scope)
self._data[key] = cred
self._write()
def _get_storage(self, client_id, user_agent, scope):
"""Get a Storage object to get/set a credential.
This Storage is a 'view' into the multistore.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: A string for the scope(s) being requested
Returns:
A Storage object that can be used to get/set this cred
"""
return self._Storage(self, client_id, user_agent, scope)
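# Illustrative usage sketch, assuming the module-level get_credential_storage()
# helper defined elsewhere in this file; the path and scope below are
# hypothetical:
#
#   storage = get_credential_storage(
#       '/path/to/credentials.data', client_id, user_agent,
#       'https://www.googleapis.com/auth/some.scope')
#   credentials = storage.get()    # acquires the thread/file lock internally
#   if credentials is not None:
#       credentials.refresh(httplib2.Http())
#       storage.put(credentials)   # writes the refreshed credential back out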
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/google-api-python-client/oauth2client/multistore_file.py
|
Python
|
bsd-3-clause
| 10,605
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_analytics import ApiParameters
from library.modules.bigip_profile_analytics import ModuleParameters
from library.modules.bigip_profile_analytics import ModuleManager
from library.modules.bigip_profile_analytics import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_profile_analytics import ApiParameters
from ansible.modules.network.f5.bigip_profile_analytics import ModuleParameters
from ansible.modules.network.f5.bigip_profile_analytics import ModuleManager
from ansible.modules.network.f5.bigip_profile_analytics import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='bar',
description='foo',
collect_geo=True,
collect_ip=True,
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/bar'
assert p.description == 'foo'
assert p.collect_geo == 'yes'
assert p.collect_ip == 'yes'
def test_api_parameters(self):
args = load_fixture('load_ltm_profile_analytics_1.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.collect_geo == 'no'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
name='foo',
parent='bar',
description='foo',
collect_geo=True,
collect_ip=True,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
kvar/ansible
|
test/units/modules/network/f5/test_bigip_profile_analytics.py
|
Python
|
gpl-3.0
| 3,514
|
# -*- coding: utf-8 -*-
#
#
# Authors: Guewen Baconnier
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api, exceptions, _
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.multi
def _prepare_interest_line(self, interest_amount):
self.ensure_one()
product = self.env.ref('sale_payment_term_interest.'
'product_product_sale_order_interest')
values = {'product_uom_qty': 1,
'order_id': self.id,
'product_id': product.id,
'interest_line': True,
'sequence': 99999,
}
onchanged = self.env['sale.order.line'].product_id_change(
self.pricelist_id.id,
product.id,
qty=1,
uom=product.uom_id.id,
partner_id=self.partner_id.id,
fiscal_position=self.fiscal_position.id)
values.update(onchanged['value'])
values['price_unit'] = interest_amount
return values
@api.multi
def get_interest_value(self):
self.ensure_one()
term = self.payment_term
if not term:
return 0.
if not any(line.interest_rate for line in term.line_ids):
return 0.
line = self._get_interest_line()
if line:
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = line.tax_id.compute_all(price,
line.product_uom_qty,
product=line.product_id,
partner=self.partner_id)
            # remove the current interest value from the total if one is
            # already set
current_interest = taxes['total_included']
else:
current_interest = 0.
interest = term.compute_total_interest(
self.amount_total - current_interest,
)
return interest
@api.multi
def _get_interest_line(self):
for line in self.order_line:
if line.interest_line:
return line
return self.env['sale.order.line'].browse()
@api.multi
def update_interest_line(self):
for order in self:
interest_line = order._get_interest_line()
interest_amount = order.get_interest_value()
values = order._prepare_interest_line(interest_amount)
if interest_line:
if interest_amount:
values.pop('name', None) # keep the current name
interest_line.write(values)
else:
interest_line.unlink()
elif interest_amount:
self.env['sale.order.line'].create(values)
@api.multi
def check_interest_line(self):
self.ensure_one()
interest_amount = self.get_interest_value()
currency = self.currency_id
interest_line = self._get_interest_line()
current_amount = interest_line.price_unit
if currency.compare_amounts(current_amount, interest_amount) != 0:
raise exceptions.Warning(
_('Interest amount differs. Click on "(update interests)" '
'next to the payment terms.')
)
@api.multi
def action_button_confirm(self):
result = super(SaleOrder, self).action_button_confirm()
self.check_interest_line()
return result
@api.model
def create(self, vals):
record = super(SaleOrder, self).create(vals)
record.update_interest_line()
return record
@api.multi
def write(self, vals):
result = super(SaleOrder, self).write(vals)
self.update_interest_line()
return result
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
interest_line = fields.Boolean()
|
damdam-s/sale-workflow
|
sale_payment_term_interest/model/sale_order.py
|
Python
|
agpl-3.0
| 4,591
|
def main(req, res):
return ([
(b'Cache-Control', b'no-cache, must-revalidate'),
(b'Pragma', b'no-cache'),
(b'Content-Type', b'application/javascript')],
b'echo_output = "%s";\n' % req.GET[b'msg'])
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/service-workers/service-worker/resources/import-scripts-echo.py
|
Python
|
bsd-3-clause
| 231
|
import unittest
class X(unittest.TestCase):
def test1(self):
self.assertEqual()
def test2(self):
self.assertEqual()
def test3(self):
self.assertEqual()
def test4(self):
self.assertEqual()
def test5(self):
self.assertEqual()
def test6(self):
self.assertEqual()
def test7(self):
self.assertEqual()
def test8(self):
self.assertEqual()
def test9(self):
self.assertEqual()
def test10(self):
self.assertEqual()
def test11(self):
self.assertEqual()
def test12(self):
self.assertEqual()
def test13(self):
self.assertEqual()
def test14(self):
self.assertEqual()
def test15(self):
self.assertEqual()
def test16(self):
self.assertEqual()
def test17(self):
self.assertEqual()
def test18(self):
self.assertEqual()
|
paplorinc/intellij-community
|
python/testData/editing/pairedParenthesesMultipleCalls.after.py
|
Python
|
apache-2.0
| 934
|
import os
import site
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings_local")
wsgidir = os.path.dirname(__file__)
for path in ['../',
'../..',
'../../apps']:
site.addsitedir(os.path.abspath(os.path.join(wsgidir, path)))
from ..theme_update import application
|
muffinresearch/addons-server
|
services/wsgi/theme_update.py
|
Python
|
bsd-3-clause
| 302
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import cpp_util
from json_parse import OrderedDict
import schema_util
class _TypeDependency(object):
"""Contains information about a dependency a namespace has on a type: the
type's model, and whether that dependency is "hard" meaning that it cannot be
forward declared.
"""
def __init__(self, type_, hard=False):
self.type_ = type_
self.hard = hard
def GetSortKey(self):
return '%s.%s' % (self.type_.namespace.name, self.type_.name)
class CppTypeGenerator(object):
"""Manages the types of properties and provides utilities for getting the
C++ type out of a model.Property
"""
def __init__(self, model, schema_loader, default_namespace=None):
"""Creates a cpp_type_generator. The given root_namespace should be of the
format extensions::api::sub. The generator will generate code suitable for
use in the given model's namespace.
"""
self._default_namespace = default_namespace
if self._default_namespace is None:
self._default_namespace = model.namespaces.values()[0]
self._schema_loader = schema_loader
def GetCppNamespaceName(self, namespace):
"""Gets the mapped C++ namespace name for the given namespace relative to
the root namespace.
"""
return namespace.unix_name
def GetNamespaceStart(self):
"""Get opening self._default_namespace namespace declaration.
"""
return Code().Append('namespace %s {' %
self.GetCppNamespaceName(self._default_namespace))
def GetNamespaceEnd(self):
"""Get closing self._default_namespace namespace declaration.
"""
return Code().Append('} // %s' %
self.GetCppNamespaceName(self._default_namespace))
def GetEnumNoneValue(self, type_):
"""Gets the enum value in the given model.Property indicating no value has
been set.
"""
return '%s_NONE' % self.FollowRef(type_).unix_name.upper()
def GetEnumValue(self, type_, enum_value):
"""Gets the enum value of the given model.Property of the given type.
e.g VAR_STRING
"""
value = '%s_%s' % (self.FollowRef(type_).unix_name.upper(),
cpp_util.Classname(enum_value.name.upper()))
# To avoid collisions with built-in OS_* preprocessor definitions, we add a
    # trailing underscore to enum names that start with OS_.
if value.startswith("OS_"):
value += "_"
return value
def GetCppType(self, type_, is_ptr=False, is_in_container=False):
"""Translates a model.Property or model.Type into its C++ type.
If REF types from different namespaces are referenced, will resolve
using self._schema_loader.
Use |is_ptr| if the type is optional. This will wrap the type in a
scoped_ptr if possible (it is not possible to wrap an enum).
Use |is_in_container| if the type is appearing in a collection, e.g. a
std::vector or std::map. This will wrap it in the correct type with spacing.
"""
cpp_type = None
if type_.property_type == PropertyType.REF:
ref_type = self._FindType(type_.ref_type)
if ref_type is None:
raise KeyError('Cannot find referenced type: %s' % type_.ref_type)
if self._default_namespace is ref_type.namespace:
cpp_type = ref_type.name
else:
cpp_type = '%s::%s' % (ref_type.namespace.unix_name, ref_type.name)
elif type_.property_type == PropertyType.BOOLEAN:
cpp_type = 'bool'
elif type_.property_type == PropertyType.INTEGER:
cpp_type = 'int'
elif type_.property_type == PropertyType.INT64:
cpp_type = 'int64'
elif type_.property_type == PropertyType.DOUBLE:
cpp_type = 'double'
elif type_.property_type == PropertyType.STRING:
cpp_type = 'std::string'
elif type_.property_type == PropertyType.ENUM:
cpp_type = cpp_util.Classname(type_.name)
elif type_.property_type == PropertyType.ANY:
cpp_type = 'base::Value'
elif (type_.property_type == PropertyType.OBJECT or
type_.property_type == PropertyType.CHOICES):
cpp_type = cpp_util.Classname(type_.name)
elif type_.property_type == PropertyType.FUNCTION:
# Functions come into the json schema compiler as empty objects. We can
# record these as empty DictionaryValues so that we know if the function
# was passed in or not.
cpp_type = 'base::DictionaryValue'
elif type_.property_type == PropertyType.ARRAY:
item_cpp_type = self.GetCppType(type_.item_type, is_in_container=True)
cpp_type = 'std::vector<%s>' % cpp_util.PadForGenerics(item_cpp_type)
elif type_.property_type == PropertyType.BINARY:
cpp_type = 'std::string'
else:
raise NotImplementedError('Cannot get type of %s' % type_.property_type)
# HACK: optional ENUM is represented elsewhere with a _NONE value, so it
# never needs to be wrapped in pointer shenanigans.
# TODO(kalman): change this - but it's an exceedingly far-reaching change.
if not self.FollowRef(type_).property_type == PropertyType.ENUM:
if is_in_container and (is_ptr or not self.IsCopyable(type_)):
cpp_type = 'linked_ptr<%s>' % cpp_util.PadForGenerics(cpp_type)
elif is_ptr:
cpp_type = 'scoped_ptr<%s>' % cpp_util.PadForGenerics(cpp_type)
return cpp_type
def IsCopyable(self, type_):
return not (self.FollowRef(type_).property_type in (PropertyType.ANY,
PropertyType.ARRAY,
PropertyType.OBJECT,
PropertyType.CHOICES))
def GenerateForwardDeclarations(self):
"""Returns the forward declarations for self._default_namespace.
"""
c = Code()
for namespace, dependencies in self._NamespaceTypeDependencies().items():
c.Append('namespace %s {' % namespace.unix_name)
for dependency in dependencies:
# No point forward-declaring hard dependencies.
if dependency.hard:
continue
# Add more ways to forward declare things as necessary.
if dependency.type_.property_type in (PropertyType.CHOICES,
PropertyType.OBJECT):
c.Append('struct %s;' % dependency.type_.name)
c.Append('}')
return c
def GenerateIncludes(self, include_soft=False):
"""Returns the #include lines for self._default_namespace.
"""
c = Code()
for namespace, dependencies in self._NamespaceTypeDependencies().items():
for dependency in dependencies:
if dependency.hard or include_soft:
c.Append('#include "%s/%s.h"' % (namespace.source_file_dir,
namespace.unix_name))
return c
def _FindType(self, full_name):
"""Finds the model.Type with name |qualified_name|. If it's not from
|self._default_namespace| then it needs to be qualified.
"""
namespace = self._schema_loader.ResolveType(full_name,
self._default_namespace)
if namespace is None:
raise KeyError('Cannot resolve type %s. Maybe it needs a prefix '
'if it comes from another namespace?' % full_name)
return namespace.types[schema_util.StripNamespace(full_name)]
def FollowRef(self, type_):
"""Follows $ref link of types to resolve the concrete type a ref refers to.
If the property passed in is not of type PropertyType.REF, it will be
returned unchanged.
"""
if type_.property_type != PropertyType.REF:
return type_
return self.FollowRef(self._FindType(type_.ref_type))
def _NamespaceTypeDependencies(self):
"""Returns a dict ordered by namespace name containing a mapping of
model.Namespace to every _TypeDependency for |self._default_namespace|,
sorted by the type's name.
"""
dependencies = set()
for function in self._default_namespace.functions.values():
for param in function.params:
dependencies |= self._TypeDependencies(param.type_,
hard=not param.optional)
if function.callback:
for param in function.callback.params:
dependencies |= self._TypeDependencies(param.type_,
hard=not param.optional)
for type_ in self._default_namespace.types.values():
for prop in type_.properties.values():
dependencies |= self._TypeDependencies(prop.type_,
hard=not prop.optional)
for event in self._default_namespace.events.values():
for param in event.params:
dependencies |= self._TypeDependencies(param.type_,
hard=not param.optional)
# Make sure that the dependencies are returned in alphabetical order.
dependency_namespaces = OrderedDict()
for dependency in sorted(dependencies, key=_TypeDependency.GetSortKey):
namespace = dependency.type_.namespace
if namespace is self._default_namespace:
continue
if namespace not in dependency_namespaces:
dependency_namespaces[namespace] = []
dependency_namespaces[namespace].append(dependency)
return dependency_namespaces
def _TypeDependencies(self, type_, hard=False):
"""Gets all the type dependencies of a property.
"""
deps = set()
if type_.property_type == PropertyType.REF:
deps.add(_TypeDependency(self._FindType(type_.ref_type), hard=hard))
elif type_.property_type == PropertyType.ARRAY:
# Non-copyable types are not hard because they are wrapped in linked_ptrs
# when generated. Otherwise they're typedefs, so they're hard (though we
# could generate those typedefs in every dependent namespace, but that
# seems weird).
deps = self._TypeDependencies(type_.item_type,
hard=self.IsCopyable(type_.item_type))
elif type_.property_type == PropertyType.CHOICES:
for type_ in type_.choices:
deps |= self._TypeDependencies(type_, hard=self.IsCopyable(type_))
elif type_.property_type == PropertyType.OBJECT:
for p in type_.properties.values():
deps |= self._TypeDependencies(p.type_, hard=not p.optional)
return deps
def GeneratePropertyValues(self, property, line, nodoc=False):
"""Generates the Code to display all value-containing properties.
"""
c = Code()
if not nodoc:
c.Comment(property.description)
if property.value is not None:
c.Append(line % {
"type": self.GetCppType(property.type_),
"name": property.name,
"value": property.value
})
else:
has_child_code = False
c.Sblock('namespace %s {' % property.name)
for child_property in property.type_.properties.values():
child_code = self.GeneratePropertyValues(child_property,
line,
nodoc=nodoc)
if child_code:
has_child_code = True
c.Concat(child_code)
c.Eblock('} // namespace %s' % property.name)
if not has_child_code:
c = None
return c
|
DirtyUnicorns/android_external_chromium-org
|
tools/json_schema_compiler/cpp_type_generator.py
|
Python
|
bsd-3-clause
| 11,425
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script is copied from
https://chromium.googlesource.com/infra/infra.git/+/master/bootstrap
"""
import datetime
import logging
import optparse
import os
import re
import shutil
import sys
import time
import tempfile
import urllib2
import zipfile
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def get_gae_sdk_version(gae_path):
"""Returns the installed GAE SDK version or None."""
version_path = os.path.join(gae_path, 'VERSION')
if os.path.isfile(version_path):
values = dict(
map(lambda x: x.strip(), l.split(':'))
for l in open(version_path) if ':' in l)
if 'release' in values:
return values['release'].strip('"')
def get_latest_gae_sdk_url(name):
"""Returns the url to get the latest GAE SDK and its version."""
url = 'https://cloud.google.com/appengine/downloads.html'
logging.debug('%s', url)
content = urllib2.urlopen(url).read()
regexp = (
r'(https\:\/\/storage.googleapis.com\/appengine-sdks\/featured\/'
+ re.escape(name) + r'[0-9\.]+?\.zip)')
m = re.search(regexp, content)
url = m.group(1)
# Calculate the version from the url.
new_version = re.search(re.escape(name) + r'(.+?).zip', url).group(1)
# Upgrade to https
return url.replace('http://', 'https://'), new_version
def extract_zip(z, root_path):
"""Extracts files in a zipfile but keep the executable bits."""
count = 0
for f in z.infolist():
perm = (f.external_attr >> 16L) & 0777
mtime = time.mktime(datetime.datetime(*f.date_time).timetuple())
filepath = os.path.join(root_path, f.filename)
logging.debug('Extracting %s', f.filename)
if f.filename.endswith('/'):
os.mkdir(filepath, perm)
else:
z.extract(f, root_path)
os.chmod(filepath, perm)
count += 1
os.utime(filepath, (mtime, mtime))
print('Extracted %d files' % count)
def install_latest_gae_sdk(root_path, fetch_go, dry_run):
if fetch_go:
rootdir = 'go_appengine'
if sys.platform == 'darwin':
name = 'go_appengine_sdk_darwin_amd64-'
else:
# Add other platforms as needed.
name = 'go_appengine_sdk_linux_amd64-'
else:
rootdir = 'google_appengine'
name = 'google_appengine_'
# The zip file already contains 'google_appengine' (for python) or
# 'go_appengine' (for go) in its path so it's a bit
# awkward to unzip otherwise. Hard code the path in for now.
gae_path = os.path.join(root_path, rootdir)
print('Looking up path %s' % gae_path)
version = get_gae_sdk_version(gae_path)
if version:
print('Found installed version %s' % version)
else:
print('Didn\'t find an SDK')
url, new_version = get_latest_gae_sdk_url(name)
print('New version is %s' % new_version)
if version == new_version:
return 0
if os.path.isdir(gae_path):
print('Removing previous version')
if not dry_run:
shutil.rmtree(gae_path)
print('Fetching %s' % url)
if not dry_run:
u = urllib2.urlopen(url)
with tempfile.NamedTemporaryFile() as f:
while True:
chunk = u.read(2 ** 20)
if not chunk:
break
f.write(chunk)
# Assuming we're extracting there. In fact, we have no idea.
print('Extracting into %s' % gae_path)
z = zipfile.ZipFile(f, 'r')
try:
extract_zip(z, root_path)
finally:
z.close()
return 0
def main():
parser = optparse.OptionParser(prog='python -m %s' % __package__)
parser.add_option('-v', '--verbose', action='store_true')
parser.add_option(
'-g', '--go', action='store_true', help='Defaults to python SDK')
parser.add_option(
'-d', '--dest', default=os.path.dirname(BASE_DIR), help='Output')
parser.add_option('--dry-run', action='store_true', help='Do not download')
options, args = parser.parse_args()
if args:
parser.error('Unsupported args: %s' % ' '.join(args))
logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)
return install_latest_gae_sdk(
os.path.abspath(options.dest), options.go, options.dry_run)
if __name__ == '__main__':
sys.exit(main())
|
Teamxrtc/webrtc-streaming-node
|
third_party/depot_tools/testing_support/get_appengine.py
|
Python
|
mit
| 4,274
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_elb_facts
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
description:
- Gather facts about EC2 Elastic Load Balancers in AWS
version_added: "2.0"
author:
- "Michael Schultz (github.com/mjschultz)"
- "Fernando Jose Pando (@nand0p)"
options:
names:
description:
      - List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs; otherwise, all ELBs are returned.
aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match ec2_elb_lb module input parameters
# Gather facts about all ELBs
- action:
module: ec2_elb_facts
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: "{{ elb_facts.elbs }}"
# Gather facts about a particular ELB
- action:
module: ec2_elb_facts
names: frontend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ elb_facts.elbs.0.dns_name }}"
# Gather facts about a set of ELBs
- action:
module: ec2_elb_facts
names:
- frontend-prod-elb
- backend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: "{{ elb_facts.elbs }}"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
AWSRetry,
connect_to_aws,
ec2_argument_spec,
get_aws_connection_info,
)
try:
import boto.ec2.elb
from boto.ec2.tag import Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElbInformation(object):
"""Handles ELB information."""
def __init__(self,
module,
names,
region,
**aws_connect_params):
self.module = module
self.names = names
self.region = region
self.aws_connect_params = aws_connect_params
self.connection = self._get_elb_connection()
def _get_tags(self, elbname):
params = {'LoadBalancerNames.member.1': elbname}
elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def _get_elb_connection(self):
return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
def _get_elb_listeners(self, listeners):
listener_list = []
for listener in listeners:
listener_dict = {
'load_balancer_port': listener[0],
'instance_port': listener[1],
'protocol': listener[2],
}
try:
ssl_certificate_id = listener[4]
except IndexError:
pass
else:
if ssl_certificate_id:
listener_dict['ssl_certificate_id'] = ssl_certificate_id
listener_list.append(listener_dict)
return listener_list
def _get_health_check(self, health_check):
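        # A classic ELB health check target has the form "PROTOCOL:PORT" or
        # "PROTOCOL:PORT/PATH", e.g. "HTTP:80/index.html"; split off the
        # protocol first, then the optional path below.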
protocol, port_path = health_check.target.split(':')
try:
port, path = port_path.split('/', 1)
path = '/{0}'.format(path)
except ValueError:
port = port_path
path = None
health_check_dict = {
'ping_protocol': protocol.lower(),
'ping_port': int(port),
'response_timeout': health_check.timeout,
'interval': health_check.interval,
'unhealthy_threshold': health_check.unhealthy_threshold,
'healthy_threshold': health_check.healthy_threshold,
}
if path:
health_check_dict['ping_path'] = path
return health_check_dict
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def _get_elb_info(self, elb):
elb_info = {
'name': elb.name,
'zones': elb.availability_zones,
'dns_name': elb.dns_name,
'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
'hosted_zone_name': elb.canonical_hosted_zone_name,
'hosted_zone_id': elb.canonical_hosted_zone_name_id,
'instances': [instance.id for instance in elb.instances],
'listeners': self._get_elb_listeners(elb.listeners),
'scheme': elb.scheme,
'security_groups': elb.security_groups,
'health_check': self._get_health_check(elb.health_check),
'subnets': elb.subnets,
'instances_inservice': [],
'instances_inservice_count': 0,
'instances_outofservice': [],
'instances_outofservice_count': 0,
'instances_inservice_percent': 0.0,
'tags': self._get_tags(elb.name)
}
if elb.vpc_id:
elb_info['vpc_id'] = elb.vpc_id
if elb.instances:
instance_health = self.connection.describe_instance_health(elb.name)
elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
try:
elb_info['instances_inservice_percent'] = (
float(elb_info['instances_inservice_count']) /
float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
) * 100.
except ZeroDivisionError:
elb_info['instances_inservice_percent'] = 0.
return elb_info
def list_elbs(self):
elb_array, token = [], None
get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
while True:
all_elbs = get_elb_with_backoff(marker=token)
token = all_elbs.next_marker
if all_elbs:
if self.names:
for existing_lb in all_elbs:
if existing_lb.name in self.names:
elb_array.append(existing_lb)
else:
elb_array.extend(all_elbs)
else:
break
if token is None:
break
return list(map(self._get_elb_info, elb_array))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
names={'default': [], 'type': 'list'}
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="region must be specified")
names = module.params['names']
elb_information = ElbInformation(
module, names, region, **aws_connect_params)
ec2_facts_result = dict(changed=False,
elbs=elb_information.list_elbs())
except BotoServerError as err:
module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
exception=traceback.format_exc())
module.exit_json(**ec2_facts_result)
if __name__ == '__main__':
main()
|
hryamzik/ansible
|
lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
|
Python
|
gpl-3.0
| 8,629
|
# Reads a list of urls and an old list of services and outputs the list
# of hostnames that are missing from the old list of services.
#
# List of urls can be generated from the mitro db using:
#
# psql -P tuples_only=on -c "select hostname from secrets" mitro
#
from collections import defaultdict
import json
import operator
import subprocess
import sys
import urllib2
from urlparse import urlparse
def read_urls_file(filename):
f = open(filename, 'r')
urls_list = [line.strip() for line in f]
f.close()
return urls_list
def read_service_list(filename):
f = open(filename, 'r')
service_list = json.loads(f.read())
f.close()
return service_list
def write_hosts_file(filename, hosts):
f = open(filename, 'w')
for host in hosts:
f.write('http://' + host + '\n')
f.close()
def get_canonical_host(url):
host = urlparse(url).netloc
if host.startswith('www.'):
return host[4:]
else:
return host
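# For example (illustrative values): get_canonical_host('http://www.example.com/login')
# returns 'example.com', while get_canonical_host('https://app.example.com/')
# returns 'app.example.com'.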
def main():
    if len(sys.argv) != 4:
        print 'usage: build_update_list.py old_service_list urls output_file'
        sys.exit(1)
    urls_list = read_urls_file(sys.argv[2])
service_list = read_service_list(sys.argv[1])
service_map = defaultdict(int)
for service in service_list:
service_map[get_canonical_host(service['login_url'])] += 1
hosts = []
for url in urls_list:
host = get_canonical_host(url)
if host not in service_map:
hosts.append(host)
service_map[host] += 1
write_hosts_file(sys.argv[3], hosts)
if __name__ == '__main__':
main()
|
ssgeejr/mitropm
|
mitro-core/tools/icons_crawler/build_update_list.py
|
Python
|
gpl-3.0
| 1,567
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
import tensorflow as tf
from tensorflow.contrib.tensor_forest.hybrid.python.models import forest_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class ForestToDataThenNNTest(test_util.TensorFlowTestCase):
def setUp(self):
self.params = tensor_forest.ForestHParams(
num_classes=2,
num_features=31,
layer_size=11,
num_layers=13,
num_trees=3,
connection_probability=0.1,
hybrid_tree_depth=4,
regularization_strength=0.01,
regularization="",
base_random_seed=10,
feature_bagging_fraction=1.0,
learning_rate=0.01,
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.regression = False
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (
self.params.feature_bagging_fraction * self.params.num_features)
def testInferenceConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
with variable_scope.variable_scope(
"ForestToDataThenNNTest_testInferenceContruction"):
graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
graph = graph_builder.inference_graph(data, None)
self.assertTrue(isinstance(graph, Tensor))
def testTrainingConstruction(self):
# pylint: disable=W0612
data = constant_op.constant(
[[random.uniform(-1, 1) for i in range(self.params.num_features)]
for _ in range(100)])
labels = [1 for _ in range(100)]
with variable_scope.variable_scope(
"ForestToDataThenNNTest.testTrainingContruction"):
graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
graph = graph_builder.training_graph(data, labels, None)
self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
googletest.main()
|
laosiaudi/tensorflow
|
tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn_test.py
|
Python
|
apache-2.0
| 3,269
|
"""
Useful form fields for use with SQLAlchemy ORM.
"""
import operator
from wtforms import widgets
from wtforms.fields import SelectFieldBase
from wtforms.validators import ValidationError
from .tools import get_primary_key
from flask_admin._compat import text_type, string_types
from flask_admin.form import FormOpts
from flask_admin.model.fields import InlineFieldList, InlineModelFormField
from flask_admin.model.widgets import InlineFormWidget
try:
from sqlalchemy.orm.util import identity_key
has_identity_key = True
except ImportError:
has_identity_key = False
class QuerySelectField(SelectFieldBase):
"""
Will display a select drop-down field to choose between ORM results in a
sqlalchemy `Query`. The `data` property actually will store/keep an ORM
model instance, not the ID. Submitting a choice which is not in the query
will result in a validation error.
This field only works for queries on models whose primary key column(s)
have a consistent string representation. This means it mostly only works
for those composed of string, unicode, and integer types. For the most
part, the primary keys will be auto-detected from the model, alternately
pass a one-argument callable to `get_pk` which can return a unique
comparable key.
The `query` property on the field can be set from within a view to assign
a query per-instance to the field. If the property is not set, the
`query_factory` callable passed to the field constructor will be called to
obtain a query.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` or `__unicode__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for this blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, query_factory=None,
get_pk=None, get_label=None, allow_blank=False,
blank_text=u'', **kwargs):
super(QuerySelectField, self).__init__(label, validators, **kwargs)
self.query_factory = query_factory
if get_pk is None:
if not has_identity_key:
raise Exception(u'The sqlalchemy identity_key function could not be imported.')
self.get_pk = get_pk_from_identity
else:
self.get_pk = get_pk
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self.query = None
self._object_list = None
def _get_data(self):
if self._formdata is not None:
for pk, obj in self._get_object_list():
if pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _get_object_list(self):
if self._object_list is None:
query = self.query or self.query_factory()
get_pk = self.get_pk
self._object_list = [(text_type(get_pk(obj)), obj) for obj in query]
return self._object_list
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == u'__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for pk, obj in self._get_object_list():
if self.data == obj:
break
else:
raise ValidationError(self.gettext(u'Not a valid choice'))
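# Illustrative usage sketch; the form, model and attribute names below are
# hypothetical. QuerySelectField is normally declared on a WTForms form, with
# the query supplied either through `query_factory` or by assigning
# `field.query` in the view:
#
#   class AssignForm(Form):
#       owner = QuerySelectField(
#           'Owner',
#           query_factory=lambda: db.session.query(User),
#           get_label='username',
#           allow_blank=True,
#           blank_text=u'(no owner)')
#
# On a successful submit, form.owner.data is the selected User instance, or
# None when the blank choice is allowed and selected.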
class QuerySelectMultipleField(QuerySelectField):
"""
Very similar to QuerySelectField with the difference that this will
display a multiple select. The data property will hold a list with ORM
model instances and will be an empty list when no value is selected.
If any of the items in the data list or submitted form data cannot be
found in the query, this will result in a validation error.
"""
widget = widgets.Select(multiple=True)
def __init__(self, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(QuerySelectMultipleField, self).__init__(label, validators, default=default, **kwargs)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata is not None:
data = []
for pk, obj in self._get_object_list():
if not formdata:
break
elif pk in formdata:
formdata.remove(pk)
data.append(obj)
if formdata:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj in self.data)
def process_formdata(self, valuelist):
self._formdata = set(valuelist)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext(u'Not a valid choice'))
elif self.data:
obj_list = list(x[1] for x in self._get_object_list())
for v in self.data:
if v not in obj_list:
raise ValidationError(self.gettext(u'Not a valid choice'))
class InlineModelFormList(InlineFieldList):
"""
Customized inline model form list field.
"""
form_field_type = InlineModelFormField
"""
Form field type. Override to use custom field for each inline form
"""
def __init__(self, form, session, model, prop, inline_view, **kwargs):
"""
Default constructor.
:param form:
Form for the related model
:param session:
SQLAlchemy session
:param model:
Related model
:param prop:
Related property name
:param inline_view:
Inline view
"""
self.form = form
self.session = session
self.model = model
self.prop = prop
self.inline_view = inline_view
self._pk = get_primary_key(model)
# Generate inline form field
form_opts = FormOpts(widget_args=getattr(inline_view, 'form_widget_args', None),
form_rules=inline_view._form_rules)
form_field = self.form_field_type(form, self._pk, form_opts=form_opts)
super(InlineModelFormList, self).__init__(form_field, **kwargs)
def display_row_controls(self, field):
return field.get_pk() is not None
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
if values is None:
return
# Create primary key map
pk_map = dict((str(getattr(v, self._pk)), v) for v in values)
# Handle request data
for field in self.entries:
field_id = field.get_pk()
if field_id in pk_map:
model = pk_map[field_id]
if self.should_delete(field):
self.session.delete(model)
continue
else:
model = self.model()
values.append(model)
field.populate_obj(model, None)
self.inline_view.on_model_change(field, model)
def get_pk_from_identity(obj):
# TODO: Remove me
cls, key = identity_key(instance=obj)
return u':'.join(text_type(x) for x in key)
|
hexlism/css_platform
|
sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/contrib/sqla/fields.py
|
Python
|
apache-2.0
| 8,740
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import ceil
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Pack")
def _PackGrad(op, grad):
"""Gradient for pack op."""
return array_ops.unstack(grad, num=op.get_attr("N"), axis=op.get_attr("axis"))
@ops.RegisterGradient("Unpack")
def _UnpackGrad(op, *grads):
"""Gradient for unpack op."""
return array_ops.stack(grads, axis=op.get_attr("axis"))
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
"""Gradient for concat op.
Args:
op: An operation.
grad: `Tensor` or `IndexedSlices` representing the gradients with respect
to each output of the op.
start_value_index: An integer index of the first value in the op.inputs.
end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.
Returns:
    Tensors representing the partial gradients with respect to each input
of the op.
Raises:
ValueError: if concat_dim/axis is not statically known.
"""
def _CreateDenseMaskAndBegin(sizes, concat_dim):
"""Create variables for iteratively slicing a dense gradients tensor."""
# Since shape is 1-D, shape_of_shape = [rank-of-inputs]
shape_of_shape = array_ops.shape(sizes[0])
# Make a vector of length equal to the input's dimensions,
# with 0's everywhere and 1 in the concat dim position.
# Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
mask = array_ops.concat([
array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1],
array_ops.fill(shape_of_shape - concat_dim - 1, 0)
], 0)
begin = array_ops.fill(shape_of_shape, 0)
return mask, begin
def _ExtractInputShapes(inputs):
"""Extract the shapes of a set of input tensors."""
sizes = []
fully_known = True
for x in inputs:
input_shape = array_ops.shape(x)
if not isinstance(input_shape,
ops.Tensor) or input_shape.op.type != "Const":
fully_known = False
break
else:
sizes.append(input_shape)
if fully_known:
return sizes
else:
return array_ops.shape_n(inputs)
# Degenerate concatenation, just return grad.
if len(op.inputs) == 2:
return grad + [None] if end_value_index <= dim_index else [None] + grad
concat_dim = op.inputs[dim_index]
input_values = op.inputs[start_value_index:end_value_index]
# Using mod here for convenience since concat_dim is already verified
# in concat implementation to be within the allowed [-rank, rank) range.
non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])
out_grads = []
if isinstance(grad, ops.Tensor):
# Get the inputs' tensor shapes
sizes = _ExtractInputShapes(input_values)
# The magic number of 16 was found through benchmarking a range of sizes
# on CPUs and a Maxwell TitanX. A speedup was seen in a large majority of
# cases when switching implementations at N=16, but it is possible that
# there will be a small number of performance regressions.
# pylint: disable=protected-access
if len(sizes) > 16:
# extract the size of each input along the concat dimension
sizes = array_ops.squeeze(
array_ops.slice(
array_ops.stack(
sizes, axis=1), [non_neg_concat_dim, 0], [1, -1]))
out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
else:
offset = gen_array_ops._concat_offset(non_neg_concat_dim, sizes)
for (begin, size) in zip(offset, sizes):
out_grads.append(array_ops.slice(grad, begin, size))
# pylint: enable=protected-access
elif isinstance(grad, ops.IndexedSlices):
concat_dim_static = tensor_util.constant_value(concat_dim)
if concat_dim_static is None:
raise ValueError("Can only compute IndexedSlices gradient with "
"statically-known concat_dim")
if concat_dim_static < 0:
rank = tensor_util.constant_value(array_ops.rank(input_values[0]))
if rank is None:
raise ValueError("Can only compute IndexedSlices gradient with "
"negative concat_dim when first value rank is "
"statically-known.")
concat_dim_static %= rank
# Get the inputs' tensor shapes
sizes = [array_ops.shape(x) for x in input_values]
if concat_dim_static > 0:
# IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices
# gradients with all the indices, but with grad.values sliced accordingly.
# This is like the Tensor case, except shape(grad.values)[0] is not equal
# to shape(sizes[i])[0], since only a subset of the dim-0 values are
# stored.
mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim)
for size in sizes:
new_values = array_ops.slice(
grad.values, begin,
array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0))
out_grads.append(
ops.IndexedSlices(new_values, grad.indices, size))
# Lint complains begin = begin + ...
begin = math_ops.add(begin, size * mask)
else:
# IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
# only for the relevant indices.
start = constant_op.constant(0, dtype=grad.indices.dtype)
for size in sizes:
size_concat_dim = array_ops.gather(size, non_neg_concat_dim)
if size_concat_dim.dtype != grad.indices.dtype:
size_concat_dim = math_ops.cast(size_concat_dim,
dtype=grad.indices.dtype)
end = start + size_concat_dim
# Compute the 1-D Tensor of indices relevant for this input.
indices_to_select = array_ops.squeeze(
array_ops.where(math_ops.logical_and(grad.indices >= start,
grad.indices < end)),
squeeze_dims=[1])
new_indices = array_ops.gather(grad.indices, indices_to_select) - start
new_values = array_ops.gather(grad.values, indices_to_select)
out_grads.append(
ops.IndexedSlices(new_values, new_indices, size))
start = end
else:
raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))
return (out_grads + [None] if end_value_index <= dim_index
else [None] + out_grads)
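# Illustrative example of the helper above (values are hypothetical): for
# y = tf.concat([a, b], axis=0) with a of shape [2, 3] and b of shape [4, 3],
# the incoming grad has shape [6, 3] and is returned split back into a [2, 3]
# piece for a and a [4, 3] piece for b (plus None for the axis input), i.e. the
# gradient of concat is just a split/slice along the concat dimension.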
@ops.RegisterGradient("Concat")
def _ConcatGrad(op, grad):
return _ConcatGradHelper(
op, grad, start_value_index=1, end_value_index=len(op.inputs),
dim_index=0)
@ops.RegisterGradient("ConcatV2")
def _ConcatGradV2(op, grad):
return _ConcatGradHelper(
op, grad, start_value_index=0, end_value_index=-1, dim_index=-1)
ops.NotDifferentiable("ConcatOffset")
@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
"""Gradient for Slice op."""
# Create an Nx2 padding where the first column represents how many
# zeros are to be prepended for each dimension, and the second
# column indicates how many zeros are appended.
#
# The number of zeros to append is the shape of the input
# elementwise-subtracted by both the begin vector and sizes vector.
#
# Some more reshaping is needed to assemble this tensor with the
# right dimensions.
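  #
  # Worked example (illustrative numbers): for an input of shape [4, 5],
  # begin = [1, 2] and an output slice of shape [2, 2], before_pad reshapes to
  # [[1], [2]], after_pad to [[1], [1]], so paddings = [[1, 1], [2, 1]] and the
  # grad is padded back out to shape [4, 5].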
input_vec = op.inputs[0]
begin_vec = op.inputs[1]
input_rank = array_ops.rank(input_vec)
slice_size = array_ops.shape(op.outputs[0])
shape = array_ops.stack([input_rank, 1])
before_pad = array_ops.reshape(begin_vec, shape)
after_pad = array_ops.reshape(
array_ops.shape(input_vec) - slice_size - begin_vec, shape)
paddings = array_ops.concat([before_pad, after_pad], 1)
return array_ops.pad(grad, paddings), None, None
@ops.RegisterGradient("StridedSlice")
def _StridedSliceGrad(op, grad):
"""Gradient for StridedSlice op."""
x = array_ops.shape(op.inputs[0])
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return array_ops.strided_slice_grad(
x,
begin,
end,
strides,
grad,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask")), None, None, None
@ops.RegisterGradient("StridedSliceGrad")
def _StridedSliceGradGrad(op, grad):
"""Gradient for StridedSliceGrad op."""
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return None, None, None, None, array_ops.strided_slice(
grad,
begin,
end,
strides,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask"))
@ops.RegisterGradient("Split")
def _SplitGrad(op, *grads):
return None, array_ops.concat(list(grads), op.inputs[0])
@ops.RegisterGradient("SplitV")
def _SplitVGrad(op, *grads):
returnval = array_ops.concat(list(grads), op.inputs[2])
returnval = [returnval] + [None,] * (len(op.inputs) - 1)
return returnval
ops.NotDifferentiable("Const")
@ops.RegisterGradient("Diag")
def _DiagGrad(_, grad):
return array_ops.diag_part(grad)
@ops.RegisterGradient("DiagPart")
def _DiagPartGrad(_, grad):
return array_ops.diag(grad)
@ops.RegisterGradient("MatrixDiag")
def _MatrixDiagGrad(_, grad):
return array_ops.matrix_diag_part(grad)
@ops.RegisterGradient("MatrixDiagPart")
def _MatrixDiagPartGrad(op, grad):
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined() and matrix_shape[0] == matrix_shape[1]:
return array_ops.matrix_diag(grad)
else:
return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad)
@ops.RegisterGradient("MatrixSetDiag")
def _MatrixSetDiagGrad(op, grad):
"""Gradient for MatrixSetDiag."""
input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
diag_shape = op.inputs[1].get_shape()
batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
matrix_shape = input_shape[-2:]
if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
else:
with ops.colocate_with(grad):
grad_shape = array_ops.shape(grad)
grad_rank = array_ops.rank(grad)
batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
min_dim = math_ops.reduce_min(matrix_shape)
diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
grad_input = array_ops.matrix_set_diag(
grad, array_ops.zeros(
diag_shape, dtype=grad.dtype))
grad_diag = array_ops.matrix_diag_part(grad)
return (grad_input, grad_diag)
@ops.RegisterGradient("MatrixBandPart")
def _MatrixBandPartGrad(op, grad):
num_lower = op.inputs[1]
num_upper = op.inputs[2]
return (array_ops.matrix_band_part(grad, num_lower, num_upper), None, None)
# Edit Distance has no gradient (but can be used to eval seq2seq or CTC).
ops.NotDifferentiable("EditDistance")
@ops.RegisterGradient("Fill")
def _FillGrad(_, grad):
return None, math_ops.reduce_sum(grad)
ops.NotDifferentiable("ZerosLike")
ops.NotDifferentiable("OnesLike")
@ops.RegisterGradient("PreventGradient")
def _PreventGradientGrad(op, _):
raise LookupError(
"Gradient explicitly disabled. Reason: %s" % op.get_attr("message"))
@ops.RegisterGradient("Gather")
def _GatherGrad(op, grad):
"""Gradient for Gather op."""
# params can be large, so colocate the shape calculation with it.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params)
# Build appropriately shaped IndexedSlices
indices = op.inputs[1]
size = array_ops.expand_dims(array_ops.size(indices), 0)
values_shape = array_ops.concat([size, params_shape[1:]], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, size)
return [ops.IndexedSlices(values, indices, params_shape), None]
@ops.RegisterGradient("GatherNd")
def _GatherNdGrad(op, grad):
ref = op.inputs[0]
indices = op.inputs[1]
ref_shape = array_ops.shape(ref, out_type=indices.dtype)
ref_grad = array_ops.scatter_nd(indices, grad, ref_shape)
return [ref_grad, None]
@ops.RegisterGradient("CheckNumerics")
def _CheckNumericsGrad(_, grad):
"""Gradient for check_numerics op."""
return array_ops.check_numerics(
grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")
@ops.RegisterGradient("PlaceholderWithDefault")
@ops.RegisterGradient("Identity")
def _IdGrad(_, grad):
return grad
@ops.RegisterGradient("RefIdentity")
def _RefIdGrad(_, grad):
return grad
ops.NotDifferentiable("StopGradient")
@ops.RegisterGradient("Reshape")
def _ReshapeGrad(op, grad):
return [array_ops.reshape(grad, array_ops.shape(op.inputs[0])), None]
ops.NotDifferentiable("InvertPermutation")
def _ReshapeToInput(op, grad):
"""Reshapes the gradient to the shape of the original input."""
return array_ops.reshape(grad, array_ops.shape(op.inputs[0]))
@ops.RegisterGradient("ExpandDims")
def _ExpandDimsGrad(op, grad):
return [_ReshapeToInput(op, grad), None]
@ops.RegisterGradient("Squeeze")
def _SqueezeGrad(op, grad):
return _ReshapeToInput(op, grad)
@ops.RegisterGradient("Transpose")
def _TransposeGrad(op, grad):
"""Returns unshuffle(grad)."""
p = op.inputs[1]
return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None]
ops.NotDifferentiable("Shape")
ops.NotDifferentiable("ShapeN")
ops.NotDifferentiable("Rank")
ops.NotDifferentiable("Size")
@ops.RegisterGradient("Tile")
def _TileGrad(op, grad):
"""Sum reduces grad along the tiled dimensions."""
assert isinstance(grad, ops.Tensor)
input_shape = array_ops.shape(op.inputs[0])
# We interleave multiples and input_shape to get split_shape,
# reshape grad to split_shape, and reduce along all even
# dimensions (the tiled dimensions) to get the result
# with shape input_shape. For example
# input_shape = [20, 30, 40]
# multiples = [2, 3, 4]
# split_shape = [2, 20, 3, 30, 4, 40]
# axes = [0, 2, 4]
split_shape = array_ops.reshape(
array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
axes = math_ops.range(0, array_ops.size(split_shape), 2)
input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
# Fix shape inference
input_grad.set_shape(op.inputs[0].get_shape())
return [input_grad, None]
ops.NotDifferentiable("BroadcastGradientArgs")
@ops.RegisterGradient("Pad")
def _PadGrad(op, grad):
"""Gradient for Pad."""
# Pad introduces values around the original tensor, so the gradient function
# slices the original shape out of the gradient."""
x = op.inputs[0]
a = op.inputs[1] # [Rank(x), 2]
# Takes a slice of a. The 1st column. [Rank(x), 1].
pad_before = array_ops.slice(a, [0, 0],
array_ops.stack([array_ops.rank(x), 1]))
# Make it a 1-D tensor.
begin = array_ops.reshape(pad_before, [-1])
sizes = array_ops.shape(x)
return array_ops.slice(grad, begin, sizes), None
# ReverseSequence is just a permutation. The gradient permutes back.
@ops.RegisterGradient("ReverseSequence")
def _ReverseSequenceGrad(op, grad):
seq_lengths = op.inputs[1]
return [
array_ops.reverse_sequence(
grad,
batch_axis=op.get_attr("batch_dim"),
seq_axis=op.get_attr("seq_dim"),
seq_lengths=seq_lengths), None
]
@ops.RegisterGradient("Reverse")
def _ReverseGrad(op, grad):
reverse_dims = op.inputs[1]
# pylint: disable=protected-access
return gen_array_ops._reverse(grad, reverse_dims), None
# pylint: enable=protected-access
@ops.RegisterGradient("ReverseV2")
def _ReverseV2Grad(op, grad):
axis = op.inputs[1]
return array_ops.reverse_v2(grad, axis), None
@ops.RegisterGradient("SpaceToBatch")
def _SpaceToBatchGrad(op, grad):
# Its gradient is the opposite op: BatchToSpace.
block_size = op.get_attr("block_size")
return [array_ops.batch_to_space(grad, op.inputs[1], block_size=block_size),
None]
@ops.RegisterGradient("SpaceToBatchND")
def _SpaceToBatchNDGrad(op, grad):
# Its gradient is the opposite op: BatchToSpaceND.
return [array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]),
None, None]
@ops.RegisterGradient("BatchToSpace")
def _BatchToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatch.
block_size = op.get_attr("block_size")
return [array_ops.space_to_batch(grad, op.inputs[1], block_size=block_size),
None]
@ops.RegisterGradient("BatchToSpaceND")
def _BatchToSpaceNDGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatchND.
return [array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]),
None, None]
@ops.RegisterGradient("SpaceToDepth")
def _SpaceToDepthGrad(op, grad):
# Its gradient is the opposite op: DepthToSpace.
block_size = op.get_attr("block_size")
return array_ops.depth_to_space(grad, block_size)
@ops.RegisterGradient("DepthToSpace")
def _DepthToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToDepth.
block_size = op.get_attr("block_size")
return array_ops.space_to_depth(grad, block_size)
ops.NotDifferentiable("OneHot")
@ops.RegisterGradient("MirrorPad")
def _MirrorPadGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("MirrorPadGrad")
def _MirrorPadGradGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("QuantizeAndDequantize")
def _QuantizeAndDequantizeGrad(_, grad):
return grad
@ops.RegisterGradient("QuantizeAndDequantizeV2")
def _QuantizeAndDequantizeV2Grad(_, grad):
return [grad, None, None]
@ops.RegisterGradient("ExtractImagePatches")
def _ExtractImagePatchesGrad(op, grad):
batch_size, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].get_shape()
]
input_bhwc = array_ops.shape(op.inputs[0])
batch_size = input_bhwc[0]
channels = input_bhwc[3]
_, rows_out, cols_out, _ = [
dim.value for dim in op.outputs[0].get_shape()
]
_, ksize_r, ksize_c, _ = op.get_attr('ksizes')
_, stride_r, stride_h, _ = op.get_attr('strides')
_, rate_r, rate_c, _ = op.get_attr('rates')
padding = op.get_attr('padding')
ksize_r_eff = ksize_r + (ksize_r - 1) * (rate_r - 1)
ksize_c_eff = ksize_c + (ksize_c - 1) * (rate_c - 1)
if padding == b'SAME':
rows_out = int(ceil(rows_in / stride_r))
cols_out = int(ceil(cols_in / stride_h))
pad_rows = ((rows_out - 1) * stride_r + ksize_r_eff - rows_in) // 2
pad_cols = ((cols_out - 1) * stride_h + ksize_c_eff - cols_in) // 2
elif padding == b'VALID':
rows_out = int(ceil((rows_in - ksize_r_eff + 1) / stride_r))
cols_out = int(ceil((cols_in - ksize_c_eff + 1) / stride_h))
pad_rows = (rows_out - 1) * stride_r + ksize_r_eff - rows_in
pad_cols = (cols_out - 1) * stride_h + ksize_c_eff - cols_in
pad_rows, pad_cols = max(0, pad_rows), max(0, pad_cols)
grad_expanded = array_ops.transpose(
array_ops.reshape(grad, (batch_size, rows_out,
cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5)
)
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
row_steps = range(0, rows_out * stride_r, stride_r)
col_steps = range(0, cols_out * stride_h, stride_h)
idx = []
for i in range(rows_out):
for j in range(cols_out):
r_low, c_low = row_steps[i] - pad_rows, col_steps[j] - pad_cols
r_high, c_high = r_low + ksize_r_eff, c_low + ksize_c_eff
idx.extend([(r * (cols_in) + c,
i * (cols_out * ksize_r * ksize_c) +
j * (ksize_r * ksize_c) +
ri * (ksize_c) + ci)
for (ri, r) in enumerate(range(r_low, r_high, rate_r))
for (ci, c) in enumerate(range(c_low, c_high, rate_c))
if 0 <= r and r < rows_in and 0 <= c and c < cols_in
])
sp_shape = (rows_in * cols_in,
rows_out * cols_out * ksize_r * ksize_c)
sp_mat = sparse_tensor.SparseTensor(
array_ops.constant(idx, dtype=ops.dtypes.int64),
array_ops.ones((len(idx),), dtype=ops.dtypes.float32),
sp_shape
)
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(
jac, (rows_in, cols_in, batch_size, channels)
)
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out]
@ops.RegisterGradient("ScatterNd")
def _ScatterNdGrad(op, grad):
indices = op.inputs[0]
updates_grad = array_ops.gather_nd(grad, indices)
return [None, updates_grad, None]
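# Illustrative sketch (not part of the original module): the interleave /
# reduce_sum trick documented inside _TileGrad above, reproduced with plain
# NumPy so the shapes are easy to verify by hand. The concrete sizes below are
# chosen for illustration only.
if __name__ == "__main__":
  import numpy as np
  input_shape = (2, 3)                     # original (untiled) input shape
  multiples = (2, 2)                       # tiled output has shape (4, 6)
  grad = np.ones((4, 6))                   # incoming gradient w.r.t. the tiled tensor
  split_shape = (2, 2, 2, 3)               # interleave(multiples, input_shape)
  axes = (0, 2)                            # even axes are the tiled dimensions
  input_grad = grad.reshape(split_shape).sum(axis=axes)
  assert input_grad.shape == input_shape   # reduced back to the input's shape
  assert (input_grad == 4.0).all()         # each input cell was copied 2*2 times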
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/python/ops/array_grad.py
|
Python
|
bsd-2-clause
| 22,276
|
#!/usr/bin/env python
data = '../data/fm_train_real.dat'
parameter_list = [[data]]
def converter_tdistributedstochasticneighborembedding_modular(data_fname, seed=1):
try:
from modshogun import RealFeatures, TDistributedStochasticNeighborEmbedding
from modshogun import Math_init_random, CSVFile
# reproducible results
Math_init_random(seed)
features = RealFeatures(CSVFile(data_fname))
converter = TDistributedStochasticNeighborEmbedding()
converter.set_target_dim(2)
embedding = converter.apply(features)
return embedding
except ImportError:
print('No Eigen3 available')
if __name__=='__main__':
print('TDistributedStochasticNeighborEmbedding')
converter_tdistributedstochasticneighborembedding_modular(*parameter_list[0])
|
AzamYahya/shogun
|
examples/undocumented/python_modular/converter_tdistributedstochasticneighborembedding_modular.py
|
Python
|
gpl-3.0
| 754
|
#! /usr/bin/env python
"""
Script to generate notebooks with output from notebooks that don't have
output.
"""
# prefer HTML over rST for now until nbconvert changes drop
OUTPUT = "html"
import os
import io
import sys
import time
import shutil
SOURCE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..",
"examples",
"notebooks"))
from Queue import Empty
try: # IPython has been refactored
from IPython.kernel import KernelManager
except ImportError:
from IPython.zmq.blockingkernelmanager import (BlockingKernelManager as
KernelManager)
from IPython.nbformat.current import reads, write, NotebookNode
cur_dir = os.path.abspath(os.path.dirname(__file__))
# for conversion of .ipynb -> html/rst
from IPython.config import Config
try:
from IPython.nbconvert.exporters import HTMLExporter
except ImportError:
from warnings import warn
from statsmodels.tools.sm_exceptions import ModuleUnavailableWarning
warn("Notebook examples not built. You need IPython 1.0.",
ModuleUnavailableWarning)
sys.exit(0)
import hash_funcs
class NotebookRunner:
"""
    Parameters
    ----------
notebook_dir : str
Path to the notebooks to convert
extra_args : list
These are command line arguments passed to start the notebook kernel
profile : str
The profile name to use
timeout : int
        How many seconds to wait for each cell to complete running
"""
def __init__(self, notebook_dir, extra_args=None, profile=None,
timeout=90):
self.notebook_dir = os.path.abspath(notebook_dir)
self.profile = profile
self.timeout = timeout
km = KernelManager()
if extra_args is None:
extra_args = []
if profile is not None:
extra_args += ["--profile=%s" % profile]
km.start_kernel(stderr=open(os.devnull, 'w'),
extra_arguments=extra_args)
try:
kc = km.client()
kc.start_channels()
iopub = kc.iopub_channel
except AttributeError: # still on 0.13
kc = km
kc.start_channels()
iopub = kc.sub_channel
shell = kc.shell_channel
# make sure it's working
shell.execute("pass")
shell.get_msg()
# all of these should be run pylab inline
shell.execute("%pylab inline")
shell.get_msg()
self.kc = kc
self.km = km
self.iopub = iopub
def __iter__(self):
notebooks = [os.path.join(self.notebook_dir, i)
for i in os.listdir(self.notebook_dir)
if i.endswith('.ipynb') and 'generated' not in i]
for ipynb in notebooks:
with open(ipynb, 'r') as f:
nb = reads(f.read(), 'json')
yield ipynb, nb
def __call__(self, nb):
return self.run_notebook(nb)
def run_cell(self, shell, iopub, cell, exec_count):
outs = []
shell.execute(cell.input)
        # use the configured timeout rather than a hard-coded value
        shell.get_msg(timeout=self.timeout)
cell.prompt_number = exec_count # msg["content"]["execution_count"]
while True:
try:
                # what's the assumption on timeout here?
                # is it asynchronous?
msg = iopub.get_msg(timeout=.2)
except Empty:
break
msg_type = msg["msg_type"]
if msg_type in ["status" , "pyin"]:
continue
elif msg_type == "clear_output":
outs = []
continue
content = msg["content"]
out = NotebookNode(output_type=msg_type)
if msg_type == "stream":
out.stream = content["name"]
out.text = content["data"]
elif msg_type in ["display_data", "pyout"]:
for mime, data in content["data"].iteritems():
attr = mime.split("/")[-1].lower()
# this gets most right, but fix svg+html, plain
attr = attr.replace('+xml', '').replace('plain', 'text')
setattr(out, attr, data)
if msg_type == "pyout":
out.prompt_number = exec_count #content["execution_count"]
elif msg_type == "pyerr":
out.ename = content["ename"]
out.evalue = content["evalue"]
out.traceback = content["traceback"]
else:
print "unhandled iopub msg:", msg_type
outs.append(out)
return outs
def run_notebook(self, nb):
"""
"""
shell = self.kc.shell_channel
iopub = self.iopub
cells = 0
errors = 0
cell_errors = 0
exec_count = 1
#TODO: What are the worksheets? -ss
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type != 'code':
# there won't be any output
continue
cells += 1
try:
# attaches the output to cell inplace
outs = self.run_cell(shell, iopub, cell, exec_count)
if outs and outs[-1]['output_type'] == 'pyerr':
cell_errors += 1
exec_count += 1
except Exception as e:
print "failed to run cell:", repr(e)
print cell.input
errors += 1
continue
cell.outputs = outs
print "ran notebook %s" % nb.metadata.name
print " ran %3i cells" % cells
if errors:
print " %3i cells raised exceptions" % errors
else:
print " there were no errors in run_cell"
if cell_errors:
print " %3i cells have exceptions in their output" % cell_errors
else:
print " all code executed in the notebook as expected"
def __del__(self):
self.kc.stop_channels()
self.km.shutdown_kernel()
del self.km
def _get_parser():
try:
import argparse
except ImportError:
raise ImportError("This script only runs on Python >= 2.7")
parser = argparse.ArgumentParser(description="Convert .ipynb notebook "
"inputs to HTML page with output")
parser.add_argument("path", type=str, default=SOURCE_DIR, nargs="?",
help="path to folder containing notebooks")
parser.add_argument("--profile", type=str,
help="profile name to use")
parser.add_argument("--timeout", default=90, type=int,
metavar="N",
help="how long to wait for cells to run in seconds")
return parser
def nb2html(nb):
"""
Cribbed from nbviewer
"""
config = Config()
config.HTMLExporter.template_file = 'basic'
config.NbconvertApp.fileext = "html"
config.CSSHtmlHeaderTransformer.enabled = False
C = HTMLExporter(config=config)
return C.from_notebook_node(nb)[0]
def nb2rst(nb, files_dir):
"""
nb should be a NotebookNode
"""
    #NOTE: This does not currently work. Needs to be updated to IPython 1.0.
config = Config()
C = ConverterRST(config=config)
# bastardize how this is supposed to be called
# either the API is broken, or I'm not using it right
# why can't I set this using the config?
C.files_dir = files_dir + "_files"
if not os.path.exists(C.files_dir):
os.makedirs(C.files_dir)
# already parsed into a NotebookNode
C.nb = nb
return C.convert()
if __name__ == '__main__':
rst_target_dir = os.path.join(cur_dir, '..',
'docs/source/examples/notebooks/generated/')
if not os.path.exists(rst_target_dir):
os.makedirs(rst_target_dir)
parser = _get_parser()
arg_ns, other_args = parser.parse_known_args()
os.chdir(arg_ns.path) # so we execute in notebook dir
notebook_runner = NotebookRunner(arg_ns.path, other_args, arg_ns.profile,
arg_ns.timeout)
try:
for fname, nb in notebook_runner:
base, ext = os.path.splitext(fname)
fname_only = os.path.basename(base)
# check if we need to write
towrite, filehash = hash_funcs.check_hash(open(fname, "r").read(),
fname_only)
if not towrite:
print "Hash has not changed for file %s" % fname_only
continue
print "Writing ", fname_only
# This edits the notebook cells inplace
notebook_runner(nb)
# for debugging writes ipynb file with output
#new_ipynb = "%s_generated%s" % (base, ".ipynb")
#with io.open(new_ipynb, "w", encoding="utf-8") as f:
# write(nb, f, "json")
# use nbconvert to convert to rst
support_file_dir = os.path.join(rst_target_dir,
fname_only+"_files")
if OUTPUT == "rst":
new_rst = os.path.join(rst_target_dir, fname_only+".rst")
rst_out = nb2rst(nb, fname_only)
# write them to source directory
if not os.path.exists(rst_target_dir):
os.makedirs(rst_target_dir)
with io.open(new_rst, "w", encoding="utf-8") as f:
f.write(rst_out)
# move support files
if os.path.exists(fname_only+"_files"):
shutil.move(fname_only+"_files",
os.path.join(rst_target_dir, fname_only+"_files"))
elif OUTPUT == "html":
from notebook_output_template import notebook_template
new_html = os.path.join(rst_target_dir, fname_only+".rst")
# get the title out of the notebook because sphinx needs it
title_cell = nb['worksheets'][0]['cells'].pop(0)
if title_cell['cell_type'] == 'heading':
pass
elif (title_cell['cell_type'] == 'markdown'
and title_cell['source'].strip().startswith('#')):
# IPython 3.x got rid of header cells
pass
else:
print "Title not in first cell for ", fname_only
print "Not generating rST"
continue
html_out = nb2html(nb)
# indent for insertion into raw html block in rST
html_out = "\n".join([" "+i for i in html_out.split("\n")])
with io.open(new_html, "w", encoding="utf-8") as f:
f.write(title_cell["source"].replace("#",
"").strip() + u"\n")
f.write(u"="*len(title_cell["source"])+u"\n\n")
f.write(notebook_template.substitute(name=fname_only,
body=html_out))
hash_funcs.update_hash_dict(filehash, fname_only)
except Exception, err:
raise err
finally:
os.chdir(cur_dir)
# probably not necessary
del notebook_runner
|
musically-ut/statsmodels
|
tools/nbgenerate.py
|
Python
|
bsd-3-clause
| 11,564
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
page_title = "Blog"
def get_context(context):
context.update(frappe.get_doc("Blog Settings", "Blog Settings").as_dict())
context.children = get_children()
def get_children():
return frappe.db.sql("""select concat("blog/", page_name) as name,
title from `tabBlog Category`
where ifnull(published, 0) = 1 order by title asc""", as_dict=1)
|
geo-poland/frappe
|
frappe/templates/pages/blog.py
|
Python
|
mit
| 504
|
# -*- coding: utf-8 -*-
"""
Test cases related to XML Schema parsing and validation
"""
import unittest, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, BytesIO, HelperTestCase, fileInTestDir
from common_imports import doctest, make_doctest
class ETreeXMLSchemaTestCase(HelperTestCase):
def test_xmlschema(self):
tree_valid = self.parse('<a><b></b></a>')
tree_invalid = self.parse('<a><c></c></a>')
schema = self.parse('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence>
<xsd:element name="b" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(schema)
self.assert_(schema.validate(tree_valid))
self.assert_(not schema.validate(tree_invalid))
def test_xmlschema_default_attributes(self):
schema = self.parse('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence minOccurs="4" maxOccurs="4">
<xsd:element name="b" type="BType" />
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="BType">
<xsd:attribute name="hardy" type="xsd:string" default="hey" />
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(schema, attribute_defaults=True)
tree = self.parse('<a><b hardy="ho"/><b/><b hardy="ho"/><b/></a>')
root = tree.getroot()
self.assertEquals('ho', root[0].get('hardy'))
self.assertEquals(None, root[1].get('hardy'))
self.assertEquals('ho', root[2].get('hardy'))
self.assertEquals(None, root[3].get('hardy'))
self.assert_(schema(tree))
root = tree.getroot()
self.assertEquals('ho', root[0].get('hardy'))
self.assertEquals('hey', root[1].get('hardy'))
self.assertEquals('ho', root[2].get('hardy'))
self.assertEquals('hey', root[3].get('hardy'))
def test_xmlschema_parse(self):
schema = self.parse('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence>
<xsd:element name="b" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(schema)
parser = etree.XMLParser(schema=schema)
tree_valid = self.parse('<a><b></b></a>', parser=parser)
self.assertEquals('a', tree_valid.getroot().tag)
self.assertRaises(etree.XMLSyntaxError,
self.parse, '<a><c></c></a>', parser=parser)
def test_xmlschema_parse_default_attributes(self):
# does not work as of libxml2 2.7.3
schema = self.parse('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence minOccurs="4" maxOccurs="4">
<xsd:element name="b" type="BType" />
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="BType">
<xsd:attribute name="hardy" type="xsd:string" default="hey" />
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(schema)
parser = etree.XMLParser(schema=schema, attribute_defaults=True)
tree_valid = self.parse('<a><b hardy="ho"/><b/><b hardy="ho"/><b/></a>',
parser=parser)
root = tree_valid.getroot()
self.assertEquals('ho', root[0].get('hardy'))
self.assertEquals('hey', root[1].get('hardy'))
self.assertEquals('ho', root[2].get('hardy'))
self.assertEquals('hey', root[3].get('hardy'))
def test_xmlschema_parse_default_attributes_schema_config(self):
# does not work as of libxml2 2.7.3
schema = self.parse('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence minOccurs="4" maxOccurs="4">
<xsd:element name="b" type="BType" />
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="BType">
<xsd:attribute name="hardy" type="xsd:string" default="hey" />
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(schema, attribute_defaults=True)
parser = etree.XMLParser(schema=schema)
tree_valid = self.parse('<a><b hardy="ho"/><b/><b hardy="ho"/><b/></a>',
parser=parser)
root = tree_valid.getroot()
self.assertEquals('ho', root[0].get('hardy'))
self.assertEquals('hey', root[1].get('hardy'))
self.assertEquals('ho', root[2].get('hardy'))
self.assertEquals('hey', root[3].get('hardy'))
def test_xmlschema_parse_fixed_attributes(self):
# does not work as of libxml2 2.7.3
schema = self.parse('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence minOccurs="3" maxOccurs="3">
<xsd:element name="b" type="BType" />
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="BType">
<xsd:attribute name="hardy" type="xsd:string" fixed="hey" />
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(schema)
parser = etree.XMLParser(schema=schema, attribute_defaults=True)
tree_valid = self.parse('<a><b/><b hardy="hey"/><b/></a>',
parser=parser)
root = tree_valid.getroot()
self.assertEquals('hey', root[0].get('hardy'))
self.assertEquals('hey', root[1].get('hardy'))
self.assertEquals('hey', root[2].get('hardy'))
def test_xmlschema_stringio(self):
schema_file = BytesIO('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence>
<xsd:element name="b" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(file=schema_file)
parser = etree.XMLParser(schema=schema)
tree_valid = self.parse('<a><b></b></a>', parser=parser)
self.assertEquals('a', tree_valid.getroot().tag)
self.assertRaises(etree.XMLSyntaxError,
self.parse, '<a><c></c></a>', parser=parser)
def test_xmlschema_iterparse(self):
schema = self.parse('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence>
<xsd:element name="b" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(schema)
xml = BytesIO('<a><b></b></a>')
events = [ (event, el.tag)
for (event, el) in etree.iterparse(xml, schema=schema) ]
self.assertEquals([('end', 'b'), ('end', 'a')],
events)
def test_xmlschema_iterparse_fail(self):
schema = self.parse('''
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence>
<xsd:element name="b" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>
''')
schema = etree.XMLSchema(schema)
self.assertRaises(
etree.XMLSyntaxError,
list, etree.iterparse(BytesIO('<a><c></c></a>'), schema=schema))
def test_xmlschema_elementtree_error(self):
self.assertRaises(ValueError, etree.XMLSchema, etree.ElementTree())
def test_xmlschema_invalid_schema1(self):
schema = self.parse('''\
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence>
<xsd:element name="b" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>
''')
self.assertRaises(etree.XMLSchemaParseError,
etree.XMLSchema, schema)
def test_xmlschema_invalid_schema2(self):
schema = self.parse('<test/>')
self.assertRaises(etree.XMLSchemaParseError,
etree.XMLSchema, schema)
def test_xmlschema_file(self):
# this will only work if we access the file through path or
# file object..
f = open(fileInTestDir('test.xsd'), 'rb')
try:
schema = etree.XMLSchema(file=f)
finally:
f.close()
tree_valid = self.parse('<a><b></b></a>')
self.assert_(schema.validate(tree_valid))
def test_xmlschema_import_file(self):
# this will only work if we access the file through path or
# file object..
schema = etree.XMLSchema(file=fileInTestDir('test_import.xsd'))
tree_valid = self.parse(
'<a:x xmlns:a="http://codespeak.net/lxml/schema/ns1"><b></b></a:x>')
self.assert_(schema.validate(tree_valid))
def test_xmlschema_shortcut(self):
tree_valid = self.parse('<a><b></b></a>')
tree_invalid = self.parse('<a><c></c></a>')
schema = self.parse('''\
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<xsd:element name="a" type="AType"/>
<xsd:complexType name="AType">
<xsd:sequence>
<xsd:element name="b" type="xsd:string" />
</xsd:sequence>
</xsd:complexType>
</xsd:schema>
''')
self.assert_(tree_valid.xmlschema(schema))
self.assert_(not tree_invalid.xmlschema(schema))
class ETreeXMLSchemaResolversTestCase(HelperTestCase):
resolver_schema_int = BytesIO("""\
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:etype="http://codespeak.net/lxml/test/external"
targetNamespace="http://codespeak.net/lxml/test/internal">
<xsd:import namespace="http://codespeak.net/lxml/test/external" schemaLocation="XXX.xsd" />
<xsd:element name="a" type="etype:AType"/>
</xsd:schema>""")
resolver_schema_int2 = BytesIO("""\
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:etype="http://codespeak.net/lxml/test/external"
targetNamespace="http://codespeak.net/lxml/test/internal">
<xsd:import namespace="http://codespeak.net/lxml/test/external" schemaLocation="YYY.xsd" />
<xsd:element name="a" type="etype:AType"/>
</xsd:schema>""")
resolver_schema_ext = """\
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://codespeak.net/lxml/test/external">
<xsd:complexType name="AType">
<xsd:sequence><xsd:element name="b" type="xsd:string" minOccurs="0" maxOccurs="unbounded" /></xsd:sequence>
</xsd:complexType>
</xsd:schema>"""
class simple_resolver(etree.Resolver):
def __init__(self, schema):
self.schema = schema
def resolve(self, url, id, context):
assert url == 'XXX.xsd'
return self.resolve_string(self.schema, context)
# tests:
def test_xmlschema_resolvers(self):
"""Test that resolvers work with schema."""
parser = etree.XMLParser()
parser.resolvers.add(self.simple_resolver(self.resolver_schema_ext))
schema_doc = etree.parse(self.resolver_schema_int, parser = parser)
schema = etree.XMLSchema(schema_doc)
def test_xmlschema_resolvers_root(self):
"""Test that the default resolver will get called if there's no
specific parser resolver."""
root_resolver = self.simple_resolver(self.resolver_schema_ext)
etree.get_default_parser().resolvers.add(root_resolver)
schema_doc = etree.parse(self.resolver_schema_int)
schema = etree.XMLSchema(schema_doc)
etree.get_default_parser().resolvers.remove(root_resolver)
def test_xmlschema_resolvers_noroot(self):
"""Test that the default resolver will not get called when a more
specific resolver is registered."""
class res_root(etree.Resolver):
def resolve(self, url, id, context):
assert False
return None
root_resolver = res_root()
etree.get_default_parser().resolvers.add(root_resolver)
parser = etree.XMLParser()
parser.resolvers.add(self.simple_resolver(self.resolver_schema_ext))
schema_doc = etree.parse(self.resolver_schema_int, parser = parser)
schema = etree.XMLSchema(schema_doc)
etree.get_default_parser().resolvers.remove(root_resolver)
def test_xmlschema_nested_resolvers(self):
"""Test that resolvers work in a nested fashion."""
resolver_schema = self.resolver_schema_ext
class res_nested(etree.Resolver):
def __init__(self, ext_schema):
self.ext_schema = ext_schema
def resolve(self, url, id, context):
assert url == 'YYY.xsd'
return self.resolve_string(self.ext_schema, context)
class res(etree.Resolver):
def __init__(self, ext_schema_1, ext_schema_2):
self.ext_schema_1 = ext_schema_1
self.ext_schema_2 = ext_schema_2
def resolve(self, url, id, context):
assert url == 'XXX.xsd'
new_parser = etree.XMLParser()
new_parser.resolvers.add(res_nested(self.ext_schema_2))
new_schema_doc = etree.parse(self.ext_schema_1, parser = new_parser)
new_schema = etree.XMLSchema(new_schema_doc)
return self.resolve_string(resolver_schema, context)
parser = etree.XMLParser()
parser.resolvers.add(res(self.resolver_schema_int2, self.resolver_schema_ext))
schema_doc = etree.parse(self.resolver_schema_int, parser = parser)
schema = etree.XMLSchema(schema_doc)
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeXMLSchemaTestCase)])
suite.addTests([unittest.makeSuite(ETreeXMLSchemaResolversTestCase)])
suite.addTests(
[make_doctest('../../../doc/validation.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/desktop/core/ext-py/lxml/src/lxml/tests/test_xmlschema.py
|
Python
|
gpl-2.0
| 14,290
|
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This XBlock embeds an instance of Oppia in the OpenEdX platform."""
import pkg_resources
from xblock.core import XBlock
from xblock.fields import Scope, Integer, String
from xblock.fragment import Fragment
class OppiaXBlock(XBlock):
"""
An XBlock providing an embedded Oppia exploration.
"""
# Note: These fields are defined on the class, and can be accessed in the
# code as self.<fieldname>.
oppiaid = String(
help="ID of the Oppia exploration to embed",
default=None,
scope=Scope.content)
src = String(
help="Source URL of the site",
default="https://www.oppia.org",
scope=Scope.content)
width = Integer(
help="Width of the embedded exploration",
default=700,
scope=Scope.content)
height = Integer(
help="Height of the embedded exploration",
default=500,
scope=Scope.content)
def resource_string(self, path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def student_view(self, context=None):
"""
The primary view of the OppiaXBlock, shown to students
when viewing courses.
"""
html = self.resource_string("static/html/oppia.html")
frag = Fragment(html.format(self=self))
frag.add_javascript_url(
"//cdn.jsdelivr.net/oppia/0.0.1/oppia-player.min.js")
frag.add_javascript(self.resource_string("static/js/oppia.js"))
frag.initialize_js('OppiaXBlock')
return frag
def _log(self, message):
"""
Logger for load, state transition and completion events.
"""
pass
@XBlock.json_handler
def on_exploration_loaded(self, data, suffix=''):
"""Called when an exploration has loaded."""
self._log('Exploration %s was loaded.' % self.oppiaid)
@XBlock.json_handler
def on_state_transition(self, data, suffix=''):
"""Called when a state transition in the exploration has occurred."""
self._log(
"Recording the following state transition for exploration %s: "
"'%s' to '%s'" % (
self.oppiaid, data['oldStateName'], data['newStateName']))
@XBlock.json_handler
def on_exploration_completed(self, data, suffix=''):
"""Called when the exploration has been completed."""
self._log('Exploration %s has been completed.' % self.oppiaid)
def studio_view(self, context):
"""
Create a fragment used to display the edit view in the Studio.
"""
html_str = pkg_resources.resource_string(
__name__, "static/html/oppia_edit.html")
oppiaid = self.oppiaid or ''
frag = Fragment(unicode(html_str).format(
oppiaid=oppiaid, src=self.src, width=self.width,
height=self.height))
js_str = pkg_resources.resource_string(
__name__, "static/js/oppia_edit.js")
frag.add_javascript(unicode(js_str))
frag.initialize_js('OppiaXBlockEditor')
return frag
@XBlock.json_handler
def studio_submit(self, data, suffix=''):
"""
Called when submitting the form in Studio.
"""
self.oppiaid = data.get('oppiaid')
self.src = data.get('src')
self.width = data.get('width')
self.height = data.get('height')
return {'result': 'success'}
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [
("Oppia Embedding",
"""<vertical_demo>
<oppia oppiaid="0" src="https://www.oppia.org" width="700" />
</vertical_demo>
"""),
]
|
kaffeel/oppia
|
integrations_dev/openedx_xblock/xblock-oppia/oppia/oppia.py
|
Python
|
apache-2.0
| 4,448
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_text, to_native
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.utils.display import Display
display = Display()
__all__ = ['Playbook']
class Playbook:
def __init__(self, loader):
# Entries in the datastructure of a playbook may
# be either a play or an include statement
self._entries = []
self._basedir = to_text(os.getcwd(), errors='surrogate_or_strict')
self._loader = loader
self._file_name = None
@staticmethod
def load(file_name, variable_manager=None, loader=None):
pb = Playbook(loader=loader)
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
return pb
def _load_playbook_data(self, file_name, variable_manager, vars=None):
if os.path.isabs(file_name):
self._basedir = os.path.dirname(file_name)
else:
self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
# set the loaders basedir
cur_basedir = self._loader.get_basedir()
self._loader.set_basedir(self._basedir)
add_all_plugin_dirs(self._basedir)
self._file_name = file_name
try:
ds = self._loader.load_from_file(os.path.basename(file_name))
except UnicodeDecodeError as e:
raise AnsibleParserError("Could not read playbook (%s) due to encoding issues: %s" % (file_name, to_native(e)))
# check for errors and restore the basedir in case this error is caught and handled
if ds is None:
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("Empty playbook, nothing to do", obj=ds)
elif not isinstance(ds, list):
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("A playbook must be a list of plays, got a %s instead" % type(ds), obj=ds)
elif not ds:
display.deprecated("Empty plays will currently be skipped, in the future they will cause a syntax error", version='2.12')
# Parse the playbook entries. For plays, we simply parse them
# using the Play() object, and includes are parsed using the
# PlaybookInclude() object
for entry in ds:
if not isinstance(entry, dict):
# restore the basedir in case this error is caught and handled
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
if any(action in entry for action in ('import_playbook', 'include')):
if 'include' in entry:
display.deprecated("'include' for playbook includes. You should use 'import_playbook' instead", version="2.12")
pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
if pb is not None:
self._entries.extend(pb._entries)
else:
which = entry.get('import_playbook', entry.get('include', entry))
display.display("skipping playbook '%s' due to conditional test failure" % which, color=C.COLOR_SKIP)
else:
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader, vars=vars)
self._entries.append(entry_obj)
# we're done, so restore the old basedir in the loader
self._loader.set_basedir(cur_basedir)
def get_loader(self):
return self._loader
def get_plays(self):
return self._entries[:]
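if __name__ == '__main__':
    # Usage sketch (not part of the original module): loading a playbook file
    # outside of the CLI. The DataLoader/VariableManager import paths are
    # assumed from the same Ansible tree, and 'site.yml' is a hypothetical
    # playbook path used only for illustration.
    from ansible.parsing.dataloader import DataLoader
    from ansible.vars.manager import VariableManager
    loader = DataLoader()
    variable_manager = VariableManager(loader=loader)
    pb = Playbook.load('site.yml', variable_manager=variable_manager, loader=loader)
    for play in pb.get_plays():
        display.display("loaded play: %s" % play.get_name())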
|
anryko/ansible
|
lib/ansible/playbook/__init__.py
|
Python
|
gpl-3.0
| 4,725
|
#=======================================================================
#
# Python Lexical Analyser
#
#
# Scanning an input stream
#
#=======================================================================
from __future__ import absolute_import
import cython
cython.declare(BOL=object, EOL=object, EOF=object, NOT_FOUND=object)
from . import Errors
from .Regexps import BOL, EOL, EOF
NOT_FOUND = object()
class Scanner(object):
"""
A Scanner is used to read tokens from a stream of characters
using the token set specified by a Plex.Lexicon.
Constructor:
Scanner(lexicon, stream, name = '')
See the docstring of the __init__ method for details.
Methods:
See the docstrings of the individual methods for more
information.
read() --> (value, text)
Reads the next lexical token from the stream.
position() --> (name, line, col)
Returns the position of the last token read using the
read() method.
begin(state_name)
Causes scanner to change state.
produce(value [, text])
Causes return of a token value to the caller of the
Scanner.
"""
# lexicon = None # Lexicon
# stream = None # file-like object
# name = ''
# buffer = ''
# buf_start_pos = 0 # position in input of start of buffer
# next_pos = 0 # position in input of next char to read
# cur_pos = 0 # position in input of current char
# cur_line = 1 # line number of current char
# cur_line_start = 0 # position in input of start of current line
# start_pos = 0 # position in input of start of token
# start_line = 0 # line number of start of token
# start_col = 0 # position in line of start of token
# text = None # text of last token read
# initial_state = None # Node
# state_name = '' # Name of initial state
# queue = None # list of tokens to be returned
# trace = 0
def __init__(self, lexicon, stream, name='', initial_pos=None):
"""
Scanner(lexicon, stream, name = '')
|lexicon| is a Plex.Lexicon instance specifying the lexical tokens
to be recognised.
|stream| can be a file object or anything which implements a
compatible read() method.
|name| is optional, and may be the name of the file being
scanned or any other identifying string.
"""
self.trace = 0
self.buffer = u''
self.buf_start_pos = 0
self.next_pos = 0
self.cur_pos = 0
self.cur_line = 1
self.start_pos = 0
self.start_line = 0
self.start_col = 0
self.text = None
self.state_name = None
self.lexicon = lexicon
self.stream = stream
self.name = name
self.queue = []
self.initial_state = None
self.begin('')
self.next_pos = 0
self.cur_pos = 0
self.cur_line_start = 0
self.cur_char = BOL
self.input_state = 1
if initial_pos is not None:
self.cur_line, self.cur_line_start = initial_pos[1], -initial_pos[2]
def read(self):
"""
Read the next lexical token from the stream and return a
tuple (value, text), where |value| is the value associated with
the token as specified by the Lexicon, and |text| is the actual
string read from the stream. Returns (None, '') on end of file.
"""
queue = self.queue
while not queue:
self.text, action = self.scan_a_token()
if action is None:
self.produce(None)
self.eof()
else:
value = action.perform(self, self.text)
if value is not None:
self.produce(value)
result = queue[0]
del queue[0]
return result
def scan_a_token(self):
"""
Read the next input sequence recognised by the machine
and return (text, action). Returns ('', None) on end of
file.
"""
self.start_pos = self.cur_pos
self.start_line = self.cur_line
self.start_col = self.cur_pos - self.cur_line_start
action = self.run_machine_inlined()
if action is not None:
if self.trace:
print("Scanner: read: Performing %s %d:%d" % (
action, self.start_pos, self.cur_pos))
text = self.buffer[
self.start_pos - self.buf_start_pos:
self.cur_pos - self.buf_start_pos]
return (text, action)
else:
if self.cur_pos == self.start_pos:
if self.cur_char is EOL:
self.next_char()
if self.cur_char is None or self.cur_char is EOF:
return (u'', None)
raise Errors.UnrecognizedInput(self, self.state_name)
def run_machine_inlined(self):
"""
Inlined version of run_machine for speed.
"""
state = self.initial_state
cur_pos = self.cur_pos
cur_line = self.cur_line
cur_line_start = self.cur_line_start
cur_char = self.cur_char
input_state = self.input_state
next_pos = self.next_pos
buffer = self.buffer
buf_start_pos = self.buf_start_pos
buf_len = len(buffer)
b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \
None, 0, 0, 0, u'', 0, 0
trace = self.trace
while 1:
if trace: #TRACE#
print("State %d, %d/%d:%s -->" % ( #TRACE#
state['number'], input_state, cur_pos, repr(cur_char))) #TRACE#
# Begin inlined self.save_for_backup()
#action = state.action #@slow
action = state['action'] #@fast
if action is not None:
b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \
action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos
# End inlined self.save_for_backup()
c = cur_char
#new_state = state.new_state(c) #@slow
new_state = state.get(c, NOT_FOUND) #@fast
if new_state is NOT_FOUND: #@fast
new_state = c and state.get('else') #@fast
if new_state:
if trace: #TRACE#
print("State %d" % new_state['number']) #TRACE#
state = new_state
# Begin inlined: self.next_char()
if input_state == 1:
cur_pos = next_pos
# Begin inlined: c = self.read_char()
buf_index = next_pos - buf_start_pos
if buf_index < buf_len:
c = buffer[buf_index]
next_pos += 1
else:
discard = self.start_pos - buf_start_pos
data = self.stream.read(0x1000)
buffer = self.buffer[discard:] + data
self.buffer = buffer
buf_start_pos += discard
self.buf_start_pos = buf_start_pos
buf_len = len(buffer)
buf_index -= discard
if data:
c = buffer[buf_index]
next_pos += 1
else:
c = u''
# End inlined: c = self.read_char()
if c == u'\n':
cur_char = EOL
input_state = 2
elif not c:
cur_char = EOL
input_state = 4
else:
cur_char = c
elif input_state == 2:
cur_char = u'\n'
input_state = 3
elif input_state == 3:
cur_line += 1
cur_line_start = cur_pos = next_pos
cur_char = BOL
input_state = 1
elif input_state == 4:
cur_char = EOF
input_state = 5
else: # input_state = 5
cur_char = u''
# End inlined self.next_char()
else: # not new_state
if trace: #TRACE#
print("blocked") #TRACE#
# Begin inlined: action = self.back_up()
if b_action is not None:
(action, cur_pos, cur_line, cur_line_start,
cur_char, input_state, next_pos) = \
(b_action, b_cur_pos, b_cur_line, b_cur_line_start,
b_cur_char, b_input_state, b_next_pos)
else:
action = None
break # while 1
# End inlined: action = self.back_up()
self.cur_pos = cur_pos
self.cur_line = cur_line
self.cur_line_start = cur_line_start
self.cur_char = cur_char
self.input_state = input_state
self.next_pos = next_pos
if trace: #TRACE#
if action is not None: #TRACE#
print("Doing %s" % action) #TRACE#
return action
def next_char(self):
input_state = self.input_state
if self.trace:
print("Scanner: next: %s [%d] %d" % (" " * 20, input_state, self.cur_pos))
if input_state == 1:
self.cur_pos = self.next_pos
c = self.read_char()
if c == u'\n':
self.cur_char = EOL
self.input_state = 2
elif not c:
self.cur_char = EOL
self.input_state = 4
else:
self.cur_char = c
elif input_state == 2:
self.cur_char = u'\n'
self.input_state = 3
elif input_state == 3:
self.cur_line += 1
self.cur_line_start = self.cur_pos = self.next_pos
self.cur_char = BOL
self.input_state = 1
elif input_state == 4:
self.cur_char = EOF
self.input_state = 5
else: # input_state = 5
self.cur_char = u''
if self.trace:
print("--> [%d] %d %s" % (input_state, self.cur_pos, repr(self.cur_char)))
def position(self):
"""
Return a tuple (name, line, col) representing the location of
the last token read using the read() method. |name| is the
name that was provided to the Scanner constructor; |line|
is the line number in the stream (1-based); |col| is the
position within the line of the first character of the token
(0-based).
"""
return (self.name, self.start_line, self.start_col)
def get_position(self):
"""Python accessible wrapper around position(), only for error reporting.
"""
return self.position()
def begin(self, state_name):
"""Set the current state of the scanner to the named state."""
self.initial_state = (
self.lexicon.get_initial_state(state_name))
self.state_name = state_name
def produce(self, value, text=None):
"""
Called from an action procedure, causes |value| to be returned
as the token value from read(). If |text| is supplied, it is
returned in place of the scanned text.
produce() can be called more than once during a single call to an action
procedure, in which case the tokens are queued up and returned one
at a time by subsequent calls to read(), until the queue is empty,
whereupon scanning resumes.
"""
if text is None:
text = self.text
self.queue.append((value, text))
def eof(self):
"""
Override this method if you want something to be done at
end of file.
"""
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/Cython/Plex/Scanners.py
|
Python
|
gpl-3.0
| 12,237
|
import time
import unittest
from robot.utils.asserts import assert_equals
from robot.utils.robottime import TimestampCache
class FakeTimestampCache(TimestampCache):
def __init__(self, epoch):
TimestampCache.__init__(self)
self.epoch = epoch + self.timezone_correction()
def _get_epoch(self):
return self.epoch
def timezone_correction(self):
dst = 3600 if time.daylight == 0 else 0
tz = 7200 + time.timezone
return (tz + dst)
class TestTimestamp(unittest.TestCase):
def test_new_timestamp(self):
actual = FakeTimestampCache(1338816626.999).get_timestamp()
assert_equals(actual, '20120604 16:30:26.999')
def test_cached(self):
cache = FakeTimestampCache(1338816626.900)
cache.get_timestamp()
cache.epoch += 0.099
assert_equals(cache.get_timestamp(), '20120604 16:30:26.999')
def test_round_to_next_second(self):
cache = FakeTimestampCache(1338816626.0)
assert_equals(cache.get_timestamp(), '20120604 16:30:26.000')
cache.epoch += 0.9995
assert_equals(cache.get_timestamp(), '20120604 16:30:27.000')
def test_cache_timestamp_without_millis_separator(self):
cache = FakeTimestampCache(1338816626.0)
assert_equals(cache.get_timestamp(millissep=None), '20120604 16:30:26')
assert_equals(cache.get_timestamp(millissep=None), '20120604 16:30:26')
assert_equals(cache.get_timestamp(), '20120604 16:30:26.000')
def test_separators(self):
cache = FakeTimestampCache(1338816626.001)
assert_equals(cache.get_timestamp(daysep='-', daytimesep='T'),
'2012-06-04T16:30:26.001')
assert_equals(cache.get_timestamp(timesep='', millissep='X'),
'20120604 163026X001')
if __name__ == "__main__":
unittest.main()
|
yahman72/robotframework
|
utest/utils/test_timestampcache.py
|
Python
|
apache-2.0
| 1,868
|
#!/usr/bin/env python
'''Testing rect map debug rendering.
You should see a checkered square grid.
Press escape or close the window to finish the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
from render_base import RenderBase
import scene2d
from scene2d.debug import gen_rect_map
class RectFlatDebugTest(RenderBase):
def test_main(self):
self.init_window(256, 256)
self.set_map(gen_rect_map([[{}]*10]*10, 32, 32), resize=True)
self.view.allow_oob = False
self.run_test()
if __name__ == '__main__':
unittest.main()
|
nicememory/pie
|
pyglet/contrib/currently-broken/scene2d/tests/scene2d/RECT_FLAT_DEBUG.py
|
Python
|
apache-2.0
| 598
|
# -*- coding: utf-8 -*-
address_mode_def = {}
address_mode_def['S_IMPLIED'] = dict(size=1, short='sngl')
address_mode_def['S_IMMEDIATE'] = dict(size=2, short='imm')
address_mode_def['S_IMMEDIATE_WITH_MODIFIER'] = dict(size=2, short='imm')
address_mode_def['S_ACCUMULATOR'] = dict(size=1, short='acc')
address_mode_def['S_IMMEDIATE'] = dict(size=2, short='imm')
address_mode_def['S_ZEROPAGE'] = dict(size=2, short='zp')
address_mode_def['S_ZEROPAGE_X'] = dict(size=2, short='zpx')
address_mode_def['S_ZEROPAGE_Y'] = dict(size=2, short='zpy')
address_mode_def['S_ABSOLUTE'] = dict(size=3, short='abs')
address_mode_def['S_ABSOLUTE_X'] = dict(size=3, short='absx')
address_mode_def['S_ABSOLUTE_Y'] = dict(size=3, short='absy')
address_mode_def['S_INDIRECT_X'] = dict(size=2, short='indx')
address_mode_def['S_INDIRECT_Y'] = dict(size=2, short='indy')
address_mode_def['S_RELATIVE'] = dict(size=2, short='rel')
opcodes = {}
opcodes['ADC'] = dict(imm=0x69, zp=0x65, zpx=0x75, abs=0x6d, absx=0x7d,
absy=0x79, indx=0x61, indy=0x71)
opcodes['AND'] = dict(imm=0x29, zp=0x25, zpx=0x35, abs=0x2d, absx=0x3d,
absy=0x39, indx=0x21, indy=0x31)
opcodes['ASL'] = dict(acc=0x0a, imm=0x0a, zp=0x06, zpx=0x16, abs=0x0e,
absx=0x1e)
opcodes['BCC'] = dict(rel=0x90)
opcodes['BCS'] = dict(rel=0xb0)
opcodes['BEQ'] = dict(rel=0xf0)
opcodes['BIT'] = dict(zp=0x24, abs=0x2c)
opcodes['BMI'] = dict(rel=0x30)
opcodes['BNE'] = dict(rel=0xd0)
opcodes['BPL'] = dict(rel=0x10)
opcodes['BVC'] = dict(rel=0x50)
opcodes['BVS'] = dict(rel=0x70)
opcodes['CLC'] = dict(sngl=0x18)
opcodes['CLD'] = dict(sngl=0xd8)
opcodes['CLI'] = dict(sngl=0x58)
opcodes['CLV'] = dict(sngl=0xb8)
opcodes['CMP'] = dict(imm=0xc9, zp=0xc5, zpx=0xd5, abs=0xcd, absx=0xdd,
absy=0xd9, indx=0xc1, indy=0xd1)
opcodes['CPX'] = dict(imm=0xe0, zp=0xe4, abs=0xec)
opcodes['CPY'] = dict(imm=0xc0, zp=0xc4, abs=0xcc)
opcodes['DEC'] = dict(zp=0xc6, zpx=0xd6, abs=0xce, absx=0xde)
opcodes['DEX'] = dict(sngl=0xca)
opcodes['DEY'] = dict(sngl=0x88)
opcodes['EOR'] = dict(imm=0x49, zp=0x45, zpx=0x55, abs=0x4d, absx=0x5d,
absy=0x59, indx=0x41, indy=0x51)
opcodes['INC'] = dict(zp=0xe6, zpx=0xf6, abs=0xee, absx=0xfe)
opcodes['INX'] = dict(sngl=0xe8)
opcodes['INY'] = dict(sngl=0xc8)
opcodes['JMP'] = dict(abs=0x4c)
opcodes['JSR'] = dict(abs=0x20)
opcodes['LDA'] = dict(imm=0xa9, zp=0xa5, zpx=0xb5, abs=0xad, absx=0xbd,
absy=0xb9, indx=0xa1, indy=0xb1)
opcodes['LDX'] = dict(imm=0xa2, zp=0xa6, zpy=0xb6, abs=0xae, absy=0xbe)
opcodes['LDY'] = dict(imm=0xa0, zp=0xa4, zpx=0xb4, abs=0xac, absx=0xbc)
opcodes['LSR'] = dict(acc=0x4a, imm=0x4a, zp=0x46, zpx=0x56, abs=0x4e,
absx=0x5e)
opcodes['NOP'] = dict(sngl=0xea)
opcodes['ORA'] = dict(imm=0x09, zp=0x05, zpx=0x15, abs=0x0d, absx=0x1d,
absy=0x19, indx=0x01, indy=0x11)
opcodes['PHA'] = dict(sngl=0x48)
opcodes['PHP'] = dict(sngl=0x08)
opcodes['PLA'] = dict(sngl=0x68)
opcodes['PLP'] = dict(sngl=0x28)
opcodes['SBC'] = dict(imm=0xe9, zp=0xe5, zpx=0xf5, abs=0xed, absx=0xfd,
absy=0xf9, indx=0xe1, indy=0xf1)
opcodes['SEC'] = dict(sngl=0x38)
opcodes['SED'] = dict(sngl=0xf8)
opcodes['SEI'] = dict(sngl=0x78)
opcodes['STA'] = dict(zp=0x85, zpx=0x95, abs=0x8d, absx=0x9d, absy=0x99,
indx=0x81, indy=0x91)
opcodes['STX'] = dict(zp=0x86, zpy=0x96, abs=0x8e)
opcodes['STY'] = dict(zp=0x84, zpx=0x94, abs=0x8c)
opcodes['ROL'] = dict(imm=0x2a, zp=0x26, zpx=0x36, abs=0x2e, absx=0x3e)
opcodes['ROR'] = dict(imm=0x6a, zp=0x66, zpx=0x76, abs=0x6e, absx=0x7e)
opcodes['RTI'] = dict(sngl=0x40)
opcodes['RTS'] = dict(sngl=0x60)
opcodes['TAX'] = dict(sngl=0xaa)
opcodes['TAY'] = dict(sngl=0xa8)
opcodes['TSX'] = dict(sngl=0xba)
opcodes['TXA'] = dict(sngl=0x8a)
opcodes['TXS'] = dict(sngl=0x9a)
opcodes['TYA'] = dict(sngl=0x98)
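if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): how the two tables
    # above combine when assembling a single instruction. `assemble_one` is a
    # hypothetical helper, not pyNES' real assembler.
    def assemble_one(mnemonic, short_mode, operand=None):
        # instruction size comes from the addressing-mode table
        size = next(mode['size'] for mode in address_mode_def.values()
                    if mode['short'] == short_mode)
        inst = [opcodes[mnemonic][short_mode]]
        if size >= 2:
            inst.append(operand & 0xff)          # low byte
        if size == 3:
            inst.append((operand >> 8) & 0xff)   # high byte (little-endian)
        return inst
    assert assemble_one('LDA', 'imm', 0x01) == [0xa9, 0x01]          # LDA #$01
    assert assemble_one('STA', 'abs', 0x0200) == [0x8d, 0x00, 0x02]  # STA $0200
    assert assemble_one('NOP', 'sngl') == [0xea]                     # NOP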
|
shekkbuilder/pyNES
|
pynes/c6502.py
|
Python
|
bsd-3-clause
| 3,905
|
def get_tuple() -> tuple[str, ...]:
    pass
<the_ref>get_tuple()
|
asedunov/intellij-community
|
python/testData/quickdoc/HomogeneousTuple.py
|
Python
|
apache-2.0
| 66
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""".. currentmodule:: migrate.versioning.util"""
import warnings
import logging
from decorator import decorator
from pkg_resources import EntryPoint
import six
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.pool import StaticPool
from migrate import exceptions
from migrate.versioning.util.keyedinstance import KeyedInstance
from migrate.versioning.util.importpath import import_path
log = logging.getLogger(__name__)
def load_model(dotted_name):
"""Import module and use module-level variable".
:param dotted_name: path to model in form of string: ``some.python.module:Class``
.. versionchanged:: 0.5.4
"""
if isinstance(dotted_name, six.string_types):
if ':' not in dotted_name:
# backwards compatibility
warnings.warn('model should be in form of module.model:User '
'and not module.model.User', exceptions.MigrateDeprecationWarning)
dotted_name = ':'.join(dotted_name.rsplit('.', 1))
return EntryPoint.parse('x=%s' % dotted_name).load(False)
else:
# Assume it's already loaded.
return dotted_name
def asbool(obj):
"""Do everything to use object as bool"""
if isinstance(obj, six.string_types):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
if obj in (True, False):
return bool(obj)
else:
raise ValueError("String is not true/false: %r" % obj)
def guess_obj_type(obj):
"""Do everything to guess object type from string
    Tries to convert to `int`, then `bool`, and returns the object unchanged if neither succeeded.
    .. versionadded:: 0.5.4
"""
result = None
try:
result = int(obj)
except:
pass
if result is None:
try:
result = asbool(obj)
except:
pass
if result is not None:
return result
else:
return obj
@decorator
def catch_known_errors(f, *a, **kw):
"""Decorator that catches known api errors
    .. versionadded:: 0.5.4
"""
try:
return f(*a, **kw)
except exceptions.PathFoundError as e:
raise exceptions.KnownError("The path %s already exists" % e.args[0])
def construct_engine(engine, **opts):
""".. versionadded:: 0.5.4
Constructs and returns SQLAlchemy engine.
Currently, there are 2 ways to pass create_engine options to :mod:`migrate.versioning.api` functions:
    :param engine: connection string or an existing engine
:param engine_dict: python dictionary of options to pass to `create_engine`
:param engine_arg_*: keyword parameters to pass to `create_engine` (evaluated with :func:`migrate.versioning.util.guess_obj_type`)
:type engine_dict: dict
:type engine: string or Engine instance
:type engine_arg_*: string
:returns: SQLAlchemy Engine
.. note::
keyword parameters override ``engine_dict`` values.
"""
if isinstance(engine, Engine):
return engine
elif not isinstance(engine, six.string_types):
raise ValueError("you need to pass either an existing engine or a database uri")
# get options for create_engine
if opts.get('engine_dict') and isinstance(opts['engine_dict'], dict):
kwargs = opts['engine_dict']
else:
kwargs = dict()
# DEPRECATED: handle echo the old way
echo = asbool(opts.get('echo', False))
if echo:
warnings.warn('echo=True parameter is deprecated, pass '
'engine_arg_echo=True or engine_dict={"echo": True}',
exceptions.MigrateDeprecationWarning)
kwargs['echo'] = echo
# parse keyword arguments
for key, value in six.iteritems(opts):
if key.startswith('engine_arg_'):
kwargs[key[11:]] = guess_obj_type(value)
log.debug('Constructing engine')
# TODO: return create_engine(engine, poolclass=StaticPool, **kwargs)
# seems like 0.5.x branch does not work with engine.dispose and staticpool
return create_engine(engine, **kwargs)
@decorator
def with_engine(f, *a, **kw):
"""Decorator for :mod:`migrate.versioning.api` functions
to safely close resources after function usage.
Passes engine parameters to :func:`construct_engine` and
resulting parameter is available as kw['engine'].
Engine is disposed after wrapped function is executed.
.. versionadded: 0.6.0
"""
url = a[0]
engine = construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, Engine) and engine is not url:
log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
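# Illustrative use of with_engine (not part of the original module); `upgrade`
# is a hypothetical api-style function whose first positional argument is a
# database URL:
#
#     @with_engine
#     def upgrade(url, repository, **opts):
#         engine = opts['engine']   # constructed by the decorator, disposed afterwards
#         ...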
class Memoize:
"""Memoize(fn) - an instance which acts like fn but memoizes its arguments
Will only work on functions with non-mutable arguments
ActiveState Code 52201
"""
def __init__(self, fn):
self.fn = fn
self.memo = {}
def __call__(self, *args):
if args not in self.memo:
self.memo[args] = self.fn(*args)
return self.memo[args]
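# Illustrative usage of Memoize (not part of the original module); `expensive`
# is a hypothetical function taking only hashable arguments:
#
#     expensive = Memoize(expensive)
#     expensive(1, 2)   # computed and cached
#     expensive(1, 2)   # served from self.memo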
|
odubno/microblog
|
venv/lib/python2.7/site-packages/migrate/versioning/util/__init__.py
|
Python
|
bsd-3-clause
| 5,339
|
"""The here_travel_time component."""
|
jawilson/home-assistant
|
homeassistant/components/here_travel_time/__init__.py
|
Python
|
apache-2.0
| 38
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api, fields, models
class res_partner(models.Model):
_name = 'res.partner'
_inherit = 'res.partner'
@api.multi
def _purchase_invoice_count(self):
PurchaseOrder = self.env['purchase.order']
Invoice = self.env['account.invoice']
for partner in self:
partner.purchase_order_count = PurchaseOrder.search_count([('partner_id', 'child_of', partner.id)])
partner.supplier_invoice_count = Invoice.search_count([('partner_id', 'child_of', partner.id), ('type', '=', 'in_invoice')])
@api.model
def _commercial_fields(self):
return super(res_partner, self)._commercial_fields()
property_purchase_currency_id = fields.Many2one(
'res.currency', string="Supplier Currency", company_dependent=True,
help="This currency will be used, instead of the default one, for purchases from the current partner")
purchase_order_count = fields.Integer(compute='_purchase_invoice_count', string='# of Purchase Order')
supplier_invoice_count = fields.Integer(compute='_purchase_invoice_count', string='# Vendor Bills')
|
vileopratama/vitech
|
src/addons/purchase/partner.py
|
Python
|
mit
| 1,222
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"
}
DOCUMENTATION = '''
---
module: fmgr_ha
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Manages the High-Availability State of FortiManager Clusters and Nodes.
description: Change HA state or settings of FortiManager nodes (Standalone/Master/Slave).
options:
fmgr_ha_mode:
description:
- Sets the role of the FortiManager host for HA.
required: false
choices: ["standalone", "master", "slave"]
fmgr_ha_peer_ipv4:
description:
- Sets the IPv4 address of a HA peer.
required: false
fmgr_ha_peer_ipv6:
description:
- Sets the IPv6 address of a HA peer.
required: false
fmgr_ha_peer_sn:
description:
- Sets the HA Peer Serial Number.
required: false
fmgr_ha_peer_status:
description:
- Sets the peer status to enable or disable.
required: false
choices: ["enable", "disable"]
fmgr_ha_cluster_pw:
description:
- Sets the password for the HA cluster. Only required once. System remembers between HA mode switches.
required: false
fmgr_ha_cluster_id:
description:
- Sets the ID number of the HA cluster. Defaults to 1.
required: false
default: 1
fmgr_ha_hb_threshold:
description:
- Sets heartbeat lost threshold (1-255).
required: false
default: 3
fmgr_ha_hb_interval:
description:
- Sets the heartbeat interval (1-255).
required: false
default: 5
fmgr_ha_file_quota:
description:
- Sets the File quota in MB (2048-20480).
required: false
default: 4096
'''
EXAMPLES = '''
- name: SET FORTIMANAGER HA NODE TO MASTER
fmgr_ha:
fmgr_ha_mode: "master"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
- name: SET FORTIMANAGER HA NODE TO SLAVE
fmgr_ha:
fmgr_ha_mode: "slave"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
- name: SET FORTIMANAGER HA NODE TO STANDALONE
fmgr_ha:
fmgr_ha_mode: "standalone"
- name: ADD FORTIMANAGER HA PEER
fmgr_ha:
fmgr_ha_peer_ipv4: "192.168.1.254"
fmgr_ha_peer_sn: "FMG-VM1234567890"
fmgr_ha_peer_status: "enable"
- name: CREATE CLUSTER ON MASTER
fmgr_ha:
fmgr_ha_mode: "master"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
fmgr_ha_hb_threshold: "10"
fmgr_ha_hb_interval: "15"
fmgr_ha_file_quota: "2048"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def fmgr_set_ha_mode(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
    # INIT BASIC OBJECTS
response = DEFAULT_RESULT_OBJ
url = ""
datagram = {}
if paramgram["fmgr_ha_cluster_pw"] is not None and str(paramgram["fmgr_ha_mode"].lower()) != "standalone":
datagram = {
"mode": paramgram["fmgr_ha_mode"],
"file-quota": paramgram["fmgr_ha_file_quota"],
"hb-interval": paramgram["fmgr_ha_hb_interval"],
"hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"],
"password": paramgram["fmgr_ha_cluster_pw"],
"clusterid": paramgram["fmgr_ha_cluster_id"]
}
elif str(paramgram["fmgr_ha_mode"].lower()) == "standalone":
datagram = {
"mode": paramgram["fmgr_ha_mode"],
"file-quota": paramgram["fmgr_ha_file_quota"],
"hb-interval": paramgram["fmgr_ha_hb_interval"],
"hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"],
"clusterid": paramgram["fmgr_ha_cluster_id"]
}
url = '/cli/global/system/ha'
response = fmgr.process_request(url, datagram, FMGRMethods.SET)
return response
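# Illustrative paramgram for fmgr_set_ha_mode (not part of the original module);
# the values mirror the defaults and examples documented above:
#
#     paramgram = {
#         "fmgr_ha_mode": "master",
#         "fmgr_ha_cluster_pw": "fortinet",
#         "fmgr_ha_cluster_id": 1,
#         "fmgr_ha_hb_threshold": 3,
#         "fmgr_ha_hb_interval": 5,
#         "fmgr_ha_file_quota": 4096,
#     }
#     response = fmgr_set_ha_mode(fmgr, paramgram)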
def fmgr_get_ha_peer_list(fmgr):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:return: The response from the FortiManager
:rtype: dict
"""
    # INIT BASIC OBJECTS
response = DEFAULT_RESULT_OBJ
datagram = {}
paramgram = {}
url = '/cli/global/system/ha/peer/'
response = fmgr.process_request(url, datagram, FMGRMethods.GET)
return response
def fmgr_set_ha_peer(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
datagram = {
"ip": paramgram["fmgr_ha_peer_ipv4"],
"ip6": paramgram["fmgr_ha_peer_ipv6"],
"serial-number": paramgram["fmgr_ha_peer_sn"],
"status": paramgram["fmgr_ha_peer_status"],
"id": paramgram["peer_id"]
}
url = '/cli/global/system/ha/peer/'
response = fmgr.process_request(url, datagram, FMGRMethods.SET)
return response
def main():
argument_spec = dict(
fmgr_ha_mode=dict(required=False, type="str", choices=["standalone", "master", "slave"]),
fmgr_ha_cluster_pw=dict(required=False, type="str", no_log=True),
fmgr_ha_peer_status=dict(required=False, type="str", choices=["enable", "disable"]),
fmgr_ha_peer_sn=dict(required=False, type="str"),
fmgr_ha_peer_ipv4=dict(required=False, type="str"),
fmgr_ha_peer_ipv6=dict(required=False, type="str"),
fmgr_ha_hb_threshold=dict(required=False, type="int", default=3),
fmgr_ha_hb_interval=dict(required=False, type="int", default=5),
fmgr_ha_file_quota=dict(required=False, type="int", default=4096),
fmgr_ha_cluster_id=dict(required=False, type="int", default=1)
)
required_if = [
['fmgr_ha_peer_ipv4', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']],
['fmgr_ha_peer_ipv6', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']],
['fmgr_ha_mode', 'master', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']],
['fmgr_ha_mode', 'slave', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']],
]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_if=required_if)
paramgram = {
"fmgr_ha_mode": module.params["fmgr_ha_mode"],
"fmgr_ha_cluster_pw": module.params["fmgr_ha_cluster_pw"],
"fmgr_ha_peer_status": module.params["fmgr_ha_peer_status"],
"fmgr_ha_peer_sn": module.params["fmgr_ha_peer_sn"],
"fmgr_ha_peer_ipv4": module.params["fmgr_ha_peer_ipv4"],
"fmgr_ha_peer_ipv6": module.params["fmgr_ha_peer_ipv6"],
"fmgr_ha_hb_threshold": module.params["fmgr_ha_hb_threshold"],
"fmgr_ha_hb_interval": module.params["fmgr_ha_hb_interval"],
"fmgr_ha_file_quota": module.params["fmgr_ha_file_quota"],
"fmgr_ha_cluster_id": module.params["fmgr_ha_cluster_id"],
}
module.paramgram = paramgram
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = FortiManagerHandler(connection, module)
fmgr.tools = FMGRCommon()
else:
module.fail_json(**FAIL_SOCKET_MSG)
# INIT FLAGS AND COUNTERS
get_ha_peers = 0
results = DEFAULT_RESULT_OBJ
try:
if any(v is not None for v in (paramgram["fmgr_ha_peer_sn"], paramgram["fmgr_ha_peer_ipv4"],
paramgram["fmgr_ha_peer_ipv6"], paramgram["fmgr_ha_peer_status"])):
get_ha_peers = 1
except Exception as err:
raise FMGBaseException(err)
try:
# IF HA MODE IS NOT NULL, SWITCH THAT
if paramgram["fmgr_ha_mode"] is not None:
if (str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and paramgram["fmgr_ha_cluster_pw"] is not None)\
or str.lower(paramgram["fmgr_ha_mode"]) == "standalone":
results = fmgr_set_ha_mode(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=False,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
elif str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and\
paramgram["fmgr_ha_mode"] is not None and\
paramgram["fmgr_ha_cluster_pw"] is None:
module.exit_json(msg="If setting HA Mode of MASTER or SLAVE, you must specify a cluster password")
except Exception as err:
raise FMGBaseException(err)
# IF GET_HA_PEERS IS ENABLED, LETS PROCESS THE PEERS
try:
if get_ha_peers == 1:
# GET THE CURRENT LIST OF PEERS FROM THE NODE
peers = fmgr_get_ha_peer_list(fmgr)
# GET LENGTH OF RETURNED PEERS LIST AND ADD ONE FOR THE NEXT ID
paramgram["next_peer_id"] = len(peers[1]) + 1
# SET THE ACTUAL NUMBER OF PEERS
num_of_peers = len(peers[1])
# SET THE PEER ID FOR DISABLE METHOD
paramgram["peer_id"] = len(peers) - 1
# SET THE PEER LOOPCOUNT TO 1 TO START THE LOOP
peer_loopcount = 1
# LOOP THROUGH PEERS TO FIND THE SERIAL NUMBER MATCH TO GET THE RIGHT PEER ID
# IDEA BEING WE DON'T WANT TO SUBMIT A BAD peer_id THAT DOESN'T JIVE WITH CURRENT DB ON FMG
# SO LETS SEARCH FOR IT, AND IF WE FIND IT, WE WILL CHANGE THE PEER ID VARIABLES TO MATCH
# IF NOT FOUND, LIFE GOES ON AND WE ASSUME THAT WE'RE ADDING A PEER
# AT WHICH POINT THE next_peer_id VARIABLE WILL HAVE THE RIGHT PRIMARY KEY
if paramgram["fmgr_ha_peer_sn"] is not None:
while peer_loopcount <= num_of_peers:
# GET THE SERIAL NUMBER FOR CURRENT PEER IN LOOP TO COMPARE TO SN IN PLAYBOOK
try:
sn_compare = peers[1][peer_loopcount - 1]["serial-number"]
# IF THE SN IN THE PEERS MATCHES THE PLAYBOOK SN, SET THE IDS
if sn_compare == paramgram["fmgr_ha_peer_sn"]:
paramgram["peer_id"] = peer_loopcount
paramgram["next_peer_id"] = paramgram["peer_id"]
except Exception as err:
raise FMGBaseException(err)
# ADVANCE THE LOOP AND REPEAT UNTIL DONE
peer_loopcount += 1
            # IF THE PEER STATUS ISN'T IN THE PLAYBOOK, ASSUME IT'S ENABLE
if paramgram["fmgr_ha_peer_status"] is None:
paramgram["fmgr_ha_peer_status"] = "enable"
# IF THE PEER STATUS IS ENABLE, USE THE next_peer_id IN THE API CALL FOR THE ID
if paramgram["fmgr_ha_peer_status"] == "enable":
results = fmgr_set_ha_peer(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=True,
ansible_facts=fmgr.construct_ansible_facts(results,
module.params, paramgram))
# IF THE PEER STATUS IS DISABLE, WE HAVE TO HANDLE THAT A BIT DIFFERENTLY
# JUST USING TWO DIFFERENT peer_id 's HERE
if paramgram["fmgr_ha_peer_status"] == "disable":
results = fmgr_set_ha_peer(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=True,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
except Exception as err:
raise FMGBaseException(err)
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
|
thaim/ansible
|
lib/ansible/modules/network/fortimanager/fmgr_ha.py
|
Python
|
mit
| 13,484
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: s3
short_description: manage objects in S3.
description:
- This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
version_added: "1.1"
options:
aws_access_key:
description:
- AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: ['ec2_secret_key', 'secret_key']
bucket:
description:
- Bucket name.
required: true
default: null
aliases: []
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
version_added: "1.3"
encrypt:
description:
- When set for PUT mode, asks for server-side encryption
required: false
default: no
version_added: "2.0"
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
required: false
default: 600
aliases: []
headers:
description:
- Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "2.0"
marker:
description:
      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
required: false
default: null
version_added: "2.0"
max_keys:
description:
- Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
required: false
default: 1000
version_added: "2.0"
metadata:
description:
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "1.6"
mode:
description:
      - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string, 1.3+), list (list keys, 2.0+), create (bucket), delete (bucket), and delobj (delete object).
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
required: false
default: null
permission:
description:
      - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. Multiple permissions can be specified as a list.
required: false
default: private
version_added: "2.0"
prefix:
description:
- Limits the response to keys that begin with the specified prefix for list mode
required: false
default: null
version_added: "2.0"
version:
description:
- Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
required: false
default: null
aliases: []
version_added: "2.0"
overwrite:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
required: false
default: true
version_added: "1.2"
region:
description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
required: false
default: null
version_added: "1.8"
retries:
description:
- On recoverable failure, how many times to retry before actually failing.
required: false
default: 0
version_added: "2.0"
s3_url:
description:
      - S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS
default: null
aliases: [ S3_URL ]
src:
description:
- The source file path when performing a PUT operation.
required: false
default: null
aliases: []
version_added: "1.3"
requirements: [ "boto" ]
author:
- "Lester Wade (@lwade)"
- "Ralph Tice (@ralph-tice)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple PUT operation
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# Get a specific version of an object.
- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# PUT/upload with custom headers
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put headers=x-amz-grant-full-control=emailAddress=owner@example.com
# List keys simple
- s3: bucket=mybucket mode=list
# List keys all options
- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472
# Create an empty bucket
- s3: bucket=mybucket mode=create permission=public-read
# Create a bucket with key as directory, in the EU region
- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1
# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete
# GET an object but don't download if the file checksums match
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different
# Delete an object from a bucket
- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj
'''
import os
import urlparse
from ssl import SSLError
try:
import boto
import boto.ec2
from boto.s3.connection import Location
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import S3Connection
from boto.s3.acl import CannedACLStrings
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def key_check(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj, version_id=version)
except s3.provider.storage_response_error, e:
if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned.
key_check = None
else:
module.fail_json(msg=str(e))
if key_check:
return True
else:
return False
def keysum(module, s3, bucket, obj, version=None):
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj, version_id=version)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
return md5_remote
def bucket_check(module, s3, bucket):
try:
result = s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if result:
return True
else:
return False
def create_bucket(module, s3, bucket, location=None):
if location is None:
location = Location.DEFAULT
try:
bucket = s3.create_bucket(bucket, location=location)
for acl in module.params.get('permission'):
bucket.set_acl(acl)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if bucket:
return True
def get_bucket(module, s3, bucket):
try:
return s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def list_keys(module, bucket_object, prefix, marker, max_keys):
all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys)
keys = [x.key for x in all_keys]
module.exit_json(msg="LIST operation complete", s3_keys=keys)
def delete_bucket(module, s3, bucket):
try:
bucket = s3.lookup(bucket)
bucket_contents = bucket.list()
bucket.delete_keys([key.name for key in bucket_contents])
bucket.delete()
return True
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def delete_key(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def create_dirkey(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def path_check(path):
if os.path.exists(path):
return True
else:
return False
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
if metadata:
for meta_key in metadata.keys():
key.set_metadata(meta_key, metadata[meta_key])
key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers)
for acl in module.params.get('permission'):
key.set_acl(acl)
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
# retries is the number of loops; range/xrange needs to be one
# more to get that count of loops.
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
for x in range(0, retries + 1):
try:
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
except SSLError as e:
# actually fail on last pass through the loop.
if x >= retries:
module.fail_json(msg="s3 download failed; %s" % e)
# otherwise, try again, this may be a transient timeout.
pass
def download_s3str(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def get_download_url(module, s3, bucket, obj, expiry, changed=True):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def is_fakes3(s3_url):
""" Return True if s3_url has scheme fakes3:// """
if s3_url is not None:
return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
else:
return False
def is_walrus(s3_url):
""" Return True if it's Walrus endpoint, not S3
We assume anything other than *.amazonaws.com is Walrus"""
if s3_url is not None:
o = urlparse.urlparse(s3_url)
return not o.hostname.endswith('amazonaws.com')
else:
return False
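# Illustrative behaviour of the endpoint helpers (not part of the original
# module); the URLs are made-up examples:
#
#     is_fakes3('fakes3://localhost:4569')           # -> True
#     is_walrus('https://storage.example.com:8773')  # -> True (not *.amazonaws.com)
#     is_walrus('https://s3.amazonaws.com')          # -> False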
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
bucket = dict(required=True),
dest = dict(default=None),
encrypt = dict(default=True, type='bool'),
expiry = dict(default=600, aliases=['expiration']),
headers = dict(type='dict'),
marker = dict(default=None),
max_keys = dict(default=1000),
metadata = dict(type='dict'),
mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object = dict(),
permission = dict(type='list', default=['private']),
version = dict(default=None),
overwrite = dict(aliases=['force'], default='always'),
prefix = dict(default=None),
retries = dict(aliases=['retry'], type='int', default=0),
s3_url = dict(aliases=['S3_URL']),
src = dict(),
),
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
bucket = module.params.get('bucket')
encrypt = module.params.get('encrypt')
expiry = int(module.params['expiry'])
if module.params.get('dest'):
dest = os.path.expanduser(module.params.get('dest'))
headers = module.params.get('headers')
marker = module.params.get('marker')
max_keys = module.params.get('max_keys')
metadata = module.params.get('metadata')
mode = module.params.get('mode')
obj = module.params.get('object')
version = module.params.get('version')
overwrite = module.params.get('overwrite')
prefix = module.params.get('prefix')
retries = module.params.get('retries')
s3_url = module.params.get('s3_url')
src = module.params.get('src')
for acl in module.params.get('permission'):
if acl not in CannedACLStrings:
module.fail_json(msg='Unknown permission specified: %s' % str(acl))
    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
if module.params.get('object'):
obj = os.path.expanduser(module.params['object'])
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
# bucket names with .'s in them need to use the calling_format option,
# otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
# for more details.
if '.' in bucket:
aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat()
# Look at s3_url and tweak connection settings
# if connecting to Walrus or fakes3
try:
if is_fakes3(s3_url):
fakes3 = urlparse.urlparse(s3_url)
s3 = S3Connection(
is_secure=fakes3.scheme == 'fakes3s',
host=fakes3.hostname,
port=fakes3.port,
calling_format=OrdinaryCallingFormat(),
**aws_connect_kwargs
)
elif is_walrus(s3_url):
walrus = urlparse.urlparse(s3_url).hostname
s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
else:
s3 = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_kwargs)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if s3 is None:
s3 = boto.connect_s3(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
except Exception, e:
module.fail_json(msg='Failed to connect to S3: %s' % str(e))
if s3 is None: # this should never happen
module.fail_json(msg ='Unknown error, failed to create s3 connection, no information from boto.')
# If our mode is a GET operation (download), go through the procedure as appropriate ...
if mode == 'get':
# First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Target bucket cannot be found", failed=True)
# Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is False:
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
        # If the destination path doesn't exist or overwrite is True, no need to do the md5sum etag check, so just download.
pathrtn = path_check(dest)
if pathrtn is False or overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
# Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
if pathrtn is True:
md5_remote = keysum(module, s3, bucket, obj, version=version)
md5_local = module.md5(dest)
if md5_local == md5_remote:
sum_matches = True
if overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
        # Finally, if the checksums matched and overwrite is 'never', we EXIT with a helpful message.
if sum_matches is True and overwrite == 'never':
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
# if our mode is a PUT operation (upload), go through the procedure as appropriate ...
if mode == 'put':
# Use this snippet to debug through conditionals:
# module.exit_json(msg="Bucket return %s"%bucketrtn)
# sys.exit(0)
# Lets check the src path.
pathrtn = path_check(src)
if pathrtn is False:
module.fail_json(msg="Local object for PUT does not exist", failed=True)
# Lets check to see if bucket exists to get ground truth.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, obj)
# Lets check key state. Does it exist and if it does, compute the etag md5sum.
if bucketrtn is True and keyrtn is True:
md5_remote = keysum(module, s3, bucket, obj)
md5_local = module.md5(src)
if md5_local == md5_remote:
sum_matches = True
if overwrite == 'always':
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
else:
get_download_url(module, s3, bucket, obj, expiry, changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
# If neither exist (based on bucket existence), we can create both.
if bucketrtn is False and pathrtn is True:
create_bucket(module, s3, bucket, location)
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# If bucket exists but key doesn't, just upload.
if bucketrtn is True and pathrtn is True and keyrtn is False:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# Delete an object from a bucket, not the entire bucket
if mode == 'delobj':
if obj is None:
module.fail_json(msg="object parameter is required", failed=True);
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_key(module, s3, bucket, obj)
if deletertn is True:
module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Delete an entire bucket, including all objects in the bucket
if mode == 'delete':
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_bucket(module, s3, bucket)
if deletertn is True:
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Support for listing a set of keys
if mode == 'list':
bucket_object = get_bucket(module, s3, bucket)
# If the bucket does not exist then bail out
if bucket_object is None:
module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True)
list_keys(module, bucket_object, prefix, marker, max_keys)
# Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
# WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
if mode == 'create':
if bucket and not obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
module.exit_json(msg="Bucket already exists.", changed=False)
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, dirobj)
if keyrtn is True:
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:
create_dirkey(module, s3, bucket, dirobj)
if bucketrtn is False:
created = create_bucket(module, s3, bucket, location)
create_dirkey(module, s3, bucket, dirobj)
# Support for grabbing the time-expired URL for an object in S3/Walrus.
if mode == 'geturl':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj)
if keyrtn is True:
get_download_url(module, s3, bucket, obj, expiry)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
if mode == 'getstr':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is True:
download_s3str(module, s3, bucket, obj, version=version)
else:
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
module.exit_json(failed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
garyjyao1/ansible
|
lib/ansible/modules/core/cloud/amazon/s3.py
|
Python
|
gpl-3.0
| 26,908
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
class invite_wizard(osv.osv_memory):
""" Wizard to invite partners and make them followers. """
_name = 'mail.wizard.invite'
_description = 'Invite wizard'
def default_get(self, cr, uid, fields, context=None):
result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
user_name = self.pool.get('res.users').name_get(cr, uid, [uid], context=context)[0][1]
model = result.get('res_model')
res_id = result.get('res_id')
if 'message' in fields and model and res_id:
ir_model = self.pool.get('ir.model')
model_ids = ir_model.search(cr, uid, [('model', '=', self.pool[model]._name)], context=context)
model_name = ir_model.name_get(cr, uid, model_ids, context=context)[0][1]
document_name = self.pool[model].name_get(cr, uid, [res_id], context=context)[0][1]
            message = _('<div><p>Hello,</p><p>%s invited you to follow %s document: %s.</p></div>') % (user_name, model_name, document_name)
result['message'] = message
elif 'message' in fields:
result['message'] = _('<div><p>Hello,</p><p>%s invited you to follow a new document.</p></div>') % user_name
return result
_columns = {
'res_model': fields.char('Related Document Model', size=128,
required=True, select=1,
help='Model of the followed resource'),
'res_id': fields.integer('Related Document ID', select=1,
help='Id of the followed resource'),
'partner_ids': fields.many2many('res.partner', string='Recipients',
help="List of partners that will be added as follower of the current document."),
'message': fields.html('Message'),
'send_mail': fields.boolean('Send Email',
help="If checked, the partners will receive an email warning they have been "
"added in the document's followers."),
}
_defaults = {
'send_mail' : True,
}
def add_followers(self, cr, uid, ids, context=None):
for wizard in self.browse(cr, uid, ids, context=context):
model_obj = self.pool[wizard.res_model]
document = model_obj.browse(cr, uid, wizard.res_id, context=context)
# filter partner_ids to get the new followers, to avoid sending email to already following partners
new_follower_ids = [p.id for p in wizard.partner_ids if p not in document.message_follower_ids]
model_obj.message_subscribe(cr, uid, [wizard.res_id], new_follower_ids, context=context)
ir_model = self.pool.get('ir.model')
model_ids = ir_model.search(cr, uid, [('model', '=', model_obj._name)], context=context)
model_name = ir_model.name_get(cr, uid, model_ids, context=context)[0][1]
# send an email if option checked and if a message exists (do not send void emails)
if wizard.send_mail and wizard.message and not wizard.message == '<br>': # when deleting the message, cleditor keeps a <br>
# add signature
# FIXME 8.0: use notification_email_send, send a wall message and let mail handle email notification + message box
signature_company = self.pool.get('mail.notification').get_signature_footer(cr, uid, user_id=uid, res_model=wizard.res_model, res_id=wizard.res_id, context=context)
wizard.message = tools.append_content_to_html(wizard.message, signature_company, plaintext=False, container_tag='div')
# send mail to new followers
# the invite wizard should create a private message not related to any object -> no model, no res_id
mail_mail = self.pool.get('mail.mail')
mail_id = mail_mail.create(cr, uid, {
'model': wizard.res_model,
'res_id': wizard.res_id,
'subject': _('Invitation to follow %s: %s') % (model_name, document.name_get()[0][1]),
'body_html': '%s' % wizard.message,
'auto_delete': True,
'recipient_ids': [(4, id) for id in new_follower_ids]
}, context=context)
mail_mail.send(cr, uid, [mail_id], context=context)
return {'type': 'ir.actions.act_window_close'}
|
jmesteve/saas3
|
openerp/addons/mail/wizard/invite.py
|
Python
|
agpl-3.0
| 5,467
|
# -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
    Here is a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.9.5'
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
make_logging_undefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined, select_autoescape
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
'select_autoescape',
]
def _patch_async():
from jinja2.utils import have_async_gen
if have_async_gen:
from jinja2.asyncsupport import patch_all
patch_all()
_patch_async()
del _patch_async
|
sdoran35/hate-to-hugs
|
venv/lib/python3.6/site-packages/jinja2/__init__.py
|
Python
|
mit
| 2,565
|
# -*- coding: utf-8 -*-
from xml.etree import ElementTree
from openerp.addons.web.controllers.main import load_actions_from_ir_values
from openerp.addons.web.http import Controller, route, request
class Board(Controller):
@route('/board/add_to_dashboard', type='json', auth='user')
def add_to_dashboard(self, menu_id, action_id, context_to_save, domain, view_mode, name=''):
# FIXME move this method to board.board model
dashboard_action = load_actions_from_ir_values('action', 'tree_but_open',
[('ir.ui.menu', menu_id)], False)
if dashboard_action:
action = dashboard_action[0][2]
if action['res_model'] == 'board.board' and action['views'][0][1] == 'form':
# Maybe should check the content instead of model board.board ?
view_id = action['views'][0][0]
board = request.session.model(action['res_model']).fields_view_get(view_id, 'form')
if board and 'arch' in board:
xml = ElementTree.fromstring(board['arch'])
column = xml.find('./board/column')
if column is not None:
new_action = ElementTree.Element('action', {
'name': str(action_id),
'string': name,
'view_mode': view_mode,
'context': str(context_to_save),
'domain': str(domain)
})
column.insert(0, new_action)
arch = ElementTree.tostring(xml, 'utf-8')
return request.session.model('ir.ui.view.custom').create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
}, request.context)
return False
|
jmesteve/saas3
|
openerp/addons/board/controllers.py
|
Python
|
agpl-3.0
| 1,981
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa", "ur")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails. Deprecated, must be removed in 1.8.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
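# Illustrative (commented-out) override; the model label and URL pattern are
# example values only:
# ABSOLUTE_URL_OVERRIDES = {
#     'blogs.weblog': lambda o: "/blogs/%s/" % o.slug,
# }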
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
#     re.compile(r'^/favicon.ico$'),
#     re.compile(r'^/robots.txt$'),
#     re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
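# Illustrative override (not part of the stock defaults): a deployment behind a
# TLS-terminating proxy that sets X-Forwarded-Proto would typically configure
# this in its own settings module, e.g.:
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')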
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
############
# SESSIONS #
############
SESSION_CACHE_ALIAS = 'default' # Cache to store session data if using the cache session backend.
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = True # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' # class to serialize session data
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in
# CommentDetailsForm.clean_comment. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using different algorithms will be converted automatically
# upon login.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
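# Illustrative override (hypothetical app label and module path, following the
# comment above):
# MIGRATION_MODULES = {'blog': 'blog.db_migrations'}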
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
|
rooshilp/CMPUT410Lab6
|
virt_env/virt1/lib/python2.7/site-packages/django/conf/global_settings.py
|
Python
|
apache-2.0
| 23,177
|
from . import test_barcode_nomenclature
|
Aravinthu/odoo
|
addons/barcodes/tests/__init__.py
|
Python
|
agpl-3.0
| 40
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import base_gengo_translations
|
Aravinthu/odoo
|
addons/website_gengo/models/__init__.py
|
Python
|
agpl-3.0
| 138
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Text Views With Headers
This package defines several text views with headers
"""
class HeaderView(object):
"""A Text View With a Header
This view simply serializes the model and places the given
header on top.
:param header: the header (can be anything on which str() can be called)
"""
def __init__(self, header):
self.header = header
def __call__(self, model):
return str(self.header) + "\n" + str(model)
class TitledView(HeaderView):
"""A Text View With a Title
This view simply serializes the model, and places
a preformatted header containing the given title
text on top. The title text can be up to 64 characters
long.
:param str title: the title of the view
"""
FORMAT_STR = ('=' * 72) + "\n===={0: ^64}====\n" + ('=' * 72)
def __init__(self, title):
super(TitledView, self).__init__(self.FORMAT_STR.format(title))
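# Hedged usage sketch (the model value below is hypothetical, not part of the
# original module):
# view = TitledView("Instance Summary")
# print(view({"uptime": "3d", "vcpus": 4}))  # str() of the model under a boxed title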
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/openstack/common/report/views/text/header.py
|
Python
|
gpl-2.0
| 1,534
|
from django.contrib.auth.models import User
from django.http import Http404
from rest_framework import serializers
from openedx.core.djangoapps.course_groups.cohorts import is_course_cohorted
from notification_prefs import NOTIFICATION_PREF_KEY
from lang_pref import LANGUAGE_KEY
class NotifierUserSerializer(serializers.ModelSerializer):
"""
A serializer containing all information about a user needed by the notifier
(namely the user's name, email address, notification and language
preferences, and course enrollment and cohort information).
Because these pieces of information reside in different tables, this is
designed to work well with prefetch_related and select_related, which
require the use of all() instead of get() or filter(). The following fields
should be prefetched on the user objects being serialized:
* profile
* preferences
* courseenrollment_set
* course_groups
* roles__permissions
"""
name = serializers.SerializerMethodField("get_name")
preferences = serializers.SerializerMethodField("get_preferences")
course_info = serializers.SerializerMethodField("get_course_info")
def get_name(self, user):
return user.profile.name
def get_preferences(self, user):
return {
pref.key: pref.value
for pref
in user.preferences.all()
if pref.key in [LANGUAGE_KEY, NOTIFICATION_PREF_KEY]
}
def get_course_info(self, user):
cohort_id_map = {
cohort.course_id: cohort.id
for cohort in user.course_groups.all()
}
see_all_cohorts_set = {
role.course_id
for role in user.roles.all()
for perm in role.permissions.all() if perm.name == "see_all_cohorts"
}
ret = {}
for enrollment in user.courseenrollment_set.all():
if enrollment.is_active:
try:
ret[unicode(enrollment.course_id)] = {
"cohort_id": cohort_id_map.get(enrollment.course_id),
"see_all_cohorts": (
enrollment.course_id in see_all_cohorts_set or
not is_course_cohorted(enrollment.course_id)
),
}
except Http404: # is_course_cohorted raises this if course does not exist
pass
return ret
class Meta(object): # pylint: disable=missing-docstring
model = User
fields = ("id", "email", "name", "preferences", "course_info")
read_only_fields = ("id", "email")
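# Hedged usage sketch (variable names are illustrative; the queryset shape follows
# the prefetch hints in the class docstring):
# users = User.objects.select_related("profile").prefetch_related(
#     "preferences", "courseenrollment_set", "course_groups", "roles__permissions")
# payload = NotifierUserSerializer(users, many=True).data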
|
beni55/edx-platform
|
lms/djangoapps/notifier_api/serializers.py
|
Python
|
agpl-3.0
| 2,661
|
# cython: infer_types=True, language_level=3, py2_import=True
#
# Cython Scanner
#
import os
import platform
import cython
cython.declare(EncodedString=object, any_string_prefix=unicode, IDENT=unicode,
print_function=object)
from Cython import Utils
from Cython.Plex.Scanners import Scanner
from Cython.Plex.Errors import UnrecognizedInput
from Errors import error
from Lexicon import any_string_prefix, make_lexicon, IDENT
from Future import print_function
from StringEncoding import EncodedString
debug_scanner = 0
trace_scanner = 0
scanner_debug_flags = 0
scanner_dump_file = None
lexicon = None
def get_lexicon():
global lexicon
if not lexicon:
lexicon = make_lexicon()
return lexicon
#------------------------------------------------------------------
py_reserved_words = [
"global", "nonlocal", "def", "class", "print", "del", "pass", "break",
"continue", "return", "raise", "import", "exec", "try",
"except", "finally", "while", "if", "elif", "else", "for",
"in", "assert", "and", "or", "not", "is", "in", "lambda",
"from", "yield", "with", "nonlocal",
]
pyx_reserved_words = py_reserved_words + [
"include", "ctypedef", "cdef", "cpdef",
"cimport", "DEF", "IF", "ELIF", "ELSE"
]
class Method(object):
def __init__(self, name):
self.name = name
self.__name__ = name # for Plex tracing
def __call__(self, stream, text):
return getattr(stream, self.name)(text)
#------------------------------------------------------------------
class CompileTimeScope(object):
def __init__(self, outer = None):
self.entries = {}
self.outer = outer
def declare(self, name, value):
self.entries[name] = value
def update(self, other):
self.entries.update(other)
def lookup_here(self, name):
return self.entries[name]
def __contains__(self, name):
return name in self.entries
def lookup(self, name):
try:
return self.lookup_here(name)
except KeyError:
outer = self.outer
if outer:
return outer.lookup(name)
else:
raise
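# Hedged usage sketch (names and values are illustrative, not part of the original
# module): lookups fall back to the outer scope when a name is not declared locally.
# outer = CompileTimeScope()
# outer.declare('UNAME_SYSNAME', 'Linux')
# inner = CompileTimeScope(outer)
# inner.lookup('UNAME_SYSNAME')  # -> 'Linux', resolved via the outer scope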
def initial_compile_time_env():
benv = CompileTimeScope()
names = ('UNAME_SYSNAME', 'UNAME_NODENAME', 'UNAME_RELEASE',
'UNAME_VERSION', 'UNAME_MACHINE')
for name, value in zip(names, platform.uname()):
benv.declare(name, value)
try:
import __builtin__ as builtins
except ImportError:
import builtins
names = ('False', 'True',
'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes',
'chr', 'cmp', 'complex', 'dict', 'divmod', 'enumerate', 'filter',
'float', 'format', 'frozenset', 'hash', 'hex', 'int', 'len',
'list', 'long', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range',
'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str',
'sum', 'tuple', 'xrange', 'zip')
for name in names:
try:
benv.declare(name, getattr(builtins, name))
except AttributeError:
# ignore, likely Py3
pass
denv = CompileTimeScope(benv)
return denv
#------------------------------------------------------------------
class SourceDescriptor(object):
"""
A SourceDescriptor should be considered immutable.
"""
_file_type = 'pyx'
_escaped_description = None
_cmp_name = ''
def __str__(self):
assert False # To catch all places where a descriptor is used directly as a filename
def set_file_type_from_name(self, filename):
name, ext = os.path.splitext(filename)
self._file_type = ext in ('.pyx', '.pxd', '.py') and ext[1:] or 'pyx'
def is_cython_file(self):
return self._file_type in ('pyx', 'pxd')
def is_python_file(self):
return self._file_type == 'py'
def get_escaped_description(self):
if self._escaped_description is None:
self._escaped_description = \
self.get_description().encode('ASCII', 'replace').decode("ASCII")
return self._escaped_description
def __gt__(self, other):
# this is only used to provide some sort of order
try:
return self._cmp_name > other._cmp_name
except AttributeError:
return False
def __lt__(self, other):
# this is only used to provide some sort of order
try:
return self._cmp_name < other._cmp_name
except AttributeError:
return False
def __le__(self, other):
# this is only used to provide some sort of order
try:
return self._cmp_name <= other._cmp_name
except AttributeError:
return False
class FileSourceDescriptor(SourceDescriptor):
"""
Represents a code source. A code source is a more generic abstraction
for a "filename" (as sometimes the code doesn't come from a file).
Instances of code sources are passed to Scanner.__init__ as the
optional name argument and will be passed back when asking for
the position()-tuple.
"""
def __init__(self, filename, path_description=None):
filename = Utils.decode_filename(filename)
self.path_description = path_description or filename
self.filename = filename
self.set_file_type_from_name(filename)
self._cmp_name = filename
self._lines = {}
def get_lines(self, encoding=None, error_handling=None):
# we cache the lines only the second time this is called, in
# order to save memory when they are only used once
key = (encoding, error_handling)
try:
lines = self._lines[key]
if lines is not None:
return lines
except KeyError:
pass
f = Utils.open_source_file(
self.filename, encoding=encoding,
error_handling=error_handling,
# newline normalisation is costly before Py2.6
require_normalised_newlines=False)
try:
lines = list(f)
finally:
f.close()
if key in self._lines:
self._lines[key] = lines
else:
# do not cache the first access, but remember that we
# already read it once
self._lines[key] = None
return lines
def get_description(self):
return self.path_description
def get_error_description(self):
path = self.filename
cwd = Utils.decode_filename(os.getcwd() + os.path.sep)
if path.startswith(cwd):
return path[len(cwd):]
return path
def get_filenametable_entry(self):
return self.filename
def __eq__(self, other):
return isinstance(other, FileSourceDescriptor) and self.filename == other.filename
def __hash__(self):
return hash(self.filename)
def __repr__(self):
return "<FileSourceDescriptor:%s>" % self.filename
class StringSourceDescriptor(SourceDescriptor):
"""
    Instances of this class can be used instead of a filename if the
code originates from a string object.
"""
filename = None
def __init__(self, name, code):
self.name = name
#self.set_file_type_from_name(name)
self.codelines = [x + "\n" for x in code.split("\n")]
self._cmp_name = name
def get_lines(self, encoding=None, error_handling=None):
if not encoding:
return self.codelines
else:
return [ line.encode(encoding, error_handling).decode(encoding)
for line in self.codelines ]
def get_description(self):
return self.name
get_error_description = get_description
def get_filenametable_entry(self):
return "stringsource"
def __hash__(self):
return id(self)
# Do not hash on the name, an identical string source should be the
# same object (name is often defaulted in other places)
# return hash(self.name)
def __eq__(self, other):
return isinstance(other, StringSourceDescriptor) and self.name == other.name
def __repr__(self):
return "<StringSourceDescriptor:%s>" % self.name
#------------------------------------------------------------------
class PyrexScanner(Scanner):
# context Context Compilation context
# included_files [string] Files included with 'include' statement
# compile_time_env dict Environment for conditional compilation
# compile_time_eval boolean In a true conditional compilation context
# compile_time_expr boolean In a compile-time expression context
def __init__(self, file, filename, parent_scanner = None,
scope = None, context = None, source_encoding=None, parse_comments=True, initial_pos=None):
Scanner.__init__(self, get_lexicon(), file, filename, initial_pos)
if parent_scanner:
self.context = parent_scanner.context
self.included_files = parent_scanner.included_files
self.compile_time_env = parent_scanner.compile_time_env
self.compile_time_eval = parent_scanner.compile_time_eval
self.compile_time_expr = parent_scanner.compile_time_expr
else:
self.context = context
self.included_files = scope.included_files
self.compile_time_env = initial_compile_time_env()
self.compile_time_eval = 1
self.compile_time_expr = 0
if hasattr(context.options, 'compile_time_env') and \
context.options.compile_time_env is not None:
self.compile_time_env.update(context.options.compile_time_env)
self.parse_comments = parse_comments
self.source_encoding = source_encoding
if filename.is_python_file():
self.in_python_file = True
self.keywords = set(py_reserved_words)
else:
self.in_python_file = False
self.keywords = set(pyx_reserved_words)
self.trace = trace_scanner
self.indentation_stack = [0]
self.indentation_char = None
self.bracket_nesting_level = 0
self.begin('INDENT')
self.sy = ''
self.next()
def commentline(self, text):
if self.parse_comments:
self.produce('commentline', text)
def current_level(self):
return self.indentation_stack[-1]
def open_bracket_action(self, text):
self.bracket_nesting_level = self.bracket_nesting_level + 1
return text
def close_bracket_action(self, text):
self.bracket_nesting_level = self.bracket_nesting_level - 1
return text
def newline_action(self, text):
if self.bracket_nesting_level == 0:
self.begin('INDENT')
self.produce('NEWLINE', '')
string_states = {
"'": 'SQ_STRING',
'"': 'DQ_STRING',
"'''": 'TSQ_STRING',
'"""': 'TDQ_STRING'
}
def begin_string_action(self, text):
while text[:1] in any_string_prefix:
text = text[1:]
self.begin(self.string_states[text])
self.produce('BEGIN_STRING')
def end_string_action(self, text):
self.begin('')
self.produce('END_STRING')
def unclosed_string_action(self, text):
self.end_string_action(text)
self.error("Unclosed string literal")
def indentation_action(self, text):
self.begin('')
# Indentation within brackets should be ignored.
#if self.bracket_nesting_level > 0:
# return
# Check that tabs and spaces are being used consistently.
if text:
c = text[0]
#print "Scanner.indentation_action: indent with", repr(c) ###
if self.indentation_char is None:
self.indentation_char = c
#print "Scanner.indentation_action: setting indent_char to", repr(c)
else:
if self.indentation_char != c:
self.error("Mixed use of tabs and spaces")
if text.replace(c, "") != "":
self.error("Mixed use of tabs and spaces")
# Figure out how many indents/dedents to do
current_level = self.current_level()
new_level = len(text)
#print "Changing indent level from", current_level, "to", new_level ###
if new_level == current_level:
return
elif new_level > current_level:
#print "...pushing level", new_level ###
self.indentation_stack.append(new_level)
self.produce('INDENT', '')
else:
while new_level < self.current_level():
#print "...popping level", self.indentation_stack[-1] ###
self.indentation_stack.pop()
self.produce('DEDENT', '')
#print "...current level now", self.current_level() ###
if new_level != self.current_level():
self.error("Inconsistent indentation")
def eof_action(self, text):
while len(self.indentation_stack) > 1:
self.produce('DEDENT', '')
self.indentation_stack.pop()
self.produce('EOF', '')
def next(self):
try:
sy, systring = self.read()
except UnrecognizedInput:
self.error("Unrecognized character")
if sy == IDENT:
if systring in self.keywords:
if systring == u'print' and print_function in self.context.future_directives:
self.keywords.discard('print')
systring = EncodedString(systring)
elif systring == u'exec' and self.context.language_level >= 3:
self.keywords.discard('exec')
systring = EncodedString(systring)
else:
sy = systring
else:
systring = EncodedString(systring)
self.sy = sy
self.systring = systring
if False: # debug_scanner:
_, line, col = self.position()
if not self.systring or self.sy == self.systring:
t = self.sy
else:
t = "%s %s" % (self.sy, self.systring)
print("--- %3d %2d %s" % (line, col, t))
def peek(self):
saved = self.sy, self.systring
self.next()
next = self.sy, self.systring
self.unread(*next)
self.sy, self.systring = saved
return next
def put_back(self, sy, systring):
self.unread(self.sy, self.systring)
self.sy = sy
self.systring = systring
def unread(self, token, value):
# This method should be added to Plex
self.queue.insert(0, (token, value))
def error(self, message, pos = None, fatal = True):
if pos is None:
pos = self.position()
if self.sy == 'INDENT':
err = error(pos, "Possible inconsistent indentation")
err = error(pos, message)
if fatal: raise err
def expect(self, what, message = None):
if self.sy == what:
self.next()
else:
self.expected(what, message)
def expect_keyword(self, what, message = None):
if self.sy == IDENT and self.systring == what:
self.next()
else:
self.expected(what, message)
def expected(self, what, message = None):
if message:
self.error(message)
else:
if self.sy == IDENT:
found = self.systring
else:
found = self.sy
self.error("Expected '%s', found '%s'" % (what, found))
def expect_indent(self):
self.expect('INDENT',
"Expected an increase in indentation level")
def expect_dedent(self):
self.expect('DEDENT',
"Expected a decrease in indentation level")
def expect_newline(self, message = "Expected a newline"):
# Expect either a newline or end of file
if self.sy != 'EOF':
self.expect('NEWLINE', message)
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/third_party/cython/src/Cython/Compiler/Scanning.py
|
Python
|
mit
| 16,183
|
# Collect facts related to system service manager and init.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
from ansible.module_utils._text import to_native
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
class ServiceMgrFactCollector(BaseFactCollector):
name = 'service_mgr'
_fact_ids = set()
required_facts = set(['platform', 'distribution'])
@staticmethod
def is_systemd_managed(module):
# tools must be installed
if module.get_bin_path('systemctl'):
            # this should show if systemd is the boot init system, when the init check failed to mark it as systemd
# these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
return False
def collect(self, module=None, collected_facts=None):
facts_dict = {}
if not module:
return facts_dict
collected_facts = collected_facts or {}
service_mgr_name = None
# TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
# also other OSs other than linux might need to check across several possible candidates
# Mapping of proc_1 values to more useful names
proc_1_map = {
'procd': 'openwrt_init',
'runit-init': 'runit',
'svscan': 'svc',
'openrc-init': 'openrc',
}
# try various forms of querying pid 1
proc_1 = get_file_content('/proc/1/comm')
if proc_1 is None:
            # FIXME: return code isn't checked
# FIXME: if stdout is empty string, odd things
# FIXME: other code seems to think we could get proc_1 == None past this point
rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
# If the output of the command starts with what looks like a PID, then the 'ps' command
# probably didn't work the way we wanted, probably because it's busybox
if re.match(r' *[0-9]+ ', proc_1):
proc_1 = None
# The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
if proc_1 == "COMMAND\n":
proc_1 = None
        # FIXME: an empty string proc_1 stays an empty string
if proc_1 is not None:
proc_1 = os.path.basename(proc_1)
proc_1 = to_native(proc_1)
proc_1 = proc_1.strip()
if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
            # many systems return init, so this cannot be trusted; if it ends in 'sh' it probably is a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
if proc_1 is not None:
# Lookup proc_1 value in map and use proc_1 value itself if no match
# FIXME: empty string still falls through
service_mgr_name = proc_1_map.get(proc_1, proc_1)
# FIXME: replace with a system->service_mgr_name map?
# start with the easy ones
elif collected_facts.get('ansible_distribution', None) == 'MacOSX':
# FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
service_mgr_name = 'launchd'
else:
service_mgr_name = 'systemstarter'
elif 'BSD' in collected_facts.get('ansible_system', '') or collected_facts.get('ansible_system') in ['Bitrig', 'DragonFly']:
# FIXME: we might want to break out to individual BSDs or 'rc'
service_mgr_name = 'bsdinit'
elif collected_facts.get('ansible_system') == 'AIX':
service_mgr_name = 'src'
elif collected_facts.get('ansible_system') == 'SunOS':
service_mgr_name = 'smf'
elif collected_facts.get('ansible_distribution') == 'OpenWrt':
service_mgr_name = 'openwrt_init'
elif collected_facts.get('ansible_system') == 'Linux':
# FIXME: mv is_systemd_managed
if self.is_systemd_managed(module=module):
service_mgr_name = 'systemd'
elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
service_mgr_name = 'upstart'
elif os.path.exists('/sbin/openrc'):
service_mgr_name = 'openrc'
elif os.path.exists('/etc/init.d/'):
service_mgr_name = 'sysvinit'
if not service_mgr_name:
# if we cannot detect, fallback to generic 'service'
service_mgr_name = 'service'
facts_dict['service_mgr'] = service_mgr_name
return facts_dict
|
Russell-IO/ansible
|
lib/ansible/module_utils/facts/system/service_mgr.py
|
Python
|
gpl-3.0
| 6,057
|
"""Implementaton of :class:`PythonIntegerRing` class. """
from __future__ import print_function, division
from sympy.polys.domains.integerring import IntegerRing
from sympy.polys.domains.groundtypes import (
PythonInteger, SymPyInteger, python_sqrt,
python_factorial, python_gcdex, python_gcd, python_lcm,
)
from sympy.polys.polyerrors import CoercionFailed
from sympy.utilities import public
@public
class PythonIntegerRing(IntegerRing):
"""Integer ring based on Python's ``int`` type. """
dtype = PythonInteger
zero = dtype(0)
one = dtype(1)
alias = 'ZZ_python'
def __init__(self):
"""Allow instantiation of this domain. """
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyInteger(a)
def from_sympy(self, a):
"""Convert SymPy's Integer to ``dtype``. """
if a.is_Integer:
return PythonInteger(a.p)
elif a.is_Float and int(a) == a:
return PythonInteger(int(a))
else:
raise CoercionFailed("expected an integer, got %s" % a)
def from_FF_python(K1, a, K0):
"""Convert ``ModularInteger(int)`` to Python's ``int``. """
return a.to_int()
def from_ZZ_python(K1, a, K0):
"""Convert Python's ``int`` to Python's ``int``. """
return a
def from_QQ_python(K1, a, K0):
"""Convert Python's ``Fraction`` to Python's ``int``. """
if a.denominator == 1:
return a.numerator
def from_FF_gmpy(K1, a, K0):
"""Convert ``ModularInteger(mpz)`` to Python's ``int``. """
return PythonInteger(a.to_int())
def from_ZZ_gmpy(K1, a, K0):
"""Convert GMPY's ``mpz`` to Python's ``int``. """
return PythonInteger(a)
def from_QQ_gmpy(K1, a, K0):
"""Convert GMPY's ``mpq`` to Python's ``int``. """
if a.denom() == 1:
return PythonInteger(a.numer())
def from_RealField(K1, a, K0):
"""Convert mpmath's ``mpf`` to Python's ``int``. """
p, q = K0.to_rational(a)
if q == 1:
return PythonInteger(p)
def gcdex(self, a, b):
"""Compute extended GCD of ``a`` and ``b``. """
return python_gcdex(a, b)
def gcd(self, a, b):
"""Compute GCD of ``a`` and ``b``. """
return python_gcd(a, b)
def lcm(self, a, b):
"""Compute LCM of ``a`` and ``b``. """
return python_lcm(a, b)
def sqrt(self, a):
"""Compute square root of ``a``. """
return python_sqrt(a)
def factorial(self, a):
"""Compute factorial of ``a``. """
return python_factorial(a)
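# Hedged usage sketch (values are illustrative, not part of the original module):
# ZZ_py = PythonIntegerRing()
# ZZ_py.gcd(12, 18)                    # -> 6
# ZZ_py.from_sympy(SymPyInteger(7))    # -> 7 as a plain Python int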
|
wxgeo/geophar
|
wxgeometrie/sympy/polys/domains/pythonintegerring.py
|
Python
|
gpl-2.0
| 2,650
|
"""
XBlock runtime services for LibraryContentModule
"""
from django.core.exceptions import PermissionDenied
from opaque_keys.edx.locator import LibraryLocator, LibraryUsageLocator
from search.search_engine_base import SearchEngine
from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.capa_module import CapaDescriptor
def normalize_key_for_search(library_key):
""" Normalizes library key for use with search indexing """
return library_key.replace(version_guid=None, branch=None)
class LibraryToolsService(object):
"""
Service that allows LibraryContentModule to interact with libraries in the
modulestore.
"""
def __init__(self, modulestore):
self.store = modulestore
def _get_library(self, library_key):
"""
Given a library key like "library-v1:ProblemX+PR0B", return the
'library' XBlock with meta-information about the library.
A specific version may be specified.
Returns None on error.
"""
if not isinstance(library_key, LibraryLocator):
library_key = LibraryLocator.from_string(library_key)
try:
return self.store.get_library(
library_key, remove_version=False, remove_branch=False, head_validation=False
)
except ItemNotFoundError:
return None
def get_library_version(self, lib_key):
"""
Get the version (an ObjectID) of the given library.
Returns None if the library does not exist.
"""
library = self._get_library(lib_key)
if library:
# We need to know the library's version so ensure it's set in library.location.library_key.version_guid
assert library.location.library_key.version_guid is not None
return library.location.library_key.version_guid
return None
def create_block_analytics_summary(self, course_key, block_keys):
"""
Given a CourseKey and a list of (block_type, block_id) pairs,
prepare the JSON-ready metadata needed for analytics logging.
This is [
{"usage_key": x, "original_usage_key": y, "original_usage_version": z, "descendants": [...]}
]
where the main list contains all top-level blocks, and descendants contains a *flat* list of all
descendants of the top level blocks, if any.
"""
def summarize_block(usage_key):
""" Basic information about the given block """
orig_key, orig_version = self.store.get_block_original_usage(usage_key)
return {
"usage_key": unicode(usage_key),
"original_usage_key": unicode(orig_key) if orig_key else None,
"original_usage_version": unicode(orig_version) if orig_version else None,
}
result_json = []
for block_key in block_keys:
key = course_key.make_usage_key(*block_key)
info = summarize_block(key)
info['descendants'] = []
try:
block = self.store.get_item(key, depth=None) # Load the item and all descendants
children = list(getattr(block, "children", []))
while children:
child_key = children.pop()
child = self.store.get_item(child_key)
info['descendants'].append(summarize_block(child_key))
children.extend(getattr(child, "children", []))
except ItemNotFoundError:
pass # The block has been deleted
result_json.append(info)
return result_json
def _problem_type_filter(self, library, capa_type):
""" Filters library children by capa type"""
search_engine = SearchEngine.get_search_engine(index="library_index")
if search_engine:
filter_clause = {
"library": unicode(normalize_key_for_search(library.location.library_key)),
"content_type": CapaDescriptor.INDEX_CONTENT_TYPE,
"problem_types": capa_type
}
search_result = search_engine.search(field_dictionary=filter_clause)
results = search_result.get('results', [])
return [LibraryUsageLocator.from_string(item['data']['id']) for item in results]
else:
return [key for key in library.children if self._filter_child(key, capa_type)]
def _filter_child(self, usage_key, capa_type):
"""
Filters children by CAPA problem type, if configured
"""
if usage_key.block_type != "problem":
return False
descriptor = self.store.get_item(usage_key, depth=0)
assert isinstance(descriptor, CapaDescriptor)
return capa_type in descriptor.problem_types
def can_use_library_content(self, block):
"""
Determines whether a modulestore holding a course_id supports libraries.
"""
return self.store.check_supports(block.location.course_key, 'copy_from_template')
def update_children(self, dest_block, user_id, user_perms=None, version=None):
"""
This method is to be used when the library that a LibraryContentModule
references has been updated. It will re-fetch all matching blocks from
the libraries, and copy them as children of dest_block. The children
will be given new block_ids, but the definition ID used should be the
exact same definition ID used in the library.
This method will update dest_block's 'source_library_version' field to
store the version number of the libraries used, so we easily determine
if dest_block is up to date or not.
"""
if user_perms and not user_perms.can_write(dest_block.location.course_key):
raise PermissionDenied()
if not dest_block.source_library_id:
dest_block.source_library_version = ""
return
source_blocks = []
library_key = dest_block.source_library_key
if version:
library_key = library_key.replace(branch=ModuleStoreEnum.BranchName.library, version_guid=version)
library = self._get_library(library_key)
if library is None:
raise ValueError("Requested library not found.")
if user_perms and not user_perms.can_read(library_key):
raise PermissionDenied()
filter_children = (dest_block.capa_type != ANY_CAPA_TYPE_VALUE)
if filter_children:
# Apply simple filtering based on CAPA problem types:
source_blocks.extend(self._problem_type_filter(library, dest_block.capa_type))
else:
source_blocks.extend(library.children)
with self.store.bulk_operations(dest_block.location.course_key):
dest_block.source_library_version = unicode(library.location.library_key.version_guid)
self.store.update_item(dest_block, user_id)
head_validation = not version
dest_block.children = self.store.copy_from_template(
source_blocks, dest_block.location, user_id, head_validation=head_validation
)
# ^-- copy_from_template updates the children in the DB
# but we must also set .children here to avoid overwriting the DB again
def list_available_libraries(self):
"""
List all known libraries.
Returns tuples of (LibraryLocator, display_name)
"""
return [
(lib.location.library_key.replace(version_guid=None, branch=None), lib.display_name)
for lib in self.store.get_libraries()
]
|
ahmadiga/min_edx
|
common/lib/xmodule/xmodule/library_tools.py
|
Python
|
agpl-3.0
| 7,784
|
import goocanvas
import core
import math
import pango
import gtk
class Axes(object):
def __init__(self, viz):
self.viz = viz
self.color = 0x8080C0FF
self.hlines = goocanvas.Path(parent=viz.canvas.get_root_item(), stroke_color_rgba=self.color)
self.hlines.lower(None)
self.vlines = goocanvas.Path(parent=viz.canvas.get_root_item(), stroke_color_rgba=self.color)
self.vlines.lower(None)
self.labels = []
hadj = self.viz.get_hadjustment()
vadj = self.viz.get_vadjustment()
def update(adj):
if self.visible:
self.update_view()
hadj.connect("value-changed", update)
vadj.connect("value-changed", update)
hadj.connect("changed", update)
vadj.connect("changed", update)
self.visible = True
self.update_view()
def set_visible(self, visible):
self.visible = visible
if self.visible:
self.hlines.props.visibility = goocanvas.ITEM_VISIBLE
self.vlines.props.visibility = goocanvas.ITEM_VISIBLE
else:
self.hlines.props.visibility = goocanvas.ITEM_HIDDEN
self.vlines.props.visibility = goocanvas.ITEM_HIDDEN
for label in self.labels:
label.props.visibility = goocanvas.ITEM_HIDDEN
def _compute_divisions(self, xi, xf):
assert xf > xi
dx = xf - xi
size = dx
ndiv = 5
text_width = dx/ndiv/2
def rint(x):
return math.floor(x+0.5)
dx_over_ndiv = dx / ndiv
for n in range(5): # iterate 5 times to find optimum division size
#/* div: length of each division */
tbe = math.log10(dx_over_ndiv)#; /* looking for approx. 'ndiv' divisions in a length 'dx' */
div = pow(10, rint(tbe))#; /* div: power of 10 closest to dx/ndiv */
if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
div /= 2
elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
div *= 2 # /* test if div*2 is closer to dx/ndiv */
x0 = div*math.ceil(xi / div) - div
if n > 1:
ndiv = rint(size / text_width)
return x0, div
def update_view(self):
if self.viz.zoom is None:
return
unused_labels = self.labels
self.labels = []
for label in unused_labels:
label.set_property("visibility", goocanvas.ITEM_HIDDEN)
def get_label():
try:
label = unused_labels.pop(0)
except IndexError:
label = goocanvas.Text(parent=self.viz.canvas.get_root_item(), stroke_color_rgba=self.color)
else:
label.set_property("visibility", goocanvas.ITEM_VISIBLE)
label.lower(None)
self.labels.append(label)
return label
hadj = self.viz.get_hadjustment()
vadj = self.viz.get_vadjustment()
zoom = self.viz.zoom.value
offset = 10/zoom
x1, y1 = self.viz.canvas.convert_from_pixels(hadj.value, vadj.value)
x2, y2 = self.viz.canvas.convert_from_pixels(hadj.value + hadj.page_size, vadj.value + vadj.page_size)
line_width = 5.0/self.viz.zoom.value
# draw the horizontal axis
self.hlines.set_property("line-width", line_width)
yc = y2 - line_width/2
sim_x1 = x1/core.PIXELS_PER_METER
sim_x2 = x2/core.PIXELS_PER_METER
x0, xdiv = self._compute_divisions(sim_x1, sim_x2)
path = ["M %r %r L %r %r" % (x1, yc, x2, yc)]
x = x0
while x < sim_x2:
path.append("M %r %r L %r %r" % (core.PIXELS_PER_METER*x, yc - offset, core.PIXELS_PER_METER*x, yc))
label = get_label()
label.set_properties(font=("Sans Serif %f" % int(12/zoom)),
text=("%G" % x),
fill_color_rgba=self.color,
alignment=pango.ALIGN_CENTER,
anchor=gtk.ANCHOR_S,
x=core.PIXELS_PER_METER*x,
y=(yc - offset))
x += xdiv
del x
self.hlines.set_property("data", " ".join(path))
# draw the vertical axis
self.vlines.set_property("line-width", line_width)
xc = x1 + line_width/2
sim_y1 = y1/core.PIXELS_PER_METER
sim_y2 = y2/core.PIXELS_PER_METER
y0, ydiv = self._compute_divisions(sim_y1, sim_y2)
path = ["M %r %r L %r %r" % (xc, y1, xc, y2)]
y = y0
while y < sim_y2:
path.append("M %r %r L %r %r" % (xc, core.PIXELS_PER_METER*y, xc + offset, core.PIXELS_PER_METER*y))
label = get_label()
label.set_properties(font=("Sans Serif %f" % int(12/zoom)),
text=("%G" % y),
fill_color_rgba=self.color,
alignment=pango.ALIGN_LEFT,
anchor=gtk.ANCHOR_W,
x=xc + offset,
y=core.PIXELS_PER_METER*y)
y += ydiv
self.vlines.set_property("data", " ".join(path))
self.labels.extend(unused_labels)
|
JBonsink/GSOC-2013
|
tools/ns-allinone-3.14.1/ns-3.14.1/src/visualizer/visualizer/hud.py
|
Python
|
gpl-3.0
| 5,462
|
"""
Tests for `tty`.
https://pubs.opengroup.org/onlinepubs/9699919799/utilities/tty.html
"""
# from helpers import check, check_version, run
from helpers import check_version, run
def test_version():
"""Check that we're using Boreutil's implementation."""
assert check_version("tty")
def test_missing_args():
"""Nothing to test: `tty` doesn't require any arguments."""
pass
def test_extra_args():
"""Extra args => error of the form `tty: ...`"""
assert run(["tty", "owo"]).stderr.startswith("tty: ")
assert run(["tty", "owo"]).returncode > 0
def test_help():
"""Passing -h or --help => print help text."""
assert run(["tty", "-h"]).stdout.split(' ')[0] == 'Usage:'
assert run(["tty", "--help"]).stdout.split(' ')[0] == 'Usage:'
assert run(["tty", "-h"]).returncode > 0
assert run(["tty", "--help"]).returncode > 0
def test_main():
"""I haven't found a way to automate these tests because they require a tty."""
# `tty`/`tty -s` should return 0, if ran from a TTY.
# assert check(["tty"]).returncode == 0
# assert run(["tty", "-s"]).stdout == 0
# `tty` should print a tty name, if ran from a TTY.
# assert check_tty(["tty"]).stdout == os.ttyname(0) + "\n"
# `tty -s` should print nothing, always.
assert run(["tty", "-s"]).stdout == ""
# assert check_tty(["tty", "-s"]).stdout == ""
# `tty -s` should return >0, if not ran from a TTY.
assert run(["tty", "-s"]).returncode > 0
|
duckinator/boreutils
|
test/test_tty.py
|
Python
|
isc
| 1,482
|
#! /usr/bin/env python
#
# SCons - a Software Constructor
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/script/scons.py 4369 2009/09/19 16:58:54 scons"
__version__ = "1.2.0.d20090919"
__build__ = "r4369[MODIFIED]"
__buildsys__ = "scons-dev"
__date__ = "2009/09/19 16:58:54"
__developer__ = "scons"
import os
import os.path
import sys
##############################################################################
# BEGIN STANDARD SCons SCRIPT HEADER
#
# This is the cut-and-paste logic so that a self-contained script can
# interoperate correctly with different SCons versions and installation
# locations for the engine. If you modify anything in this section, you
# should also change other scripts that use this same header.
##############################################################################
# Strip the script directory from sys.path() so on case-insensitive
# (WIN32) systems Python doesn't think that the "scons" script is the
# "SCons" package. Replace it with our own library directories
# (version-specific first, in case they installed by hand there,
# followed by generic) so we pick up the right version of the build
# engine modules if they're in either directory.
script_dir = sys.path[0]
if script_dir in sys.path:
sys.path.remove(script_dir)
libs = []
if "SCONS_LIB_DIR" in os.environ:
libs.append(os.environ["SCONS_LIB_DIR"])
local_version = 'scons-local-' + __version__
local = 'scons-local'
if script_dir:
local_version = os.path.join(script_dir, local_version)
local = os.path.join(script_dir, local)
libs.append(os.path.abspath(local_version))
libs.append(os.path.abspath(local))
scons_version = 'scons-%s' % __version__
prefs = []
if sys.platform == 'win32':
# sys.prefix is (likely) C:\Python*;
# check only C:\Python*.
prefs.append(sys.prefix)
prefs.append(os.path.join(sys.prefix, 'Lib', 'site-packages'))
else:
# On other (POSIX) platforms, things are more complicated due to
# the variety of path names and library locations. Try to be smart
# about it.
if script_dir == 'bin':
# script_dir is `pwd`/bin;
# check `pwd`/lib/scons*.
prefs.append(os.getcwd())
else:
if script_dir == '.' or script_dir == '':
script_dir = os.getcwd()
head, tail = os.path.split(script_dir)
if tail == "bin":
# script_dir is /foo/bin;
# check /foo/lib/scons*.
prefs.append(head)
head, tail = os.path.split(sys.prefix)
if tail == "usr":
# sys.prefix is /foo/usr;
# check /foo/usr/lib/scons* first,
# then /foo/usr/local/lib/scons*.
prefs.append(sys.prefix)
prefs.append(os.path.join(sys.prefix, "local"))
elif tail == "local":
h, t = os.path.split(head)
if t == "usr":
# sys.prefix is /foo/usr/local;
# check /foo/usr/local/lib/scons* first,
# then /foo/usr/lib/scons*.
prefs.append(sys.prefix)
prefs.append(head)
else:
# sys.prefix is /foo/local;
# check only /foo/local/lib/scons*.
prefs.append(sys.prefix)
else:
# sys.prefix is /foo (ends in neither /usr or /local);
# check only /foo/lib/scons*.
prefs.append(sys.prefix)
temp = map(lambda x: os.path.join(x, 'lib'), prefs)
temp.extend(map(lambda x: os.path.join(x,
'lib',
'python' + sys.version[:3],
'site-packages'),
prefs))
prefs = temp
# Add the parent directory of the current python's library to the
# preferences. On SuSE-91/AMD64, for example, this is /usr/lib64,
# not /usr/lib.
try:
libpath = os.__file__
except AttributeError:
pass
else:
# Split /usr/libfoo/python*/os.py to /usr/libfoo/python*.
libpath, tail = os.path.split(libpath)
# Split /usr/libfoo/python* to /usr/libfoo
libpath, tail = os.path.split(libpath)
# Check /usr/libfoo/scons*.
prefs.append(libpath)
# Look first for 'scons-__version__' in all of our preference libs,
# then for 'scons'.
libs.extend(map(lambda x: os.path.join(x, scons_version), prefs))
libs.extend(map(lambda x: os.path.join(x, 'scons'), prefs))
sys.path = libs + sys.path
##############################################################################
# END STANDARD SCons SCRIPT HEADER
##############################################################################
if __name__ == "__main__":
import SCons.Script
# this does all the work, and calls sys.exit
# with the proper exit status when done.
SCons.Script.main()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
looooo/pivy
|
scons/scons.py
|
Python
|
isc
| 6,018
|
class Source:
def __init__(self):
self.config = None
def set_config(self, config):
"""config should be some dict"""
self.config = config
def sync(self):
"""returns list of DSFSDF, is a coroutine"""
pass
import feedparser, pprint, asyncio, aiohttp
class FeedSource(Source):
@asyncio.coroutine
def sync(self, metadata):
future_raw_results = yield from aiohttp.request('GET', self.config["uri"])
chunks = yield from future_raw_results.read()
raw_results = feedparser.parse(chunks)
        # Detect the new entries
result_ids = {i["id"] for i in raw_results["entries"]}
new_ids = result_ids - metadata.get("received", set())
        # Record the new entries
if "received" not in metadata:
metadata["received"] = result_ids
else:
metadata["received"] |= result_ids
results_by_id = {i["id"]: i for i in raw_results["entries"]}
results = []
for i in new_ids:
entry = results_by_id[i]
results.append({
"title": entry["title"],
"published": entry["published_parsed"],
"summary": entry["summary"],
})
print(len(results))
return results
glar = "http://www.reddit.com/message/inbox/.rss?feed=96014f6490c1cfab32a3e899a5eb4caf2e111ca9&user=brownhead"
x = FeedSource()
metadata = {}
x.set_config({"uri": glar})
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(x.sync(metadata))
finally:
loop.close()
# foo(x.sync(metadata))
# foo(x.sync(metadata))
# datastore = defaultdict(list)
# def store(uri, entries):
# datastore[uri].append(entries)
|
brownhead/email-client
|
eclient/source.py
|
Python
|
isc
| 1,508
|
from feedhandler.feedhandler import FeedHandler
|
tobbez/lys-reader
|
backend/feedhandler/__init__.py
|
Python
|
isc
| 47
|
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from modoboa.core.models import User
from modoboa.lib.tests import ModoTestCase
from .. import factories
class PasswordSchemesTestCase(ModoTestCase):
@classmethod
def setUpTestData(cls):
"""Create test data."""
super(PasswordSchemesTestCase, cls).setUpTestData()
factories.populate_database()
def _create_account(self):
values = dict(
username="tester@test.com", first_name="Tester", last_name="Toto",
password1="Toto1234", password2="Toto1234", role="SimpleUsers",
quota_act=True,
is_active=True, email="tester@test.com", stepid='step2'
)
self.ajax_post(
reverse("admin:account_add"),
values
)
def _test_scheme(self, name, startpattern):
self.set_global_parameter("password_scheme", name, app="core")
self._create_account()
account = User.objects.get(username='tester@test.com')
self.assertTrue(account.password.startswith(startpattern))
self.assertTrue(account.check_password('Toto1234'))
def test_bcrypt_scheme(self):
self._test_scheme('blfcrypt', '{BLF-CRYPT}')
def test_sha512crypt_scheme(self):
self._test_scheme('sha512crypt', '{SHA512-CRYPT}')
def test_sha256crypt_scheme(self):
self._test_scheme('sha256crypt', '{SHA256-CRYPT}')
def test_md5crypt_scheme(self):
self._test_scheme('md5crypt', '{MD5-CRYPT}')
def test_sha256_scheme(self):
self._test_scheme('sha256', '{SHA256}')
def test_md5_scheme(self):
self._test_scheme('md5', '{MD5}')
def test_crypt(self):
self._test_scheme('crypt', '{CRYPT}')
def test_plain(self):
self._test_scheme('plain', '{PLAIN}')
|
bearstech/modoboa
|
modoboa/admin/tests/test_password_schemes.py
|
Python
|
isc
| 1,847
|
from django.contrib import admin
from solo.admin import SingletonModelAdmin
from exchange.models import ExchangeRates
from exchange.tasks import update_all_prices
@admin.register(ExchangeRates)
class ExchangeRatesAdmin(SingletonModelAdmin):
list_display = ['id', 'usd', 'eur']
list_editable = ['usd', 'eur']
def save_model(self, *args):
super().save_model(*args)
update_all_prices()
|
pmaigutyak/mp-shop
|
exchange/admin.py
|
Python
|
isc
| 418
|
from Tkinter import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
master = Tk()
goal = 0
var_goal = StringVar()
GAMMA = 0.9
last_state = Variable(torch.Tensor([0,0])).unsqueeze(0)
last_action = 0
last_reward = 0
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.hidden_size = 2
self.i2h = nn.Linear(2 + self.hidden_size, self.hidden_size)
self.i2o = nn.Linear(2 + self.hidden_size, 2)
self.softmax = nn.LogSoftmax()
self.states = []
self.next_states = []
self.actions = []
self.rewards = []
self.hiddens = []
def forward(self, input, hidden):
combined = torch.cat((input, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def initHidden(self):
global hidden_state
hidden_state = Variable(torch.zeros(1, self.hidden_size))
model = Policy()
model.initHidden()
last_hidden = hidden_state
optimizer = optim.Adam(model.parameters(), lr=0.01)
def select_action(state):
global hidden_state
output, hidden_state = model(state, hidden_state)
print('val '+str(output.data))
probs = F.softmax(output)
print('probs '+str(probs.data))
action = probs.multinomial()
return action.data[0,0]
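# Annotation (not part of the original script): learn() performs a one-step
# temporal-difference update on a stored transition. It re-evaluates the saved
# state with its saved hidden state, bootstraps a target GAMMA * next_value + reward
# using an action sampled from the softmax over the next state's output
# (SARSA-style), and minimises the smooth L1 loss between estimate and target.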
def learn(indice):
state = model.states[indice]
next_state = model.next_states[indice].detach()
action = model.actions[indice]
reward = model.rewards[indice]
hidden = model.hiddens[indice]
output, next_hidden = model(state, hidden)
value = output[0,action]
output,_ = model(next_state, next_hidden.detach())
#'''
next_action_probs = F.softmax(output)
next_action = next_action_probs.multinomial().data[0,0]
next_value = output[0,next_action]
'''
next_value = output.max(1)[0]
#'''
expected = GAMMA*next_value + reward
td_loss = F.smooth_l1_loss(value, expected)
optimizer.zero_grad()
td_loss.backward(retain_variables=True)
optimizer.step()
def update(signal):
global last_action
global last_state
global last_reward
global last_hidden
state = Variable(torch.Tensor([signal,0]).float()).unsqueeze(0)
if np.abs(last_reward)>0 or np.random.rand()>0.9 or len(model.states)<10:
model.states.append(last_state)
model.next_states.append(state)
model.rewards.append(last_reward)
model.actions.append(last_action)
model.hiddens.append(last_hidden)
last_hidden = hidden_state
action = select_action(state)
print(action)
reward = 0
if action==1 and goal==1:
reward = 1
if action==1 and goal==0:
reward = -1
if action==0:
learn(np.random.choice(len(model.states)))
else:
learn(-1)
last_action = action
last_state = state
last_reward = reward
def set_goal(new_goal):
global goal
goal = new_goal
print("goal = "+str(goal))
var_goal.set('goal = '+str(goal))
Button(master, text='S1', height = 10, width = 30, command=lambda:update(0)).grid(row=0, column=0, sticky=W, pady=4)
Button(master, text='S2', height = 10, width = 30, command=lambda:update(1)).grid(row=0, column=1, sticky=W, pady=4)
Button(master, text='goal 0', height = 10, width = 30, command=lambda:set_goal(0)).grid(row=1, column=0, sticky=W, pady=4)
Button(master, text='goal 1', height = 10, width = 30, command=lambda:set_goal(1)).grid(row=1, column=1, sticky=W, pady=4)
Label(master, height = 10, textvariable = var_goal).grid(row=2, sticky=EW, pady=4)
mainloop()
|
alexis-jacq/signals
|
tools/R_DQL.py
|
Python
|
isc
| 3,783
|
#!/usr/bin/env python
# encoding: UTF-8
import xbmcgui
import xbmcplugin
import sarpur
def play(url, name, live=False):
"""
    Play audio or video at a given url.
    :param url: Full url of the video
    :param name: Stream name
    :param live: Whether the stream is live (sets the 'IsLive' property)
"""
item = xbmcgui.ListItem(name, path=url)
if live:
item.setProperty('IsLive', 'true')
xbmcplugin.setResolvedUrl(sarpur.ADDON_HANDLE, True, item)
|
Dagur/sarpur-xbmc
|
plugin.video.sarpur/util/player.py
|
Python
|
mit
| 412
|
def test_rbenv_exists(host):
assert host.exists("rbenv")
def test_default_ruby_version(host):
assert host.exists("ruby")
assert host.run("ruby --version").stdout.startswith('ruby 2.6.5')
|
bitrise-io/osx-box-bootstrap
|
roles/ruby/tests/test_ruby.py
|
Python
|
mit
| 200
|
from sqlalchemy import (
Boolean, Column, Date, Enum, ForeignKey, Integer, Table, Unicode, sql,
)
from great.models.core import METADATA, table
from great.models._guid import GUID
def music_table(*args, **kwargs):
args += (
Column("mbid", GUID, nullable=True, unique=True),
Column("spotify_uri", Unicode(), nullable=True, unique=True),
)
return table(*args, **kwargs)
artists = music_table(
"artists",
Column(
"tracked",
Boolean,
default=False,
nullable=False,
server_default=sql.expression.false(),
),
with_dates=True,
)
albums = music_table(
"albums",
Column("release_date", Date),
Column(
"type",
Enum(u"lp", u"broadcast", u"ep", u"single"),
default=u"lp",
nullable=False,
),
Column(
"compilation",
Boolean,
default=False,
nullable=False,
server_default=sql.expression.false(),
),
Column(
"live",
Boolean,
default=False,
nullable=False,
server_default=sql.expression.false(),
),
)
album_artists = Table(
"album_artists",
METADATA,
Column("album_id", Integer, ForeignKey("albums.id"), primary_key=True),
Column("artist_id", Integer, ForeignKey("artists.id"), primary_key=True),
Column("join_phrase", Unicode(16)),
)
|
Julian/Great
|
great/models/music.py
|
Python
|
mit
| 1,375
|
#
import numpy as np
from numpy import linalg
import cvxopt
import cvxopt.solvers
def linear_kernel(x1, x2):
return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
return (1 + np.dot(x, y)) ** p
def gaussian_kernel(x, y, sigma=5.0):
return np.exp(-linalg.norm(x - y) ** 2 / (2 * (sigma ** 2)))
class SVM(object):
def __init__(self, kernel=linear_kernel, C=None):
self.kernel = kernel
self.C = C
if self.C is not None: self.C = float(self.C)
def fit(self, X, y):
n_samples, n_features = X.shape
# Gram matrix
K = np.zeros((n_samples, n_samples))
for i in range(n_samples):
for j in range(n_samples):
K[i, j] = self.kernel(X[i], X[j])
P = cvxopt.matrix(np.outer(y, y) * K)
q = cvxopt.matrix(np.ones(n_samples) * -1)
A = cvxopt.matrix(y, (1, n_samples))
b = cvxopt.matrix(0.0)
if self.C is None:
G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
h = cvxopt.matrix(np.zeros(n_samples))
else:
tmp1 = np.diag(np.ones(n_samples) * -1)
tmp2 = np.identity(n_samples)
G = cvxopt.matrix(np.vstack((tmp1, tmp2)))
tmp1 = np.zeros(n_samples)
tmp2 = np.ones(n_samples) * self.C
h = cvxopt.matrix(np.hstack((tmp1, tmp2)))
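        # Annotation (not original code): cvxopt.solvers.qp solves
        #   minimize (1/2) a^T P a + q^T a   subject to   G a <= h,   A a = b.
        # Here P[i, j] = y_i * y_j * K(x_i, x_j), q = -1, A a = b encodes
        # sum_i a_i y_i = 0, and G, h encode 0 <= a_i (plus a_i <= C in the
        # soft-margin case), i.e. the dual of the SVM training problem.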
# solve QP problem
solution = cvxopt.solvers.qp(P, q, G, h, A, b)
# Lagrange multipliers
a = np.ravel(solution['x'])
# Support vectors have non zero lagrange multipliers
sv = a > 1e-5
ind = np.arange(len(a))[sv]
self.a = a[sv]
self.sv = X[sv]
self.sv_y = y[sv]
print("%d support vectors out of %d points" % (len(self.a), n_samples))
# Intercept
self.b = 0
for n in range(len(self.a)):
self.b += self.sv_y[n]
self.b -= np.sum(self.a * self.sv_y * K[ind[n], sv])
self.b /= len(self.a)
# Weight vector
if self.kernel == linear_kernel:
self.w = np.zeros(n_features)
for n in range(len(self.a)):
self.w += self.a[n] * self.sv_y[n] * self.sv[n]
else:
self.w = None
def project(self, X):
if self.w is not None:
return np.dot(X, self.w) + self.b
else:
y_predict = np.zeros(len(X))
for i in range(len(X)):
s = 0
for a, sv_y, sv in zip(self.a, self.sv_y, self.sv):
s += a * sv_y * self.kernel(X[i], sv)
y_predict[i] = s
return y_predict + self.b
def predict(self, X):
return np.sign(self.project(X))
if __name__ == "__main__":
import pylab as pl
def gen_lin_separable_data():
# generate training data in the 2-d case
mean1 = np.array([0, 2])
mean2 = np.array([2, 0])
cov = np.array([[0.8, 0.6], [0.6, 0.8]])
X1 = np.random.multivariate_normal(mean1, cov, 100)
y1 = np.ones(len(X1))
X2 = np.random.multivariate_normal(mean2, cov, 100)
y2 = np.ones(len(X2)) * -1
return X1, y1, X2, y2
def gen_non_lin_separable_data():
mean1 = [-1, 2]
mean2 = [1, -1]
mean3 = [4, -4]
mean4 = [-4, 4]
cov = [[1.0, 0.8], [0.8, 1.0]]
X1 = np.random.multivariate_normal(mean1, cov, 50)
X1 = np.vstack((X1, np.random.multivariate_normal(mean3, cov, 50)))
y1 = np.ones(len(X1))
X2 = np.random.multivariate_normal(mean2, cov, 50)
X2 = np.vstack((X2, np.random.multivariate_normal(mean4, cov, 50)))
y2 = np.ones(len(X2)) * -1
return X1, y1, X2, y2
def gen_lin_separable_overlap_data():
# generate training data in the 2-d case
mean1 = np.array([0, 2])
mean2 = np.array([2, 0])
cov = np.array([[1.5, 1.0], [1.0, 1.5]])
X1 = np.random.multivariate_normal(mean1, cov, 100)
y1 = np.ones(len(X1))
X2 = np.random.multivariate_normal(mean2, cov, 100)
y2 = np.ones(len(X2)) * -1
return X1, y1, X2, y2
def split_train(X1, y1, X2, y2):
X1_train = X1[:90]
y1_train = y1[:90]
X2_train = X2[:90]
y2_train = y2[:90]
X_train = np.vstack((X1_train, X2_train))
y_train = np.hstack((y1_train, y2_train))
return X_train, y_train
def split_test(X1, y1, X2, y2):
X1_test = X1[90:]
y1_test = y1[90:]
X2_test = X2[90:]
y2_test = y2[90:]
X_test = np.vstack((X1_test, X2_test))
y_test = np.hstack((y1_test, y2_test))
return X_test, y_test
def plot_margin(X1_train, X2_train, clf):
def f(x, w, b, c=0):
# given x, return y such that [x,y] in on the line
# w.x + b = c
return (-w[0] * x - b + c) / w[1]
pl.plot(X1_train[:, 0], X1_train[:, 1], "ro")
pl.plot(X2_train[:, 0], X2_train[:, 1], "bo")
pl.scatter(clf.sv[:, 0], clf.sv[:, 1], s=100, c="g")
# w.x + b = 0
        a0 = -4
        a1 = f(a0, clf.w, clf.b)
        b0 = 4
        b1 = f(b0, clf.w, clf.b)
        pl.plot([a0, b0], [a1, b1], "k")
        # w.x + b = 1
        a0 = -4
        a1 = f(a0, clf.w, clf.b, 1)
        b0 = 4
        b1 = f(b0, clf.w, clf.b, 1)
        pl.plot([a0, b0], [a1, b1], "k--")
        # w.x + b = -1
        a0 = -4
        a1 = f(a0, clf.w, clf.b, -1)
        b0 = 4
        b1 = f(b0, clf.w, clf.b, -1)
        pl.plot([a0, b0], [a1, b1], "k--")
pl.axis("tight")
pl.show()
def plot_contour(X1_train, X2_train, clf):
pl.plot(X1_train[:, 0], X1_train[:, 1], "ro")
pl.plot(X2_train[:, 0], X2_train[:, 1], "bo")
pl.scatter(clf.sv[:, 0], clf.sv[:, 1], s=100, c="g")
X1, X2 = np.meshgrid(np.linspace(-6, 6, 50), np.linspace(-6, 6, 50))
X = np.array([[x1, x2] for x1, x2 in zip(np.ravel(X1), np.ravel(X2))])
Z = clf.project(X).reshape(X1.shape)
pl.contour(X1, X2, Z, [0.0], colors='k', linewidths=1, origin='lower')
pl.contour(X1, X2, Z + 1, [0.0], colors='grey', linewidths=1, origin='lower')
pl.contour(X1, X2, Z - 1, [0.0], colors='grey', linewidths=1, origin='lower')
pl.axis("tight")
pl.show()
def test_linear():
X1, y1, X2, y2 = gen_lin_separable_data()
X_train, y_train = split_train(X1, y1, X2, y2)
X_test, y_test = split_test(X1, y1, X2, y2)
clf = SVM()
clf.fit(X_train, y_train)
y_predict = clf.predict(X_test)
correct = np.sum(y_predict == y_test)
print("%d out of %d predictions correct" % (correct, len(y_predict)))
plot_margin(X_train[y_train == 1], X_train[y_train == -1], clf)
def test_non_linear():
X1, y1, X2, y2 = gen_non_lin_separable_data()
X_train, y_train = split_train(X1, y1, X2, y2)
X_test, y_test = split_test(X1, y1, X2, y2)
clf = SVM(polynomial_kernel)
clf.fit(X_train, y_train)
y_predict = clf.predict(X_test)
correct = np.sum(y_predict == y_test)
print("%d out of %d predictions correct" % (correct, len(y_predict)))
plot_contour(X_train[y_train == 1], X_train[y_train == -1], clf)
def test_soft():
X1, y1, X2, y2 = gen_lin_separable_overlap_data()
X_train, y_train = split_train(X1, y1, X2, y2)
X_test, y_test = split_test(X1, y1, X2, y2)
clf = SVM(C=1000.1)
clf.fit(X_train, y_train)
y_predict = clf.predict(X_test)
correct = np.sum(y_predict == y_test)
print("%d out of %d predictions correct" % (correct, len(y_predict)))
plot_contour(X_train[y_train == 1], X_train[y_train == -1], clf)
# test_linear()
# test_non_linear()
test_soft()
|
mtdx/ml-algorithms
|
svm/kernels.py
|
Python
|
mit
| 7,886
|
import _plotly_utils.basevalidators
class YpadValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="ypad", parent_name="scattercarpet.marker.colorbar", **kwargs
):
super(YpadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/_ypad.py
|
Python
|
mit
| 512
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import os
import re
import sys
from lib.model import nick, channel, log
def import_file(filename, parent):
_nick = {}
infile = open(filename).read().splitlines()
date = os.path.basename(filename).replace('.txt', '')
re_sentence = re.compile(r'^[(|)|<|>|].*:(?P<name>\w+)[(|)|<|>|]$')
_channel = None
for line in infile:
data = line.split(' ')
time = data.pop(0)
line = data.pop(0)
if (_channel is None):
_channel = channel.Channel(os.path.basename(parent), date, time)
if re_sentence.search(line):
name = re_sentence.search(line).group('name')
_nick[name] = nick.Nick(name, date, time)
_log = log.Log(' '.join(data), date, time, _nick[name], _channel)
if len(sys.argv) < 2:
print "Usage: %s /locate/to/tiarra/log" % (sys.argv[0])
quit()
for (root, dirs, files) in os.walk(sys.argv[1]):
for _file in files:
print root, _file
import_file(os.path.join(root, _file), root)
|
yono/tiarra_dbi_log_importer
|
tiarra_dbi_log_importer.py
|
Python
|
mit
| 1,083
|
import unittest
import urllib2
import requests
import logging
import re
import urllib
import os
import os.path
import bz2
# Default is warning, it's to suppress requests INFO log
logging.basicConfig(format='%(message)s')
def solution():
un = 'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
pw = 'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'
return (bz2.decompress(un), bz2.decompress(pw))
class SolutionTest(unittest.TestCase):
def setUp(self):
self.prefix = "http://www.pythonchallenge.com/pc/return/"
self.suffix = ".html"
def test_solution(self):
actual = solution()
# It would be identified by pep8, but this is ascii art, who cares!
expected = ('huge', 'file')
self.assertEquals(actual, expected)
        # Trick: 'hockey' consists of the letters of 'oxygen'
origin_url = ''.join([self.prefix, 'good', self.suffix])
try:
r = requests.get(origin_url, auth=expected)
except:
raise
self.assertTrue(r.ok)
next_entry = [re.sub(r'(.*)URL=(.*)\.html\"\>', r'\2', line)
for line in r.iter_lines() if re.match(r'.*URL.*', line)]
r.close()
if len(next_entry) != 0:
            r = requests.get(
                ''.join([self.prefix, next_entry[0], self.suffix]), auth=expected)
logging.warn('Level 09 is %s with %s' % (r.url, expected))
else:
logging.warn('Level 09 is %s with %s' % (origin_url, expected))
if __name__ == "__main__":
unittest.main(failfast=True)
|
au9ustine/org.au9ustine.puzzles.pythonchallenge
|
pc/level_08.py
|
Python
|
mit
| 1,670
|
if traffic_light == 'green':
pass # to implement
else:
stop()
|
schmit/intro-python-course
|
lectures/code/control_pass.py
|
Python
|
mit
| 71
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Almar Klein
description = 'Simple Structured Data Format.'
long_description = """
Simple Structured Data Format
-----------------------------
Ssdf is a simple format for storing structured data. It supports
seven data types, two of which are container elements:
None, int, float, (Unicode) string, numpy array, list/tuple, dict/Struct.
One spec, two formats
---------------------
Ssdf is actually two formats: the original text format is human readable,
and thus enables inspecting databases and data structures using a simple
text editor. The binary format is more efficient and uses compression on
the whole file (the text format only compresses arrays). It is more suited
for storing really large databases or structures containing large arrays.
Both formats are fully compatible.
Notes
-----
SSDF comes as a single module. While it's a bit big (approaching 2k lines),
it enables easier deployment inside a package in a way that works for
Python 2 as well as Python 3.
"""
from distutils.core import setup
# Get version
for line in open('ssdf.py').readlines():
if (line.startswith('__version__')):
exec(line.strip())
setup(
name = 'ssdf',
version = __version__,
author = 'Almar Klein',
author_email = 'almar.klein at gmail',
license = 'BSD',
url = 'https://bitbucket.org/almarklein/ssdf',
keywords = "simple structured data fileformat",
description = description,
long_description = long_description,
platforms = 'any',
provides = ['ssdf'],
requires = [],
py_modules = ['ssdf'],
zip_safe = False, # I want examples to work
)
# Note that the dir in package_dir must NOT be an empty string! This
# would work for distutils, but not for setuptools...
# I found this on-line doc very helpful for creating a setup script:
# http://docs.python.org/distutils/examples.html
|
bblais/Classy
|
classy/ssdf/setup.py
|
Python
|
mit
| 1,984
|
import multiprocessing as mp
from graph import shift
from time import sleep
def count(adj_node, adj_list):
return adj_list.count(adj_node)
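# Annotation (not part of the original module): loop_dfs enumerates closed walks
# back to start_node by depth-first search. It backtracks by temporarily removing
# the traversed edge from both adjacency lists and restoring it afterwards, prunes
# any path whose last three entries lie on the same two faces (via nodes_to_faces),
# and returns each completed loop normalised with shift().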
def loop_dfs( current_node, start_node, graph, current_path, nodes_to_faces):
if len(current_path) >= 3:
path_head_3 = current_path[-3:]
previous_three_faces = [set(nodes_to_faces[edge]) for edge in path_head_3]
intersection_all = set.intersection(*previous_three_faces)
if len(intersection_all) == 2:
return []
if current_node == start_node:
#stderr.write("Found one! \n")
#all_loops.append(shift(list(current_path)))
return [shift(list(current_path))]
else:
loops = []
for adjacent_node in set(graph[current_node]):
if count(adjacent_node, current_path) < 1:
current_path.append(adjacent_node)
graph[current_node].remove(adjacent_node)
graph[adjacent_node].remove(current_node)
loops += list(loop_dfs(adjacent_node, start_node, graph, current_path, nodes_to_faces))
graph[current_node].append(adjacent_node)
graph[adjacent_node].append(current_node)
current_path.pop()
return loops
def dfs_partial( current_node, start_node, graph, current_path, nodes_to_faces):
if len(current_path) >= 3:
path_head_3 = current_path[-3:]
previous_three_faces = [set(nodes_to_faces[edge]) for edge in path_head_3]
intersection_all = set.intersection(*previous_three_faces)
if len(intersection_all) == 2:
return []
if current_node == start_node:
return [shift(list(current_path))]
    if len(current_path) >= len(graph) / 4:
return [('work', current_node, start_node, graph, current_path, nodes_to_faces)]
else:
loops = []
for adjacent_node in set(graph[current_node]):
if count(adjacent_node, current_path) < 1:
current_path.append(adjacent_node)
graph[current_node].remove(adjacent_node)
graph[adjacent_node].remove(current_node)
loops += list(loop_dfs(adjacent_node, start_node, graph, current_path, nodes_to_faces))
graph[current_node].append(adjacent_node)
graph[adjacent_node].append(current_node)
current_path.pop()
return loops
class PGraph:
class Master:
num_threads = 0
workers = []
def __init__(self):
self.num_threads = mp.cpu_count()
self.work_queue = mp.Queue()
self.results_queue = mp.Queue()
self.workers = [PGraph.Worker(self.work_queue, self.results_queue) for i in range(self.num_threads+2)]
def execute(self):
#starts all workers, then begins searching for work for them to complete
            p = mp.Process(target=self.find_work)
p.start()
for worker in self.workers:
worker.run()
p.join()
def find_work(self):
pass
class Worker:
threads = []
def __init__(self, work_queue, results_queue):
self.results_queue = results_queue
self.work_queue = work_queue
def run(self):
while not self.work_queue.empty():
proc = mp.Process(target=loop_dfs, args=self.work_queue.get(0))
proc.start()
self.threads.append(proc)
for thread in self.threads:
thread.join()
def __init__(self):
pass
|
MICC/MICC
|
micc/pgraph.py
|
Python
|
mit
| 3,627
|
import CCAPython.gov.cca
import logging
import time
import collections
from multiprocessing import Process
# Configure Logging
logger = logging.getLogger('root')
def mape_k_loop(platform_component, reconfiguration_port):
"""
This is the method for the process running the monitoring loop.
"""
# Extract Contract Information
if platform_component.qos_contract != None:
monitor_interval = platform_component.qos_contract["monitor_interval"]
sample_interval = platform_component.qos_contract["sample_interval"]
reconfiguration_interval = platform_component.qos_contract["reconfiguration_interval"]
execution_time = platform_component.qos_contract["execution_time"]
deviation_factor = platform_component.qos_contract["deviation_factor"]
reconfiguration_function = platform_component.qos_contract["reconfiguration_function"]
else:
logger.debug("No QoS Contract!!!")
return
    # The progress is stored in a dictionary.
# The key is the timestamp when the variable was read.
# The value is the computation progress at this timestamp.
progress = 0.0
progress_log = {}
first_time_stamp = time.time()
last_reconfiguration = first_time_stamp
first_sample = progress
progress_log[first_time_stamp] = progress
last_time_stamp = first_time_stamp
last_sample = first_sample
# While the computation is not over
while progress < 1.0 :
logger.debug("Monitoring Computation Progress.")
# Monitor
progress = reconfiguration_port.getComputationProgress()
current_time_stamp = time.time()
current_sample = progress
progress_log[current_time_stamp] = current_sample
logger.debug("Progress: " + ("{:.2f}".format(progress)))
# Analyze
# Only run the analysis phase if there was progress made.
if current_sample > last_sample :
# Sort the progress_log by time stamp.
# oldest_sample will store the oldest progress recorded.
# oldest_time_stamp will store the time stamp of the oldest progress recorded.
ordered_logs = collections.OrderedDict(sorted(progress_log.items())).items()
number_of_samples = len(ordered_logs)
if number_of_samples < sample_interval:
                # In this scenario, there are not enough samples in the interval, so we use the first sample.
oldest_sample = first_sample
oldest_time_stamp = first_time_stamp
else:
                # In this case, there are enough samples, so we take the oldest sample within the last sample_interval entries.
oldest_sample = ordered_logs[-(sample_interval)][1]
oldest_time_stamp = ordered_logs[-(sample_interval)][0]
            # average_step_interval is how long it takes to increase the progress by 0.1
# current_time_stamp is the time of the last progress verification
average_step_interval = (current_time_stamp - oldest_time_stamp) / ((current_sample - oldest_sample) * 10)
logger.debug("Average Step Interval:" + ("{:.2f}".format(average_step_interval)))
# Plan
predicted_remaining_time = 10 * (1.0 - current_sample) * average_step_interval
logger.debug("Predicted Remaining Time: " + ("{:.2f}".format(predicted_remaining_time)))
reconfiguration_action = (False, 0)
elapsed_time = current_time_stamp - first_time_stamp
# The case for increasing the resources
if (elapsed_time + predicted_remaining_time) > deviation_factor * execution_time :
new_resources = reconfiguration_function(platform_component, progress_log)
reconfiguration_action = (True, new_resources)
logger.debug("Computation Must Be Reconfigured. New Resources: " + "{:.2f}".format(str(new_resources)))
# The case for keeping the resources
                elif (elapsed_time + predicted_remaining_time) < deviation_factor * execution_time:
reconfiguration_action = (False, 0)
# Execute
if reconfiguration_action[0] == True and (current_time_stamp - last_reconfiguration > reconfiguration_interval):
new_resources = reconfiguration_action[1]
reconfiguration_port.updateResources(new_resources)
last_reconfiguration = current_time_stamp
# Update Samples
last_time_stamp = current_time_stamp
last_sample = current_sample
else :
logger.debug("Progress Unchanged.")
# The Loop will sleep for monitor_interval seconds
time.sleep(monitor_interval)
elapsed_time = time.time() - first_time_stamp
logger.debug("Elapsed Time: " + "{:.2f}".format(elapsed_time))
return
class AllocationPort(CCAPython.gov.cca.Port):
def __init__(self, portType, component):
super(AllocationPort, self).__init__(portType)
self.component = component
return
def getResources(self):
"""
This should return a resource description for the computation.
This description should contain:
- Number of nodes
- Number of cores per node
- Memory size per node
- Hostname (for building the host file)
"""
logger.debug("Setting Resources: " + str(self.component.resources))
reconfiguration_port = self.component.services.getPort("ComputationReconfigurationPort")
mape_process = Process(target = mape_k_loop, args=(self.component, reconfiguration_port))
mape_process.daemon = True
mape_process.start()
logger.debug("Monitoring Started.")
return self.component.resources
class QoSConfigurationPort(CCAPython.gov.cca.Port):
def __init__(self, portType, component):
super(QoSConfigurationPort, self).__init__(portType)
self.component = component
return
def setQoSContract(self, resources = None, qos_contract = None):
"""
The contextual contract must be supplied by the computational system to the inner platform.
There are two sets of information:
- The initial resource description. The Platform will use this description to instantiate
a self.resources attribute with the initial resources.
        - The QoS requirements dict. For the malleable scenario, I'm considering:
          - execution_time: execution time estimate given the initial resources.
          - execution_cost: execution cost restriction.
          - deviation_factor: deviation factor for the above restrictions.
          - monitor_interval: interval between two monitoring loops.
          - sample_interval: how far back in the progress log the analysis should look.
          - reconfiguration_interval: minimum interval between two reconfigurations.
          - reconfiguration_function: defined by the application provider or component developer;
            given the contract, the cluster state and the progress log, it returns a new set of resources.
The reconfiguration function should take as input:
- Cluster Statistics (see base.platform.infrastructure.Cluster for formatting)
- Computation Progress Log
- Current Resources
The output should be:
- A new resource set to be allocated and sent to the Computation. (node_count, node_configuration)
"""
self.component.resources = resources
self.component.qos_contract = qos_contract
return
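# Illustrative sketch (annotation, not part of the original source): a contract of
# the shape setQoSContract() expects, using only the keys that mape_k_loop reads
# above. The values and the trivial reconfiguration function are invented for the
# example, and `my_cluster` stands for a base.platform.infrastructure.Cluster.
#
#   def keep_current_resources(platform_component, progress_log):
#       # A real function would inspect cluster statistics and the progress log.
#       return platform_component.resources
#
#   example_contract = {
#       "execution_time": 3600.0,           # estimated runtime with the initial resources (s)
#       "deviation_factor": 1.2,            # tolerated slowdown before reconfiguring
#       "monitor_interval": 30.0,           # seconds between monitoring iterations
#       "sample_interval": 5,               # how many recent samples the analysis looks back over
#       "reconfiguration_interval": 300.0,  # minimum seconds between two reconfigurations
#       "reconfiguration_function": keep_current_resources,
#   }
#   qos_port.setQoSContract(resources=my_cluster, qos_contract=example_contract)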
class MalleablePlatformComponent(CCAPython.gov.cca.Component):
def __init__(self):
# By default, there is no contract defined at component creation.
# It must be set by the QoSConfigurationPort
self.qos_contract = None
# By default, there is no resources set defined at component creation.
# It must be set by the QoSConfigurationPort
# The type of this object must be base.platform.infrastructure.Cluster
self.resources = None
self.allocation_port = AllocationPort("elastichpc.base.platform.malleable.AllocationPort", self)
self.qosConfiguration_port = QoSConfigurationPort("elastichpc.base.platform.malleable.QoSConfigurationPort", self)
return
def setServices(self, services):
self.services = services
services.addProvidesPort(self.allocation_port, "AllocationPort", "elastichpc.base.platform.malleable.AllocationPort", None)
services.addProvidesPort(self.qosConfiguration_port, "QoSConfigurationPort", "elastichpc.base.platform.malleable.QoSConfigurationPort", None)
services.registerUsesPort("ComputationReconfigurationPort","elastichpc.base.computation.malleable.ReconfigurationPort", None)
return
|
jmhal/elastichpc
|
base/platform/malleable/__init__.py
|
Python
|
mit
| 8,510
|
import unittest
from katas.kyu_8.regular_ball_super_ball import Ball
class BallTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(Ball().ball_type, 'regular')
def test_equals_2(self):
self.assertEqual(Ball('super').ball_type, 'super')
|
the-zebulan/CodeWars
|
tests/kyu_8_tests/test_regular_ball_super_ball.py
|
Python
|
mit
| 281
|