repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
SummitKwan/python-neo | neo/io/axonio.py | Python | bsd-3-clause | 33,046 | 0.000424 | # -*- coding: utf-8 -*-
"""
Class for reading data from pCLAMP and AxoScope
files (.abf version 1 and 2), developed by Molecular device/Axon technologies.
- abf = Axon binary file
- atf is a text file based format from axon that could be
read by AsciiIO (but this file is less efficient.)
This code is a port of abfload and abf2load
written in Matlab (BSD-2-Clause licence) by :
- Copyright (c) 2009, Forrest Collman, fcollman@princeton.edu
- Copyright (c) 2004, Harald Hentschke
and available here:
http://www.mathworks.com/matlabcentral/fileexchange/22114-abf2load
Information on abf 1 and 2 formats is available here:
http://www.moleculardevices.com/pages/software/developer_info.html
This file supports the old (ABF1) and new (ABF2) format.
ABF1 (clampfit <=9) and ABF2 (clampfit >10)
All possible mode are possible :
- event-driven variable-length mode 1 -> return several Segments per Block
- event-driven fixed-length mode 2 or 5 -> return several Segments
- gap free mode -> return one (or sevral) Segment in the Block
Supported : Read
Author: sgarcia, jnowacki
Note: j.s.nowacki@gmail.com has a C++ library with SWIG bindings which also
reads abf files - would be good to cross-check
"""
import struct
import datetime
import os
from io import open, BufferedReader
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Block, Segment, AnalogSignal, Event
from neo.io.tools import iteritems
class StructFile(BufferedReader):
def read_f(self, fmt, offset=None):
if offset is not None:
self.seek(offset)
return struct.unpack(fmt, self.read(struct.calcsize(fmt)))
def write_f(self, fmt, offset=None, *args):
if offset is not None:
self.seek(offset)
self.write(struct.pack(fmt, *args))
def reformat_integer_v1(data, nbchannel, header):
"""
reformat when dtype is int16 for ABF version 1
"""
chans = [chan_num for chan_num in
header['nADCSamplingSeq'] if chan_num >= 0]
for n, i in enumerate(chans[:nbchannel]): # respect SamplingSeq
data[:, n] /= header['fInstrumentScaleFactor'][i]
data[:, n] /= header['fSignalGain'][i]
data[:, n] /= header['fADCProgrammableGain'][i]
if header['nTelegraphEnable'][i]:
data[:, n] /= header['fTelegraphAdditGain'][i]
data[:, n] *= header['fADCRange']
data[:, n] /= header['lADCResolution']
data[:, n] += header['fInstrumentOffset'][i]
data[:, n] -= header['fSignalOffset'][i]
def reformat_integer_v2(data, nbchannel, header):
"""
reformat when dtype is int16 for ABF version 2
"""
for i in range(nbchannel):
data[:, i] /= header['listADCInfo'][i]['fInstrumentScaleFactor']
data[:, i] /= header['listADCInfo'][i]['fSignalGain']
data[:, i] /= header['listADCInfo'][i]['fADCProgrammableGain']
if header['listADCInfo'][i]['nTelegraphEnable']:
data[:, i] /= header['listADCInfo'][i]['fTelegraphAdditGain']
data[:, i] *= header['protocol']['fADCRange']
data[:, i] /= header['protocol']['lADCResolution']
data[:, i] += header['listADCInfo'][i]['fInstrumentOffset']
data[:, i] -= header['listADCInfo'][i]['fSignalOffset']
def clean_string(s):
s = s.rstrip(b'\x00')
s = s.rstrip(b' ')
return s
class AxonIO(BaseIO):
"""
Class for reading data from pCLAMP and AxoScope
files (.abf version 1 and 2), developed by Molecular Device/Axon Technologies.
Usage:
>>> from neo import io
>>> r = io.AxonIO(filename='File_axon_1.abf')
>>> bl = r.read_block(lazy=False, cascade=True)
>>> print bl.segments
[<neo.core.segment.Segment object at 0x105516fd0>]
>>> print bl.segments[0].analogsignals
[<AnalogSignal(array([ 2.18811035, 2.19726562, 2.21252441, ...,
1.33056641, 1.3458252 , 1.3671875 ], dtype=float32) * pA,
[0.0 s, 191.2832 s], sampling rate: 10000.0 Hz)>]
>>> print bl.segments[0].events
[]
"""
is_readable = True
is_writable = False
supported_objects = [Block, Segment, AnalogSignal, Event]
readable_objects = [Block]
writeable_objects = []
has_header = False
is_streameable = False
read_params = {Block: []}
write_params = None
name = 'Axon'
extensions = ['abf']
mode = 'file'
def __init__(self, filename=None):
"""
This class read a abf file.
Arguments:
filename : the filename to read
"""
BaseIO.__init__(self)
self.filename = filename
def read_block(self, lazy=False, cascade=True):
header = self.read_header()
version = header['fFileVersionNumber']
bl = Block()
bl.file_origin = os.path.basename(self.filename)
bl.annotate(abf_version=str(version))
# date and time
if version < 2.:
YY = 1900
MM = 1
DD = 1
hh = int(header['lFileStartTime'] / 3600.)
mm = int((header['lFileStartTime'] - hh * 3600) / 60)
ss = header['lFileStartTime'] - hh * 3600 - mm * 60
ms = int(np.mod(ss, 1) * 1e6)
ss = int(ss)
elif version >= 2.:
YY = int(header['uFileStartDate'] / 10000)
MM = int((header['uFileStartDate'] - YY * 10000) / 100)
DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
ms = int(np.mod(ss, 1) * 1e6)
ss = int(ss)
bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)
if not cascade:
return bl
# file format
if header['nDataFormat'] == 0:
dt = np.dtype('i2')
elif header['nDataFormat'] == 1:
dt = np.dtype('f4')
if version < 2.:
nbchannel = header['nADCNumChannels']
head_offset = header['lDataSectionPtr'] * BL | OCKSIZE + header[
'nNumPointsIgnored'] * dt.itemsize
totalsize = header['lActualAcqLength']
elif version >= 2.:
nbchannel = header['sections']['ADCSection']['llNumEntries']
head_offset = header['sections']['DataSection'][
'uBlockIndex'] * BLOCKSIZE
| totalsize = header['sections']['DataSection']['llNumEntries']
data = np.memmap(self.filename, dt, 'r',
shape=(totalsize,), offset=head_offset)
# 3 possible modes
if version < 2.:
mode = header['nOperationMode']
elif version >= 2.:
mode = header['protocol']['nOperationMode']
if (mode == 1) or (mode == 2) or (mode == 5) or (mode == 3):
# event-driven variable-length mode (mode 1)
# event-driven fixed-length mode (mode 2 or 5)
# gap free mode (mode 3) can be in several episodes
# read sweep pos
if version < 2.:
nbepisod = header['lSynchArraySize']
offset_episode = header['lSynchArrayPtr'] * BLOCKSIZE
elif version >= 2.:
nbepisod = header['sections']['SynchArraySection'][
'llNumEntries']
offset_episode = header['sections']['SynchArraySection'][
'uBlockIndex'] * BLOCKSIZE
if nbepisod > 0:
episode_array = np.memmap(
self.filename, [('offset', 'i4'), ('len', 'i4')], 'r',
shape=nbepisod, offset=offset_episode)
else:
episode_array = np.empty(1, [('offset', 'i4'), ('len', 'i4')])
episode_array[0]['len'] = da |
veloutin/tilde | tilde/tests/test_example_config.py | Python | gpl-3.0 | 1,068 | 0.003745 | from __future__ import unicode_literals
import os
from twisted.trial import unittest
example_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.path.pardir,
os.path.pardir,
"share",
"example-tilde.ini",
)
from tilde.loader import load_config
from tilde.commands import ubuntu
class LoadTest(unittest.TestCase):
def setUp(self):
self.cfg = load_config(example_file_path)
def test_resulting_config(self):
srv = self.cfg["servers"]
self.assertIn("foo", srv)
self.assertIn("bar", srv)
foo = srv["foo"]
bar = | srv["bar"]
self.assertEquals("foo.domain", foo.hostname)
self.assertEquals("/backup/homes", foo.archive_root)
self.assertEquals("bar", bar.hostname, "Should default to name")
self.assertEquals("/data/homes", bar.root)
self.assertIs(foo.commands, ubuntu)
self.assertIsNot(bar.commands, ubuntu)
cmds = self.cfg["commands"]
self.assertIn("ubuntu", cmds)
self. | assertIn("custom", cmds)
|
sargo/exif-compare | src/exif_compare/utils.py | Python | mit | 446 | 0 | # -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
from json import dumps
from functools import wra | ps
from flask import Response
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(functio | n)
def inner(*args, **kwargs):
return Response(dumps(function(*args, **kwargs)),
mimetype='application/json')
return inner
|
gabrielgmendonca/torrentcrawler | test.py | Python | gpl-3.0 | 1,757 | 0.015936 | import unittest
import os
from crawler import Crawler
domain = "http://www.legittorrents.info/"
URL = "http://www.legittorrents.info/index.php?page=torrents&search=&category=1&active=1"
line = '<td align="center" width="20" class="lista" style="text-align: center;"><a href="download.php?id=0819ccee9ebe25d7a02fe14496d58af10ef94aec&f=The+Tunnel+%282011%29+%28720p%29.torrent"><img src="images/download.gif" border="0" alt="torrent"/></a>'
url = "download.php?id=0819ccee9ebe25d7a02fe14496d58af10ef94aec&f=The+Tunnel+%282011%29+%28720p%29.torrent"
filename = "The+Tunnel+%282011%29+%28720p%29.torrent"
class TestCrawler(unittest.TestCase):
def test_invalid_url(self):
crawler = Crawler("blau",domain)
self.assertFalse(crawler.valid)
def test_valid_url(self):
crawler = Crawler(URL,domain)
self.assertTrue(crawler.valid)
def test_get_torrent_url(self):
crawler = Crawler(URL,domain)
self.assertEq | ual(crawler.get_Torrent_URL(line),url)
def test_get_torrents(self):
crawler = Crawler(URL,domain)
self.assertEqual(len(crawler.get_Torrents_List()),18)
def test_get_filename(self):
crawler = Crawler(URL,domain)
self.assertEqual(fi | lename,crawler.get_Filename(domain+url))
def test_download_file(self):
crawler = Crawler(URL,domain)
self.assertEqual(crawler.download_File(domain+url),filename)
self.assertTrue(os.path.isfile(filename))
def test_page_files(self):
crawler = Crawler(URL,domain)
target_links = map(crawler.get_Filename, crawler.get_Torrents_List())
dloaded_links = crawler.download_Page_Files()
self.assertEqual(sorted(target_links), sorted(dloaded_links))
if __name__ == "__main__":
unittest.main()
|
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/browser_test.py | Python | gpl-3.0 | 4,089 | 0.000978 | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import linux
from webkitpy.layout_tests.port import mac
from webkitpy.layout_tests.port import win
from webkitpy.layout_tests.port import browser_test_driver
def get_port_class_name(port_name):
if 'linux' in port_name:
return 'BrowserTestLinuxPort'
elif 'mac' in port_name:
return 'BrowserTestMacPort'
elif 'win' in port_name:
return 'BrowserTestWinPort'
return None
class BrowserTestPortOverrides(object):
"""Set of overrides that every browser test platform port should have. This
class should not be instantiated as certain functions depend on base. Port
to work.
"""
def _driver_class(self):
return browser_test_driver.BrowserTestDriver
def layout_tests_dir(self):
"""Overridden function from the base port class. Redirects everything
to src/chrome/test/data/printing/layout_tests.
"""
return self.path_from_chromium_base('chrome', 'test', 'data', 'printing', 'layout_tests') # pylint: disable=E1101
def check_sys_deps(self, needs_http):
"""This function is meant to be a no-op since we d | on't want to actually
check for system dependencies.
"""
return test_run_results.OK_EXIT_STATUS
def driver_name(self):
return 'browser_tests'
def default_timeout_ms(self):
timeout_ms = 10 * 1000
if self.get_option('configur | ation') == 'Debug': # pylint: disable=E1101
# Debug is usually 2x-3x slower than Release.
return 3 * timeout_ms
return timeout_ms
def virtual_test_suites(self):
return []
class BrowserTestLinuxPort(BrowserTestPortOverrides, linux.LinuxPort):
pass
class BrowserTestMacPort(BrowserTestPortOverrides, mac.MacPort):
def _path_to_driver(self, target=None):
return self._build_path_with_target(target, self.driver_name())
def default_timeout_ms(self):
timeout_ms = 20 * 1000
if self.get_option('configuration') == 'Debug': # pylint: disable=E1101
# Debug is usually 2x-3x slower than Release.
return 3 * timeout_ms
return timeout_ms
class BrowserTestWinPort(BrowserTestPortOverrides, win.WinPort):
def default_timeout_ms(self):
timeout_ms = 20 * 1000
if self.get_option('configuration') == 'Debug': # pylint: disable=E1101
# Debug is usually 2x-3x slower than Release.
return 3 * timeout_ms
return timeout_ms
|
mrknow/filmkodi | plugin.video.wizjatv/resources/lib/lib/client.py | Python | apache-2.0 | 22,250 | 0.009618 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,sys,cookielib,urllib,urllib2,urlparse,gzip,StringIO,HTMLParser,time,random,base64
from resources.lib.libraries import cache
from resources.lib.libraries import control
from resources.lib.libraries import workers
def shrink_host(url):
u = urlparse.urlparse(url)[1].split('.')
u = u[-2] + '.' + u[-1]
return u.encode('utf-8')
IE_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko'
FF_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
OPERA_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36 OPR/34.0.2036.50'
IOS_USER_AGENT = 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25'
ANDROID_USER_AGENT = 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'
#SMU_USER_AGENT = 'URLResolver for Kodi/%s' % (addon_version)
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, compression=True, output='', timeout='30', XHR=False):
try:
#control.log('@@@@@@@@@@@@@@ - URL:%s POST:%s' % (url, post))
handlers = []
if not proxy == None:
handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
if output == 'cookie' or output == 'extended' or not close == True:
cookies = cookielib.LWPCookieJar()
handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
if (2, 7, 9) <= sys.version_info < (2, 7, 11):
try:
import ssl; ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
handlers += [urllib2.HTTPSHandler(context=ssl_context)]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
except:
pass
if url.startswith('//'): url = 'http:' + url
try: headers.update(headers)
except: headers = {}
if 'User-Agent' in headers:
pass
elif not mobile == True:
#headers['User-Agent'] = agent()
headers['User-Agent'] = cache.get(randomagent, 1)
else:
headers['User-Agent'] = 'Apple-iPhone/701.341'
headers['User-Agent'] = 'Mozilla/5.0 (Linux; U; Android 4.0.3; ko-kr; LG-L160L Build/IML74K) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'
if 'Referer' in headers:
pass
elif referer == None:
headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
else:
headers['Referer'] = referer
if not 'Accept-Language' in headers:
headers['Accept-Language'] = 'en-US'
if 'X-Requested-With' in headers:
pass
elif XHR == True:
headers['X-Requested-With'] = 'XMLHttpRequest'
if 'Cookie' in headers:
pass
elif not cookie == None:
headers['Cookie'] = cookie
if 'Accept-Encoding' in headers:
pass
elif compression and limit is None:
headers['Accept-Encoding'] | = 'gzip'
if redirect == False:
class NoRedirection(u | rllib2.HTTPErrorProcessor):
def http_response(self, request, response): return response
opener = urllib2.build_opener(NoRedirection)
opener = urllib2.install_opener(opener)
try: del headers['Referer']
except: pass
if isinstance(post, dict):
post = urllib.urlencode(post)
request = urllib2.Request(url, data=post, headers=headers)
try:
response = urllib2.urlopen(request, timeout=int(timeout))
except urllib2.HTTPError as response:
if response.code == 503:
cf_result = response.read(5242880)
try: encoding = response.info().getheader('Content-Encoding')
except: encoding = None
if encoding == 'gzip':
cf_result = gzip.GzipFile(fileobj=StringIO.StringIO(cf_result)).read()
if 'cf-browser-verification' in cf_result:
netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
ua = headers['User-Agent']
cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)
headers['Cookie'] = cf
request = urllib2.Request(url, data=post, headers=headers)
response = urllib2.urlopen(request, timeout=int(timeout))
elif error == False:
return
elif error == False:
return
if output == 'cookie':
try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
except: pass
try: result = cf
except: pass
if close == True: response.close()
return result
elif output == 'geturl':
result = response.geturl()
if close == True: response.close()
return result
elif output == 'headers':
result = response.headers
if close == True: response.close()
return result
elif output == 'chunk':
try: content = int(response.headers['Content-Length'])
except: content = (2049 * 1024)
if content < (2048 * 1024): return
result = response.read(16 * 1024)
if close == True: response.close()
return result
if limit == '0':
result = response.read(224 * 1024)
elif not limit == None:
result = response.read(int(limit) * 1024)
else:
result = response.read(5242880)
try: encoding = response.info().getheader('Content-Encoding')
except: encoding = None
if encoding == 'gzip':
result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
if 'sucuri_cloudproxy_js' in result:
su = sucuri().get(result)
headers['Cookie'] = su
request = urllib2.Request(url, data=post, headers=headers)
response = urllib2.urlopen(request, timeout=int(timeout))
if limit == '0':
result = response.read(224 * 1024)
elif not limit == None:
result = response.read(int(limit) * 1024)
else:
result = response.read(5242880)
try: encoding = response.info().getheader('Content-Encoding')
except: encoding = None
if encoding == 'gzip':
result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
if 'Blazingfast.io' in result and 'xhr.open' in result:
netloc = '%s://%s' % (urlparse.urlpa |
heromod/migrid | mig/cgi-bin/logout.py | Python | gpl-2.0 | 1,078 | 0.002783 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER --- |
#
# logout - force-expire active login session
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
# |
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.logout import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
superphy/insilico-subtyping | phylotyper/data/download/download_wzx_genes.py | Python | apache-2.0 | 1,774 | 0.009019 | #!/usr/bin/env python
"""Download Wzx gene sequences from Genbank
Example:
$ python download_wzx_genes.py .
"""
import argparse
import logging
import re
from utils import DownloadUtils, SubtypeParser, GeneFilter
__author__ = "Matthew Whiteside"
__copyright__ = "Copyright Government of Canada 2012-2015. Funded by the Government of Canada Genomics Research and Development Initiative"
__license__ = "APL"
__version__ = "2.0"
__maintainer__ = "Matthew Whiteside"
__email__ = "mdwhitesi@gmail.com"
logging.basicConfig(
filename='download_wzx_genes.log',
level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filemode='w')
if __name__ == "__main__":
"""Run pipeline downloading sequences
"""
# Parse command-line args
parser = argparse.ArgumentParser(description='Download and store NCBI genes sequences')
parser.add_argument('output_directory', action="store")
parser.add_argument('acc_file', action="store")
args = parser.parse_args()
# Initialize gene filter
seq_tests = [lambda x: len(x) > 1000]
gfilter = GeneFilter(sequence_tests=seq_tests)
# Initialize Subtype parser
opattern = r"(?:\b|serogroup\:)([o0]x?\d+(?:[a-z]{1,2}) | ?)(?:\b|\:)"
parser = SubtypeParser([re.compile(opattern, flags=re.IGNORECASE)],
source_fields=['organism','strain','serotype','serovar','note'],
annotation_fields=['source','serotype','organism','serovar'])
# Initialize Download object
dutil = DownloadUtils(args.output_directory, 'Escherichia coli', ['wzx', 'O-antigen flippase'], parser, gfilter)
# Downlo | ad
dutil.download_by_accession(args.acc_file)
# Parse genbank files for known intimin types
dutil.parse()
|
huyphan/pyyawhois | test/record/parser/test_response_whois_nic_fr_tf_status_available.py | Python | mit | 1,779 | 0.003935 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.fr/tf/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicFrTfStatusAvailable(object):
def setUp(self):
fixture_ | path = "spec/fixtures/responses/whois.nic.fr/tf/status_available.txt"
host = "whois.nic.fr"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [par | t])
def test_status(self):
eq_(self.record.status, 'available')
def test_available(self):
eq_(self.record.available, True)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(self.record.admin_contacts, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(self.record.registrant_contacts, [])
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(self.record.technical_contacts, [])
def test_updated_on(self):
eq_(self.record.updated_on, None)
def test_expires_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)
|
tacksoo/excel2pdf | flask_app.py | Python | mit | 4,839 | 0.010126 | # Started based on this sample:
# http://stackoverflow.com/questions/27628053/uploading-and-downloading-files-with-flask
from flask import Flask, request, redirect, url_for, Response, session
import openpyxl # using version 1.6.1
import docx # using python-docx 0.8.4
import os
import zipstream
import zipfile
import time
app = Flask(__name__)
from schedule import second
app.register_blueprint(second, url_prefix='/schedule')
def get_column_by_name(sheet,name):
''' Return Excel column (A,B,C...) based on column heading '''
for c in range(ord('A'),ord('Z')+1):
# hard coded to row two, find a better way to do this
if sheet.cell( chr(c) + '2' ).value == name:
return chr(c)
def get_names(sheet,column):
names = ''
# lastname
count = 3
while sheet.cell(column + str(count)).value != None:
lastname = sheet.cell(column + str(count)).value
firstname = sheet.cell(chr(ord(column) + 1) + str(count)).value
if sheet.cell(chr(ord(column) + 8) + str(count)).value == 'PT':
names += lastname + ',' + firstname + '\n'
count += 1
return names
def create_aca_file(firstname,lastname,filepath):
document = docx.Document(filepath)
tables = document.tables
table = tables[0]
# add name
cell = table.cell(0,0)
cell.text = u'To: ' + firstname + ' ' + lastname
run = cell.paragraphs[0].runs[0]
ru | n.font.bold = True
cell2 = table.cell(0,1)
# add date
ce | ll2.text = u'Date: ' + time.strftime("%m/%d/%Y")
run2 = cell2.paragraphs[0].runs[0]
run2.font.bold = True
document.save('aca_form_' + firstname + '_' + lastname + '.docx')
@app.route('/')
def form():
return """
<html>
<body>
<h2>ACA documents generator</h2>
<p>Step 1: Upload directory excel file</p>
<form action="/transform" method="post" enctype="multipart/form-data">
<input type="file" name="data_file" />
<input type="submit" />
</form>
</body>
</html>
"""
@app.route('/transform', methods=["POST"])
def transform_view():
file = request.files['data_file']
if not file:
return "No file"
# openpyxl documentation found here: https://media.readthedocs.org/pdf/openpyxl/latest/openpyxl.pdf
wb = openpyxl.load_workbook(file)
sheet = wb.get_sheet_by_name('Last Name')
lastname_column = get_column_by_name(sheet,'Last Name')
names = get_names(sheet,lastname_column)
session['names'] = names
# if csv list is needed, do the following instead
#response = make_response(names)
#response.headers["Content-Disposition"] = "attachment; filename=part_time_faculty.csv"
return redirect(url_for('get_acaforms')) # url_for calls the function
@app.route('/getacaforms', methods=["GET"])
def get_acaforms():
my_dir = os.path.dirname(__file__)
file_path = os.path.join(my_dir, 'aca_form.docx')
lines = session['names'].split("\n")
for line in lines:
if line != "": # need to check if line is non-empty, ouch!
tokens = line.split(",")
create_aca_file(tokens[0],tokens[1],file_path)
return redirect(url_for('list_files'))
@app.route('/listfiles', methods=['GET'])
def list_files():
files = os.listdir('.')
zf = zipfile.ZipFile('aca-forms.zip', mode='w')
for f in files:
if f.endswith(".docx"):
zf.write(f)
zf.close()
return """
<html>
<body>
<p><a href='/download'>Click here to download ACA forms without course info</a></p>
<p>Step 2: Upload schedule.xlsx excel file</p>
<form action="/schedule" method="post" enctype="multipart/form-data">
<input type="file" name="data_file" />
<input type="submit" />
</form>
</body>
</html>
"""
@app.route('/download', methods=['GET'])
def download():
''' allow user to download zip file '''
z = zipstream.ZipFile(mode='w')
#my_dir = os.path.dirname(__file__)
#file_path = os.path.join(my_dir, 'aca_form.docx')
z.write('aca-forms.zip')
response = Response(z, mimetype='application/zip')
response.headers['Content-Disposition'] = 'attachment; filename={}'.format('aca-forms.zip')
return response
@app.route('/package', methods=['GET'], endpoint='zipball')
def zipball():
''' example of how to zip file and serve to user '''
z = zipstream.ZipFile(mode='w')
my_dir = os.path.dirname(__file__)
file_path = os.path.join(my_dir, 'aca_form.docx')
z.write(file_path)
response = Response(z, mimetype='application/zip')
response.headers['Content-Disposition'] = 'attachment; filename={}'.format('files.zip')
return response
app.secret_key = os.urandom(24)
|
sibis-platform/ncanda-data-integration | scripts/redcap/comments_search.py | Python | bsd-3-clause | 2,070 | 0.010628 | #!/usr/bin/env python
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
#### $Revision: 2110 $
## $LastChangedBy: nicholsn $
## $LastChangedDate: 2015-08-07 09:10:29 -0700 (Fri, 07 Aug 2015) $
##
"""
Scan Report Comment Search
======================
This code searches through comments to find those with No MRI. In addition, a list of subjects that were skipped is also generated.
"""
from __future__ import print_function
#import needed libraries
from past.builtins import execfile
from builtins import str
import time
start = time.time()
import pandas as pd
import re
import csv
execfile('./visit_years_y1_y2.py')
#import year1 and year2 csv file
today=time.strftime("%m%d%Y")
myfile_name='./yr1_yr2_scannotes_'+today+'.csv'
y1_y2 = pd.read_csv(myfile_name)
#ID for ignored Subjects
year1_ignore = []
year2_ignore = []
i = 0
while i < (len(y1_y2)):
if y1_y2['Year1_ignore'][i] == 1:
year1_ignore.append(y1_y2['study_id'][i])
if y1_y2['Year2_ignore'][i] == 1:
year2_ignore.append(y1_y2['study_id'][i])
i += 1
#ID for no MRI subject
year1_noscan = []
year2_noscan = []
i = 0
while i < (len(y1_y2)):
if type(y1_y2['Year1_notes'][i]) == type('str'):
if re.match(".*no mr | i", y1_y2['Year1_notes'][i].lower()) != None:
year1_noscan.append(y1_y2['study_id'][i])
if type(y1_y2['Year2_notes'][i]) == type('str'):
if re.match(".*no mri", y1_y2['Year2_notes'][i].lower()) != None:
year2_noscan.append( | y1_y2['study_id'][i])
i += 1
#write files to name
f = open("./Year1_ignore.txt", "w")
f.write("\n".join([str(x) for x in year1_ignore]))
f.close()
f = open("./Year2_ignore.txt", "w")
f.write("\n".join([str(x) for x in year2_ignore]))
f.close()
f = open("./Year1_NoScan.txt", "w")
f.write("\n".join([str(x) for x in year1_noscan]))
f.close()
f = open("./Year2_NoScan.txt", "w")
f.write("\n".join([str(x) for x in year2_noscan]))
f.close()
elapsed = (time.time() - start)
print(elapsed) |
ProjetPP/PPP-datamodel-Python | ppp_datamodel/nodes/missing.py | Python | agpl-3.0 | 366 | 0.005587 | # coding: | utf8
"""Contains the class representing a “missing” leaf."""
from .abstractnode import register, AbstractNode
@register
class Missing(AbstractNode):
    """Represents a “missing” node.

    https://github.com/ProjetPP/Documentation/blob/master/data-model.md#missing
    """
    # A Missing node carries no data of its own, so no instance attributes.
    __slots__ = ()
    # Serialized "type" discriminator (restored: the token was split by an
    # extraction artifact in the original source).
    _type = 'missing'
    _possible_attributes = ()
|
DecisionSystemsGroup/DSGos | airootfs/usr/share/DSGos-Installer/DSGos_Installer/hardware/catalyst_legacy.py | Python | mit | 7,766 | 0.001803 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# catalyst-legacy.py
#
# Copyright © 2013-2015 DSGos
#
# This file is part of DSGos_Installer.
# Based on code by Wayne Hartmann
#
# DSGos_Installer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# DSGos_Installer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with DSGos_Installer; If not, see <http://www.gnu.org/licenses/>.
""" AMD/ATI proprietary driver installation """
try:
from hardware.hardware import Hardware
except ImportError:
from hardware import Hardware
import os
CLASS_NAME = "CatalystLegacy"
CLASS_ID = "0x03"
VENDOR_ID = "0x1002"
PRIORITY = 0
"""
Since Catalyst 12.4, AMD has separated its development for Radeon HD 2xxx,
3xxx and 4xxx cards into the legacy Catalyst driver. For Radeon HD 5xxx and
newer, there is the regular Catalyst driver. Regardless of the driver you need,
you will also need the Catalyst utilities.
"""
DEVICES = [
"0x9640", "0x9641", "0x9642", "0x9643", "0x9644", "0x9645", "0x9647",
"0x9648", "0x9649", "0x964a", "0x964b", "0x964c", "0x964e", "0x964f",
"0x9903", "0x9904", "0x990f", "0x9905", "0x9906", "0x9907", "0x9908",
"0x9909", "0x9992", "0x9993", "0x9994", "0x990a", "0x9900", "0x9901",
"0x9990", "0x9991", "0x9803", "0x9804", "0x9805", "0x9802", "0x9808",
"0x9809", "0x9806", "0x9807", "0x9610", "0x9611", "0x9612", "0x9613",
"0x9614", "0x9615", "0x9616", "0x9710", "0x9711", "0x9712", "0x9713",
"0x9714", "0x9715", "0x68f8", "0x68f9", "0x68fe", "0x68fa", "0x689b",
"0x689e", "0x6898", "0x6899", "0x689c", "0x689d", "0x68b8", "0x68b9",
"0x68be", "0x68ba", "0x68bf", "0x68da", "0x68d8", "0x68d9", "0x68de",
"0x6738", "0x6739", "0x673e", "0x6778", "0x677b", "0x6772", "0x6779",
"0x6770", "0x671f", "0x6718", "0x6719", "0x671c", "0x671d", "0x675f",
"0x6751", "0x675b", "0x675d", "0x6758", "0x6759", "0x6750", "0x9400",
"0x9401", "0x9402", "0x9403", "0x9405", "0x950f", "0x9513", "0x9451",
"0x9441", "0x9443", "0x94c0", "0x94c7", "0x94c4", "0x94c5", "0x94c1",
"0x94c3", "0x94cc", "0x94c6", "0x95c0", "0x95c5", "0x95c7", "0x95c9",
"0x95c6", "0x958e", "0x958a", "0x9586", "0x9587 | ", "0x9580", "0x9588",
"0x9589", "0x9590", "0x9598", "0x9599", "0x9596", "0x9597", "0x9500",
"0x9515", "0x9505", "0x9501", "0x9507", "0x9519", "0x9517", "0x9540",
"0x9541", "0x9542", "0x954e", "0x954f" | , "0x9487", "0x948f", "0x9498",
"0x9490", "0x9495", "0x94b5", "0x94b3", "0x94b1", "0x94b4", "0x944c",
"0x9450", "0x9452", "0x9442", "0x9440", "0x944e", "0x9460", "0x9462",
"0x6838", "0x6839", "0x683b", "0x683d", "0x683f", "0x6858", "0x6859",
"0x6849", "0x6850", "0x6818", "0x6819", "0x6798", "0x679a", "0x6799",
"0x679e", "0x68a0", "0x68b0", "0x68b1", "0x68a1", "0x68a8", "0x6890",
"0x68c0", "0x68c1", "0x68d0", "0x68d1", "0x68c7", "0x68e0", "0x68e1",
"0x68f0", "0x68f1", "0x68e4", "0x68e5", "0x94cb", "0x94c9", "0x94c8",
"0x9581", "0x9583", "0x958b", "0x95c4", "0x95c2", "0x9591", "0x9593",
"0x9506", "0x9508", "0x9504", "0x9509", "0x9553", "0x9552", "0x955f",
"0x9555", "0x9491", "0x9480", "0x9488", "0x948a", "0x94a0", "0x94a1",
"0x945a", "0x945b", "0x945e", "0x944a", "0x944b", "0x6720", "0x6721",
"0x6724", "0x6725", "0x6764", "0x6765", "0x6763", "0x6761", "0x6760",
"0x6744", "0x6745", "0x6742", "0x6743", "0x6741", "0x6740", "0x6820",
"0x6821", "0x6824", "0x6825", "0x6830", "0x6827", "0x682d", "0x682f",
"0x6831", "0x6823", "0x6826", "0x6843", "0x6840", "0x6841", "0x6842",
"0x6800", "0x6801", "0x68f1", "0x68e8", "0x68e9", "0x6888", "0x6889",
"0x688a", "0x688d", "0x688c", "0x68a9", "0x6880", "0x68c8", "0x68c9",
"0x958f", "0x9595", "0x959b", "0x9557", "0x9489", "0x94a3", "0x947a",
"0x947b", "0x946a", "0x946b", "0x6728", "0x6729", "0x6722", "0x6723",
"0x6726", "0x6727", "0x6766", "0x6767", "0x6768", "0x6762", "0x6700",
"0x6701", "0x6702", "0x6703", "0x6704", "0x6705", "0x6706", "0x6707",
"0x6708", "0x6709", "0x674a", "0x6746", "0x6747", "0x6748", "0x6749",
"0x940f", "0x940b", "0x940a", "0x944f", "0x9447", "0x95cc", "0x958c",
"0x958d", "0x9511", "0x949c", "0x949f", "0x949e", "0x9444", "0x9456",
"0x9446", "0x6828", "0x6808", "0x684c", "0x6809", "0x6780", "0x6784",
"0x6788", "0x678a", "0x68f2", "0x95cd", "0x95ce", "0x95cf"]
class CatalystLegacy(Hardware):
    """Installer for the legacy AMD Catalyst proprietary driver
    (Radeon HD 2xxx/3xxx/4xxx cards)."""

    def __init__(self):
        Hardware.__init__(self, CLASS_NAME, CLASS_ID, VENDOR_ID, DEVICES, PRIORITY)

    @staticmethod
    def get_packages():
        """Return the packages to install; adds lib32 variants on x86_64."""
        pkgs = ["catalyst-hook", "catalyst-libgl", "catalyst-utils", "acpid", "qt4"]
        if os.uname()[-1] == "x86_64":
            pkgs.extend(["lib32-catalyst-libgl", "lib32-catalyst-utils", "lib32-opencl-catalyst"])
        return pkgs

    @staticmethod
    def add_repositories(path):
        """ Adds [xorg116] and [catalyst-hd234k] repos to pacman.conf """
        with open(path, 'r') as pacman_conf:
            lines = pacman_conf.readlines()
        with open(path, "w") as pacman_conf:
            for line in lines:
                # xorg11x needs to be present before core repository
                if "[core]" in line:
                    line = "[xorg116]\n"
                    line += "Server = http://catalyst.wirephire.com/repo/xorg116/$arch\n"
                    line += "SigLevel = Optional TrustAll\n"
                    line += "## Mirrors, if the primary server does not work or is too slow:\n"
                    line += "#Server = http://mirror.rts-informatique.fr/archlinux-catalyst/repo/xorg116/$arch\n"
                    line += "#Server = http://mirror.hactar.bz/Vi0L0/xorg116/$arch\n\n"
                    line += "[catalyst-hd234k]\n"
                    # Bug fix: this entry was missing its "Server = " key (compare
                    # with the [xorg116] section above), producing an invalid
                    # pacman.conf directive.
                    line += "Server = http://catalyst.wirephire.com/repo/catalyst-hd234k/$arch\n"
                    line += "SigLevel = Optional TrustAll\n"
                    line += "## Mirrors, if the primary server does not work or is too slow:\n"
                    line += "#Server = http://70.239.162.206/catalyst-mirror/repo/catalyst-hd234k/$arch\n"
                    line += "#Server = http://mirror.rts-informatique.fr/archlinux-catalyst/repo/catalyst-hd234k/$arch\n"
                    line += "#Server = http://mirror.hactar.bz/Vi0L0/catalyst-hd234k/$arch\n\n"
                    line += "[core]\n"
                pacman_conf.write(line)

    def pre_install(self, dest_dir):
        # Catalyst needs an extra repository and a downgraded Xorg
        # DSGos_Installer uses /tmp/pacman.conf to do the installation
        self.add_repositories("/tmp/pacman.conf")

    def post_install(self, dest_dir):
        # Add repos to user's pacman.conf
        path = os.path.join(dest_dir, "etc/pacman.conf")
        self.add_repositories(path)
        # Enable the ATI services and generate an initial xorg configuration
        # inside the installed system (chroot).
        super().chroot(["systemctl", "enable", "atieventsd"])
        super().chroot(["systemctl", "enable", "catalyst-hook"])
        super().chroot(["systemctl", "enable", "temp-links-catalyst"])
        super().chroot(["aticonfig", "--initial"], dest_dir)

    @staticmethod
    def is_proprietary():
        """This driver is proprietary (closed-source)."""
        return True
|
go-lab/appcomposer | alembic/versions/16f121110a0f_add_same_tool_and_tool_id.py | Python | bsd-2-clause | 1,266 | 0.009479 | """Add same_tool and tool_id
Revision ID: 16f121110a0f
Revises: 2c58f1b857f1
Create Date: 2015-11-09 12:28:43.019410
"""
# Revision identifiers used by Alembic to order migrations in the
# version graph: `revision` names this migration, `down_revision` is the
# migration it applies on top of.
revision = '16f121110a0f'
down_revision = '2c58f1b857f1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable ``same_tool`` and ``tool_id`` columns (both indexed)
    to ActiveTranslationMessages.

    Note: two keyword arguments below were restored from extraction-mangled
    tokens (``nullable=True`` and the ``u'ix_...same_tool'`` index name).
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('ActiveTranslationMessages', sa.Column('same_tool', sa.Boolean(), nullable=True))
    op.add_column('ActiveTranslationMessages', sa.Column('tool_id', sa.Unicode(length=255), nullable=True))
    op.create_index(u'ix_ActiveTranslationMessages_same_tool', 'ActiveTranslationMessages', ['same_tool'], unique=False)
    op.create_index(u'ix_ActiveTranslationMessages_tool_id', 'ActiveTranslationMessages', ['tool_id'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the indexes and columns added by :func:`upgrade`, in reverse
    order of creation."""
    ### commands auto generated by Alembic - please adjust! ###
    table_name = 'ActiveTranslationMessages'
    for index_name in (u'ix_ActiveTranslationMessages_tool_id',
                       u'ix_ActiveTranslationMessages_same_tool'):
        op.drop_index(index_name, table_name=table_name)
    for column_name in ('tool_id', 'same_tool'):
        op.drop_column(table_name, column_name)
    ### end Alembic commands ###
|
fabian4/trove | trove/extensions/common/views.py | Python | apache-2.0 | 1,343 | 0 | # Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class UserView(object):
    """Serializes a database user object (name, host, databases) into the
    API response envelope.

    The mangled ``"host"`` entry (an extraction artifact that broke the dict
    literal) was restored.
    """

    def __init__(self, user):
        # `user` is any object exposing .name, .host and .databases.
        self.user = user

    def data(self):
        """Return the user as a JSON-serializable dict under the "user" key."""
        user_dict = {
            "name": self.user.name,
            "host": self.user.host,
            "databases": self.user.databases
        }
        return {"user": user_dict}
class RootCreatedView(UserView):
    """Serializes a freshly-enabled root user, exposing its name and the
    generated password instead of the host/databases fields."""

    def data(self):
        return {"user": {"name": self.user.name,
                         "password": self.user.password}}
class RootEnabledView(object):
    """Serializes the root-enabled status flag for API responses.

    The class header was restored from an extraction-mangled line that broke
    the ``class`` statement.
    """

    def __init__(self, is_root_enabled):
        # Truthy flag: whether root access is enabled on the instance.
        self.is_root_enabled = is_root_enabled

    def data(self):
        """Return the flag as a JSON-serializable dict."""
        return {'rootEnabled': self.is_root_enabled}
|
yousseb/django_pytds | django_pytds/patches.py | Python | mit | 485 | 0.006186 | from __future__ import unicode_literals
import sys
if sys.version_info >= (2, 7):
# Django Ticket #17671 - Allow using a cursor as a ContextManager
# in Python 2.7
from django.db.backends.util import CursorWrapper
if not hasattr(CursorWrapper, '__enter__'):
enter = lambda self: self
| exit = lambda self, type, value, traceback: self.cursor.__exit__(type, value, traceback)
CursorWr | apper.__enter__ = enter
CursorWrapper.__exit__ = exit
|
PXke/invenio | invenio/modules/redirector/redirect_methods/goto_plugin_cern_hr_documents.py | Python | gpl-2.0 | 7,113 | 0.005624 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This implements a redirection for CERN HR Documents in the CERN Document
Server. It's useful as a reference on how goto plugins could be implemented.
"""
import time
import re
from invenio.legacy.search_engine import perform_request_search
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.legacy.bibdocfile.api import BibRecDocs
def make_cern_ssr_docname(lang, edition, modif=0):
    """Build the document name of a CERN Staff Rules and Regulations PDF.

    :param lang: language code, e.g. ``'en'`` or ``'fr'``
    :param edition: edition number (zero-padded to two digits)
    :param modif: optional modification number; when non-zero a
        ``_modifNN`` suffix is appended
    :return: e.g. ``"CERN_SSR_en_ed08"`` or ``"CERN_SSR_en_ed08_modif02"``

    (The dict-style format arguments were restored from an
    extraction-mangled line.)
    """
    docname = "CERN_SSR_%(lang)s_ed%(edition)02d" % {
        'lang': lang,
        'edition': edition,
    }
    if modif:
        docname += "_modif%02d" % modif
    return docname
_RE_REVISION = re.compile(r"rev(\d\d)")
def _get_revision(docname):
"""
Return the revision in a docname. E.g.:
CERN_ | Circ_Op_en_02_rev01_Implementation measures.pdf -> 1
CERN_Circ_Op_en_02_rev02_Implementation measures.PDF -> 2
"""
g = _RE_REVISION.search(docname)
if g:
return int(g.group(1))
return 0
def _register_document(documents, docname, key):
    """Map *key* -> *docname* in *documents*, keeping only the docname with
    the highest revision number for each key."""
    current = documents.get(key)
    if current is None or _get_revision(docname) > _get_revision(current):
        documents[key] = docname
def goto(type, document='', number=0, lang='en', modif=0):
    """Resolve a CERN HR document request to the URL of its PDF attachment.

    :param type: document family: 'SSR', 'OPER-CIRC' or 'ADMIN-CIRC'
        (note: shadows the builtin ``type``; kept as-is since the name is
        part of the redirection-parameter interface)
    :param document: which variant to serve (e.g. 'en', 'fr', 'annex_en')
    :param number: circular number (unused for 'SSR')
    :param lang: SSR language code
    :param modif: SSR modification number (0 = plain edition)
    """
    today = time.strftime('%Y-%m-%d')
    if type == 'SSR':
        ## We would like a CERN Staff Rules and Regulations
        # Latest record whose validity window (925__a..925__b) contains today.
        recids = perform_request_search(cc='Staff Rules and Regulations', f="925__a:1996-01-01->%s 925__b:%s->9999-99-99" % (today, today))
        recid = recids[-1]
        reportnumber = get_fieldvalues(recid, '037__a')[0]
        edition = int(reportnumber[-2:]) ## e.g. CERN-STAFF-RULES-ED08
        return BibRecDocs(recid).get_bibdoc(make_cern_ssr_docname(lang, edition, modif)).get_file('.pdf').get_url()
    elif type == "OPER-CIRC":
        recids = perform_request_search(cc="Operational Circulars", p="reportnumber=\"CERN-OPER-CIRC-%s-*\"" % number, sf="925__a")
        recid = recids[-1]
        documents = {}
        bibrecdocs = BibRecDocs(recid)
        # Classify every attached file by name; _register_document keeps only
        # the highest revision per key.  The elif order matters: earlier,
        # more specific substrings win.
        # NOTE(review): keys here use '_' (e.g. 'implementation_en') while
        # the ADMIN-CIRC branch uses '-' — presumably intentional, since
        # callers pass matching `document` values; confirm before unifying.
        for docname in bibrecdocs.get_bibdoc_names():
            ldocname = docname.lower()
            if 'implementation' in ldocname:
                _register_document(documents, docname, 'implementation_en')
            elif 'application' in ldocname:
                _register_document(documents, docname, 'implementation_fr')
            elif 'archiving' in ldocname:
                _register_document(documents, docname, 'archiving_en')
            elif 'archivage' in ldocname:
                _register_document(documents, docname, 'archiving_fr')
            elif 'annexe' in ldocname or 'annexes_fr' in ldocname:
                _register_document(documents, docname, 'annex_fr')
            elif 'annexes_en' in ldocname or 'annex' in ldocname:
                _register_document(documents, docname, 'annex_en')
            elif '_en_' in ldocname or '_eng_' in ldocname or '_angl_' in ldocname:
                _register_document(documents, docname, 'en')
            elif '_fr_' in ldocname:
                _register_document(documents, docname, 'fr')
        return bibrecdocs.get_bibdoc(documents[document]).get_file('.pdf').get_url()
    elif type == 'ADMIN-CIRC':
        recids = perform_request_search(cc="Administrative Circulars", p="reportnumber=\"CERN-ADMIN-CIRC-%s-*\"" % number, sf="925__a")
        recid = recids[-1]
        documents = {}
        bibrecdocs = BibRecDocs(recid)
        # Same classification as OPER-CIRC but with '-'-separated keys.
        for docname in bibrecdocs.get_bibdoc_names():
            ldocname = docname.lower()
            if 'implementation' in ldocname:
                _register_document(documents, docname, 'implementation-en')
            elif 'application' in ldocname:
                _register_document(documents, docname, 'implementation-fr')
            elif 'archiving' in ldocname:
                _register_document(documents, docname, 'archiving-en')
            elif 'archivage' in ldocname:
                _register_document(documents, docname, 'archiving-fr')
            elif 'annexe' in ldocname or 'annexes_fr' in ldocname:
                _register_document(documents, docname, 'annex-fr')
            elif 'annexes_en' in ldocname or 'annex' in ldocname:
                _register_document(documents, docname, 'annex-en')
            elif '_en_' in ldocname or '_eng_' in ldocname or '_angl_' in ldocname:
                _register_document(documents, docname, 'en')
            elif '_fr_' in ldocname:
                _register_document(documents, docname, 'fr')
        return bibrecdocs.get_bibdoc(documents[document]).get_file('.pdf').get_url()
def register_hr_redirections():
    """Register every CERN HR document redirection label.

    Run this only once.
    """
    from invenio.modules.redirector.api import register_redirection
    plugin = 'goto_plugin_cern_hr_documents'

    def _register(label, **parameters):
        register_redirection(label, plugin, parameters=parameters)

    ## Staff rules and regulations
    for modif in range(1, 20):
        for lang in ('en', 'fr'):
            _register('hr-srr-modif%02d-%s' % (modif, lang),
                      type='SSR', lang=lang, modif=modif)
    for lang in ('en', 'fr'):
        _register('hr-srr-%s' % lang, type='SSR', lang=lang, modif=0)
    ## Operational Circulars
    for number in range(1, 10):
        for lang in ('en', 'fr'):
            _register('hr-oper-circ-%s-%s' % (number, lang),
                      type='OPER-CIRC', document=lang, number=number)
    for number, special_document in ((2, 'implementation'), (2, 'annex'),
                                     (3, 'archiving'), (3, 'annex')):
        for lang in ('en', 'fr'):
            _register('hr-circ-%s-%s-%s' % (number, special_document, lang),
                      type='OPER-CIRC',
                      document='%s-%s' % (special_document, lang),
                      number=number)
    ## Administrative Circulars:
    for number in range(1, 32):
        for lang in ('en', 'fr'):
            _register('hr-admin-circ-%s-%s' % (number, lang),
                      type='ADMIN-CIRC', document=lang, number=number)
if __name__ == "__main__":
register_hr_redirections()
|
Demon-Hunter/tbag | tbag/core/web.py | Python | mit | 6,329 | 0.000526 | # -*- coding:utf-8 -*-
"""
web基类
Author: huangtao
Date: 2017/8/8
Update: 2017/12/12 1. 增加do_prepare/do_complete函数;
2017/12/17 1. 增加中间件;
2017/12/18 1. 修改错误返回类型;
2. 增加 query_params 及 data 属性方法;
3. 删除 do_http_error 方法;
2018/01/17 1. 跨域增加设置 Access-Control-Allow-Headers;
2018/01/18 1. 返回datetime类型时间转换为UTC时间;
2018/03/22 1. 修改__query_params为_query_params,__data为_data;
"""
import json
import datetime
from tornado.options import options
from tornado.web import RequestHandler
from tbag.core import exceptions
from tbag.utils import datetime_help
class WebHandler(RequestHandler):
    """Base web request handler.

    Provides lazy JSON body/query parsing, a unified success/failure response
    envelope (``{code, msg, data}``), optional CORS headers and middleware
    hooks that run before and after each request.  Subclasses implement
    ``_get_``, ``_post_`` etc., which :meth:`process` dispatches to.
    """

    @property
    def query_params(self):
        """Query-string arguments as a plain dict (parsed lazily, cached)."""
        if not hasattr(self, '_query_params'):
            self._query_params = {}
            for key in self.request.arguments.keys():
                value = self.get_argument(key)
                self._query_params[key] = value
        return self._query_params

    @property
    def data(self):
        """Request body parsed as JSON (lazily, cached); None for empty body.

        Raises:
            exceptions.ValidationError: if the body is not valid JSON.
        """
        if not hasattr(self, '_data'):
            self._data = None
            if self.request.body:
                try:
                    self._data = json.loads(self.request.body.decode('utf-8'))
                except ValueError:
                    # json.loads and bytes.decode failures are both
                    # ValueError subclasses (was a bare `except:`).
                    raise exceptions.ValidationError('请求的body是非json格式')
        return self._data

    def _to_representation(self, instance):
        """Recursively convert datetime/date values for JSON serialization.

        datetimes are rendered as UTC time strings; lists produce new lists
        while dicts are converted in place (matching the original behavior).
        """
        if isinstance(instance, datetime.datetime):
            return datetime_help.get_time_str(instance, datetime_help.UTC)
        if isinstance(instance, datetime.date):
            return instance.isoformat()
        if isinstance(instance, list):
            return [self._to_representation(item) for item in instance]
        if isinstance(instance, dict):
            for key in instance.keys():
                instance[key] = self._to_representation(instance[key])
            return instance
        return instance

    def get_param(self, key, defaut=None):
        """Return a single URI argument, or *defaut* when it is absent.

        Unlike calling ``self.get_argument`` without a default, a missing
        parameter never raises here.
        Note: the parameter is spelled ``defaut`` for backward compatibility
        with existing keyword callers.
        """
        return self.get_argument(key, defaut)

    def get_params(self, *keys):
        """Return the values of *keys* as a list (None for missing keys)."""
        return [self.get_param(key) for key in keys]

    def get_body(self, parse_json=True):
        """Return the request body, optionally parsed as JSON.

        :param parse_json: when True the body is decoded and JSON-parsed
        :return: parsed object, raw bytes (``parse_json=False``), or None
            for an empty body
        :raises exceptions.ValidationError: if JSON parsing fails

        Bug fix: the ValidationError used to be instantiated but never
        raised, so a malformed body was silently returned as raw bytes.
        """
        body = self.request.body
        if not body:
            return None
        if parse_json:
            try:
                body = json.loads(body.decode('utf8'))
            except ValueError:
                raise exceptions.ValidationError(msg='请求body数据格式错误')
        return body

    def do_success(self, data={}, msg='success'):
        """Finish the request with a success envelope (code 0)."""
        result = {
            'code': 0,
            'msg': msg,
            # Restored key name: 'data' was split by an extraction artifact.
            'data': self._to_representation(data)
        }
        self.do_finish(result)

    def do_failed(self, code=400, msg='error', data={}):
        """Finish the request with a failure envelope.

        The HTTP status stays 200; the application-level error lives in the
        envelope's ``code`` field.
        """
        result = {
            'code': code,
            'msg': msg,
            'data': self._to_representation(data)
        }
        self.set_status(200, 'OK')
        self.do_finish(result)

    def do_finish(self, result):
        """Write *result*, adding permissive CORS headers when enabled."""
        if options.cors:
            self.set_header('Access-Control-Allow-Origin', '*')
            self.set_header('Access-Control-Allow-Headers', '*')
        self.finish(result)

    def write_error(self, status_code, **kwargs):
        """Map uncaught exceptions to failure envelopes.

        Overrides RequestHandler.write_error so custom exception classes can
        be caught here; anything else becomes a generic 500 envelope.
        """
        exc_info = kwargs.get('exc_info')
        # Guard: tornado may call write_error without exc_info.
        ex = exc_info[1] if exc_info else None
        if isinstance(ex, exceptions.CustomException):
            self.do_failed(ex.code, ex.msg, ex.data)
        else:
            self.do_failed(500, '服务器内部错误')

    async def head(self, *args, **kwargs):
        await self.process('_head_', *args, **kwargs)

    async def get(self, *args, **kwargs):
        await self.process('_get_', *args, **kwargs)

    async def post(self, *args, **kwargs):
        await self.process('_post_', *args, **kwargs)

    async def put(self, *args, **kwargs):
        await self.process('_put_', *args, **kwargs)

    async def delete(self, *args, **kwargs):
        await self.process('_delete_', *args, **kwargs)

    async def patch(self, *args, **kwargs):
        await self.process('_patch_', *args, **kwargs)

    async def options(self, *args, **kwargs):
        await self.process('_options_', *args, **kwargs)

    async def process(self, func_name, *args, **kwargs):
        """Dispatch to the subclass handler named *func_name*.

        :param func_name: one of ``_head_``, ``_get_``, ``_post_``, ``_put_``,
            ``_delete_``, ``_patch_``, ``_options_``
        :raises exceptions.NotFound: if the subclass does not implement it

        Runs the middleware prepare hooks before and the finish hooks after
        the handler.
        """
        func = getattr(self, func_name, None)
        if not func:
            raise exceptions.NotFound()
        await self.do_prepare()
        await func(*args, **kwargs)
        await self.do_complete()

    async def do_prepare(self):
        """Run every configured middleware's prepare() hook before the
        handler (e.g. statistics, permission checks)."""
        for m in options.middlewares:
            await m.prepare(self)

    async def do_complete(self):
        """Run every configured middleware's finish() hook after the
        handler (e.g. statistics, logging)."""
        for m in options.middlewares:
            await m.finish(self)
|
Elandril/SickRage | sickbeard/providers/tntvillage.py | Python | gpl-3.0 | 20,653 | 0.006779 | # Author: Giovanni Borri
# Modified by gborri, https://github.com/gborri for TNTVillage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import traceback
import datetime
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.bs4_parser import BS4Parser
from unidecode import unidecode
from sickbeard.helpers import sanitizeSceneName
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickrage.helper.exceptions import AuthException
# Forum category name -> numeric category id for every TNTVillage category
# that is *not* TV content and must therefore be excluded from searches.
category_excluded = {
                     'Sport' : 22,
                     'Teatro' : 23,
                     'Video Musicali' : 21,
                     'Film' : 4,
                     'Musica' : 2,
                     'Students Releases' : 13,
                     'E Books' : 3,
                     'Linux' : 6,
                     'Macintosh' : 9,
                     'Windows Software' : 10,
                     'Pc Game' : 11,
                     'Playstation 2' : 12,
                     'Wrestling' : 24,
                     'Varie' : 25,
                     'Xbox' : 26,
                     'Immagini sfondi' : 27,
                     'Altri Giochi' : 28,
                     'Fumetteria' : 30,
                     'Trash' : 31,
                     'PlayStation 1' : 32,
                     'PSP Portable' : 33,
                     'A Book' : 34,
                     'Podcast' : 35,
                     'Edicola' : 36,
                     'Mobile' : 37,
                    }
class TNTVillageProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "TNTVillage")
self.support | sBacklog = True
self.public = False
self.enabled = False
self._uid = None
self._hash = None
self.username = None
self.password = None
self.ratio = None
self.cat = None
self.page = 10
self.subtitle = None
| self.minseed = None
self.minleech = None
self.hdtext = [
' - Versione 720p',
' Versione 720p',
' V 720p',
' V 720',
' V HEVC',
' V HEVC',
' V 1080',
' Versione 1080p',
' 720p HEVC',
' Ver 720',
' 720p HEVC',
' 720p',
]
self.category_dict = {
'Serie TV' : 29,
'Cartoni' : 8,
'Anime' : 7,
'Programmi e Film TV' : 1,
'Documentari' : 14,
'All' : 0,
}
self.urls = {'base_url' : 'http://forum.tntvillage.scambioetico.org',
'login' : 'http://forum.tntvillage.scambioetico.org/index.php?act=Login&CODE=01',
'detail' : 'http://forum.tntvillage.scambioetico.org/index.php?showtopic=%s',
'search' : 'http://forum.tntvillage.scambioetico.org/?act=allreleases&%s',
'search_page' : 'http://forum.tntvillage.scambioetico.org/?act=allreleases&st={0}&{1}',
'download' : 'http://forum.tntvillage.scambioetico.org/index.php?act=Attach&type=post&id=%s',
}
self.sub_string = ['sub', 'softsub']
self.url = self.urls['base_url']
self.cache = TNTVillageCache(self)
self.categories = "cat=29"
self.cookies = None
def isEnabled(self):
return self.enabled
def imageName(self):
return 'tntvillage.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {'UserName': self.username,
'PassWord': self.password,
'CookieDate': 0,
'submit': 'Connettiti al Forum',
}
response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log(u'Unable to connect to ' + self.name + ' provider.', logger.ERROR)
return False
if re.search('Sono stati riscontrati i seguenti errori', response) \
or re.search('<title>Connettiti</title>', response):
logger.log(u'Invalid username or password for ' + self.name + ' Check your settings', logger.ERROR)
return False
return True
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
if self.show.air_by_date:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _reverseQuality(self, quality):
quality_string = ''
if quality == Quality.SDTV:
quality_string = ' HDTV x264'
if quality == Quality.SDDVD:
quality_string = ' DVDRIP'
elif quality == Quality.HDTV:
quality_string = ' 720p HDTV x264'
elif quality == Quality.FULLHDTV:
quality_string = ' 1080p HDTV x264'
elif quality == Quality |
spierepf/mpf | tests/test_SmartVirtualPlatform.py | Python | mit | 3,495 | 0.004864 | from MpfTestCase import MpfTestCase
from mock import MagicMock
class TestSmartVirtualPlatform(MpfTestCase):
    def getConfigFile(self):
        # Machine config file loaded for this test case.
        return 'test_smart_virtual.yaml'
    def getMachinePath(self):
        # Path (relative to the test runner) of the test machine folder.
        return '../tests/machine_files/platform/'
    def get_platform(self):
        # Run these tests against the smart_virtual hardware platform.
        return 'smart_virtual'
def test_eject(self):
# tests that firing a coil in a ball device with a ball in it
# successfully activates the right switches to simulate the ball
# leaving that device and entering the target device.
self.machine.s | witch_controller.process_switch('device1_s1', 1)
# have to stop() it since the ball is unexpected and it will eject it
# otherwise.
self.machine.ball_devices.device1.stop()
self.advance_time_and_run(.6)
self.assertEqual(1, self.machine.ball_devices.device1.balls)
self.assertEqual(0, self.machine.ball_devices.device2.balls)
self.assertEqual(False,
| self.machine.switch_controller.is_active('device2_s1'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device2_s2'))
self.assertEqual(True,
self.machine.switch_controller.is_active('device1_s1'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device1_s2'))
self.machine.coils.device1.pulse()
self.advance_time_and_run(1)
self.assertEqual(0, self.machine.ball_devices.device1.balls)
self.assertEqual(1, self.machine.ball_devices.device2.balls)
self.assertEqual(True,
self.machine.switch_controller.is_active('device2_s1'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device2_s2'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device1_s1'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device1_s2'))
def test_eject_with_no_ball(self):
# tests that firing a coil of a device with no balls in it does not
# put a ball in the target device.
self.assertEqual(0, self.machine.ball_devices.device1.balls)
self.assertEqual(0, self.machine.ball_devices.device2.balls)
self.assertEqual(False,
self.machine.switch_controller.is_active('device2_s1'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device2_s2'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device1_s1'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device1_s2'))
self.machine.coils.plunger.pulse()
self.advance_time_and_run(1)
self.assertEqual(0, self.machine.ball_devices.device1.balls)
self.assertEqual(0, self.machine.ball_devices.device2.balls)
self.assertEqual(False,
self.machine.switch_controller.is_active('device2_s1'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device2_s2'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device1_s1'))
self.assertEqual(False,
self.machine.switch_controller.is_active('device1_s2'))
def test_start_active_switches(self):
# tests that the virtual_platform_start_active_switches really do start
# active.
self.assertEqual(3, self.machine.ball_devices.trough.balls) |
reuteras/sr_nyheter_m | news.py | Python | gpl-3.0 | 3,138 | 0.003829 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Peter Reuterås
#
# This program is | free software: you can redistribute it and/or modify
# it under the terms of the GNU | General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Main program"""
from optparse import OptionParser
from datetime import datetime
import srnewslib
import signal
import sys
class TimeoutException(Exception):
    """Raised by the SIGALRM handler when the run exceeds its time budget."""
def timeout_handler(signum, frame):
    # SIGALRM callback: abort the in-progress work by raising an exception
    # that the __main__ block catches.
    raise TimeoutException()
def register_handler():
    """Install timeout_handler for SIGALRM and arm a 55-second alarm.

    Returns the previously installed SIGALRM handler so the caller can
    restore it once the work is done.
    """
    previous_handler = signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(55)
    return previous_handler
def main():
    """Mirror unpublished tweets from the followed account to this one.

    Loads the configuration (default ``news.cfg``, or the first positional
    argument when ``-c`` is given), fetches tweets newer than the last
    stored id, rewrites each tweet's link through the mobile-URL shortener
    and republishes it, then posts a closing notice if anything was sent.
    """
    statusUpdated = False
    parser = OptionParser(version="%prog 1.1")
    parser.add_option("-c", action="store_true", dest="config",
                      help="Specify config file", default=False)
    (options, args) = parser.parse_args()
    if options.config:
        config = args[0]
    else:
        config = "news.cfg"
    CONFIG = srnewslib.config_init(config)
    dbc = srnewslib.db_init()
    api = srnewslib.get_twitter_api()
    last_id = srnewslib.get_last_messageid(dbc)
    messages = api.user_timeline(screen_name=CONFIG.get('twitter', 'follow'), since_id=last_id)
    for message in messages:
        if not srnewslib.published(dbc, message.id):
            (text, url, tag) = srnewslib.get_text_and_url(message.text)
            # Prefer the already-expanded URL from the tweet entities.
            url = message.entities['urls'][0]['expanded_url']
            longUrl = srnewslib.get_long_url(url)
            (artikel, programid) = srnewslib.get_article_info_from_url(longUrl)
            mobUrl = srnewslib.get_mobile_url(artikel, programid)
            shortUrl = srnewslib.get_short_url(mobUrl)
            tweet = text + " " + shortUrl + " " + tag
            api.update_status(tweet, message.id)
            srnewslib.add_to_db(dbc, message.id)
            statusUpdated = True
    if statusUpdated:
        try:
            api.update_status("Kontot stängs snart. Följ Sveriges Radios @" + CONFIG.get('twitter', 'follow') + " för att inte missa nyheter. Mer info http://goo.gl/JA6W3. Tid: " + str(datetime.now().strftime("%Y%m%d %H:%M")))
        except Exception:
            # Posting an identical status makes the API raise; that duplicate
            # is expected and deliberately ignored. (Was a bare "except:",
            # which also swallowed SystemExit/KeyboardInterrupt.)
            pass
if __name__ == '__main__':
    # Install a 55 second SIGALRM watchdog so a hung Twitter call cannot
    # stall the run indefinitely.
    old_handler = register_handler()
    try:
        main()
    except TimeoutException:
        msg = "Timeout in talking to twitter.\n"
        msg = msg.encode('utf-8')
        sys.stderr.write(msg)
        sys.exit(1)
    finally:
        # Always restore the previous handler and cancel any pending alarm.
        signal.signal(signal.SIGALRM, old_handler)
        signal.alarm(0)
|
rahulunair/nova | nova/api/wsgi.py | Python | apache-2.0 | 8,715 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI primitives used throughout the nova WSGI apps."""
import os
from oslo_log import log as logging
from paste import deploy
import routes.middleware
import webob
import nova.conf
from nova import exception
from nova.i18n import _, _LE
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class Request(webob.Request):
    """WSGI request that honours the configured secure-proxy SSL header."""

    def __init__(self, environ, *args, **kwargs):
        # When running behind an SSL-terminating proxy, trust the header the
        # proxy sets as the effective URL scheme for this request.
        header = CONF.wsgi.secure_proxy_ssl_header
        if header:
            forwarded_scheme = environ.get(header)
            if forwarded_scheme:
                environ['wsgi.url_scheme'] = forwarded_scheme
        super(Request, self).__init__(environ, *args, **kwargs)
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste app factory used in paste.deploy config files.

        Any local configuration (values under the [app:APPNAME] section of
        the paste config) is forwarded to ``__init__`` as keyword arguments.
        For example::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        results in ``nova.api.fancy_api.Wadl(latest_version='1.3')``.

        Subclasses could re-implement ``factory``, but thanks to the kwarg
        passing it should not be necessary.
        """
        return cls(**local_config)

    def __call__(self, environ, start_response):
        r"""Entry point for WSGI requests; subclasses must override this.

        Implementations typically look like::

            @webob.dec.wsgify(RequestClass=Request)
            def __call__(self, req):
                # Valid responses include: a plain string, a webob HTTP
                # exception, a webob Response, another WSGI app, or
                # req.get_response(app). Alternatively set req.response
                # and return None.
                ...

        See the end of http://pythonpaste.org/webob/modules/dec.html
        for more info.
        """
        raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
    """Base WSGI middleware.

    These classes require an application to be
    initialized that will be called next. By default the middleware will
    simply call its wrapped app, or you can override __call__ to customize its
    behavior.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (that is, values under the [filter:APPNAME]
        section of the paste config) will be passed into the `__init__` method
        as kwargs.

        A hypothetical configuration would look like:

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        which would result in a call to the `Analytics` class as

            import nova.api.analytics
            analytics.Analytics(app_from_paste, redis_host='127.0.0.1')

        You could of course re-implement the `factory` method in subclasses,
        but using the kwarg passing it shouldn't be necessary.
        """
        def _factory(app):
            return cls(app, **local_config)
        return _factory

    def __init__(self, application):
        # The wrapped WSGI application invoked after process_request().
        # (Restored from a garbled "applicati | on" in the source.)
        self.application = application

    def process_request(self, req):
        """Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be returned
        and execution will stop here.
        """
        return None

    def process_response(self, response):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        response = self.process_request(req)
        if response:
            return response
        response = req.get_response(self.application)
        return self.process_response(response)
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call, and will usually specify an 'action' so the
        controller can route the request to an action-specific method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, '/svrlist', controller=sc, action='list')

          # Actions are all implicitly defined
          mapper.resource('server', 'servers', controller=sc)

          # Pointing to an arbitrary WSGI app; use {path_info:.*} so the
          # target app receives just that section of the URL.
          mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
        """
        self.map = mapper
        # RoutesMiddleware matches each request against self.map, records
        # the result in the WSGI environ and then calls self._dispatch.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Route the incoming request via self.map; unmatched paths get 404."""
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Hand the matched request to its controller, or return a 404.

        Invoked by self._router after RoutesMiddleware has stored the
        routing match in req.environ['wsgiorg.routing_args'].
        """
        routing_args = req.environ['wsgiorg.routing_args'][1]
        if not routing_args:
            return webob.exc.HTTPNotFound()
        return routing_args['controller']
class Loader(object):
"""Used to load WSGI applications from paste configurations."""
def __init__(self, config_path=None):
"""Initialize the loader, and attempt to find the config.
:param config_path: Full or relative path to the paste config.
:returns: None
"""
self.config_path = None
config_path = config_path or CONF.wsgi.api_paste_config
if not os.path.isabs(config_path):
self.config_path = CONF.find_file(config_path)
elif os.path.exists(config_path):
self.config_path = config_path
if not self.config_path:
raise exception.ConfigNotFound(path=config_path)
def load_app(self, name):
"""Return the paste URLMap wrapped WSGI application.
:param name: Name of the applicati |
tensorflow/tfx | tfx/utils/name_utils.py | Python | apache-2.0 | 2,836 | 0.010931 | # Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility related to python object names."""
import importlib
from typing import Any, Sequence
def _is_valid_name(value: str) -> bool:
return (isinstance(value, str) and
all(token.isidentifier() for token in value.split('.')))
def _get_nested_attr(obj: Any, attrs: Sequence[str]):
if len(attrs) == 1:
return getattr(obj, attrs[0])
else:
attr, *rest = attrs
if not hasattr(obj, attr):
raise AttributeError(f'{obj} does not have {attr} attribute.')
return _get_nested_attr(getattr(obj, attr), rest)
def _get_qualified_name(value: Any) -> str:
if hasattr(value, '__qualname__'):
return value.__qualname__
elif hasattr(value, '__name__'):
return value.__name__
raise ValueError(f'{value} does not have a name.')
def get_full_name(value: Any, strict_check: bool = True) -> str:
  """Get fully qualified name of the given class or function.

  Args:
    value: A class or function exposing `__module__` and `__name__`.
    strict_check: If True, additionally verify that the name is a chain of
      valid identifiers and that `value` is importable under that name.

  Returns:
    The '<module>.<qualified name>' string for `value`.

  Raises:
    ValueError: If `value` has no name, the name is not fully qualified
      (e.g. a locally defined class), or `value` is not importable.
  """
  if not hasattr(value, '__module__') or not hasattr(value, '__name__'):
    raise ValueError(f'{value} does not have a name.')
  name = _get_qualified_name(value)
  if strict_check:
    if not _is_valid_name(name):
      # Locally defined classes have invalid name (foo.<local>.MyClass)
      raise ValueError(f'{value} does not have a qualified name.')
    mod = importlib.import_module(value.__module__)
    try:
      _get_nested_attr(mod, name.split('.'))
    # Restored from a garbled "except Att | ributeError" in the source.
    except AttributeError as e:
      # Dynamically created classes are not importable and should not be
      # accessed by the full_name.
      raise ValueError(f'{value} is not importable.') from e
  return f'{value.__module__}.{name}'
def resolve_full_name(full_name: str) -> Any:
  """Resolves the reference (class, function, value, etc.) of `full_name`.

  Tries the longest module prefix first: for 'a.b.c' it attempts to import
  'a.b' and read attribute 'c', then 'a' and read 'b.c', so dotted
  attributes inside a module resolve correctly.

  Raises:
    ValueError: If `full_name` is not a valid dotted identifier chain or
      cannot be resolved from any importable prefix.
  """
  if not _is_valid_name(full_name):
    raise ValueError(f'{full_name!r} is not a valid name.')
  # Restored from a garbled "se | gments" assignment in the source.
  segments = full_name.split('.')
  errors = []
  for i in range(len(segments) - 1, 0, -1):
    module_name = '.'.join(segments[:i])
    try:
      mod = importlib.import_module(module_name)
      result = _get_nested_attr(mod, segments[i:])
      break
    except (ImportError, AttributeError) as e:
      errors.append(e)
      continue
  else:
    # Every prefix failed to import or resolve.
    raise ValueError(f'Cannot find {full_name}: {errors}')
  return result
|
phobson/statsmodels | statsmodels/tsa/statespace/dynamic_factor.py | Python | bsd-3-clause | 57,954 | 0.000155 | """
Dynamic factor model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
from .kalman_filter import KalmanFilter, FilterResults
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible,
constrain_stationary_univariate, unconstrain_stationary_univariate,
constrain_stationary_multivariate, unconstrain_stationary_multivariate
)
from sc | ipy.linalg import solve_discrete_lyapunov
from statsmodels.multivariate.pca import PCA
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is | _using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ValueWarning
import statsmodels.base.wrapper as wrap
class DynamicFactor(MLEModel):
r"""
Dynamic factor model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors for the observation equation, shaped
nobs x k_exog.
k_factors : int
The number of unobserved factors.
factor_order : int
The order of the vector autoregression followed by the factors.
error_cov_type : {'scalar', 'diagonal', 'unstructured'}, optional
The structure of the covariance matrix of the observation error term,
where "unstructured" puts no restrictions on the matrix, "diagonal"
requires it to be any diagonal matrix (uncorrelated errors), and
"scalar" requires it to be a scalar times the identity matrix. Default
is "diagonal".
error_order : int, optional
The order of the vector autoregression followed by the observation
error component. Default is None, corresponding to white noise errors.
error_var : boolean, optional
Whether or not to model the errors jointly via a vector autoregression,
rather than as individual autoregressions. Has no effect unless
`error_order` is set. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
exog : array_like, optional
Array of exogenous regressors for the observation equation, shaped
nobs x k_exog.
k_factors : int
The number of unobserved factors.
factor_order : int
The order of the vector autoregression followed by the factors.
error_cov_type : {'diagonal', 'unstructured'}
The structure of the covariance matrix of the error term, where
"unstructured" puts no restrictions on the matrix and "diagonal"
requires it to be a diagonal matrix (uncorrelated errors).
error_order : int
The order of the vector autoregression followed by the observation
error component.
error_var : boolean
Whether or not to model the errors jointly via a vector autoregression,
rather than as individual autoregressions. Has no effect unless
`error_order` is set.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
Notes
-----
The dynamic factor model considered here is in the so-called static form,
and is specified:
.. math::
y_t & = \Lambda f_t + B x_t + u_t \\
f_t & = A_1 f_{t-1} + \dots + A_p f_{t-p} + \eta_t \\
u_t & = C_1 u_{t-1} + \dots + C_1 f_{t-q} + \varepsilon_t
where there are `k_endog` observed series and `k_factors` unobserved
factors. Thus :math:`y_t` is a `k_endog` x 1 vector and :math:`f_t` is a
`k_factors` x 1 vector.
:math:`x_t` are optional exogenous vectors, shaped `k_exog` x 1.
:math:`\eta_t` and :math:`\varepsilon_t` are white noise error terms. In
order to identify the factors, :math:`Var(\eta_t) = I`. Denote
:math:`Var(\varepsilon_t) \equiv \Sigma`.
Options related to the unobserved factors:
- `k_factors`: this is the dimension of the vector :math:`f_t`, above.
To exclude factors completely, set `k_factors = 0`.
- `factor_order`: this is the number of lags to include in the factor
evolution equation, and corresponds to :math:`p`, above. To have static
factors, set `factor_order = 0`.
Options related to the observation error term :math:`u_t`:
- `error_order`: the number of lags to include in the error evolution
equation; corresponds to :math:`q`, above. To have white noise errors,
set `error_order = 0` (this is the default).
- `error_cov_type`: this controls the form of the covariance matrix
:math:`\Sigma`. If it is "dscalar", then :math:`\Sigma = \sigma^2 I`. If
it is "diagonal", then
:math:`\Sigma = \text{diag}(\sigma_1^2, \dots, \sigma_n^2)`. If it is
"unstructured", then :math:`\Sigma` is any valid variance / covariance
matrix (i.e. symmetric and positive definite).
- `error_var`: this controls whether or not the errors evolve jointly
according to a VAR(q), or individually according to separate AR(q)
processes. In terms of the formulation above, if `error_var = False`,
then the matrices :math:C_i` are diagonal, otherwise they are general
VAR matrices.
References
----------
.. [1] Lutkepohl, Helmut. 2007.
New Introduction to Multiple Time Series Analysis.
Berlin: Springer.
"""
def __init__(self, endog, k_factors, factor_order, exog=None,
error_order=0, error_var=False, error_cov_type='diagonal',
enforce_stationarity=True, **kwargs):
# Model properties
self.enforce_stationarity = enforce_stationarity
# Factor-related properties
self.k_factors = k_factors
self.factor_order = factor_order
# Error-related properties
self.error_order = error_order
self.error_var = error_var and error_order > 0
self.error_cov_type = error_cov_type
# Exogenous data
self.k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
self.k_exog = exog.shape[1]
# Note: at some point in the future might add state regression, as in
# SARIMAX.
self.mle_regression = self.k_exog > 0
# We need to have an array or pandas at this point
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog, order='C')
# Save some useful model orders, internally used
k_endog = endog.shape[1] if endog.ndim > 1 else 1
self._factor_order = max(1, self.factor_order) * self.k_factors
self._error_order = self.error_order * k_endog
# Calculate the number of states
k_states = self._factor_order
k_posdef = self.k_factors
if self.error_order > 0:
k_states += self._error_order
k_posdef += k_endog
if k_states == 0:
k_states = 1
k_posdef = 1
# Test for non-multivariate endog
if k_endog < 2:
raise ValueError('The dynamic factors model is only valid for'
|
stephanehenry27/Sickbeard-anime | sickbeard/encodingKludge.py | Python | gpl-3.0 | 2,060 | 0.003398 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import os
from sickbeard import logger
import sickbeard
# This module tries to deal with the apparently random behavior of python when dealing with unicode <-> utf-8
# encodings. It tries to just use unicode, but if that fails then it tries forcing it to utf-8. Any functions
# which return something should always return unicode.
def fixStupidEncodings(x, silent=False):
    """Best-effort conversion of *x* to unicode (Python 2 semantics).

    Byte strings are decoded with the detected system encoding; unicode
    passes through untouched; anything else is logged and mapped to None.
    The `silent` flag only lowers the log level for unknown values.
    """
    if type(x) == str:
        try:
            return x.decode(sickbeard.SYS_ENCODING)
        except UnicodeDecodeError:
            logger.log(u"Unable to decode value: "+repr(x), logger.ERROR)
            return None
    elif type(x) == unicode:
        return x
    else:
        logger.log(u"Unknown value passed in, ignoring it: "+str(type(x))+" ("+repr(x)+":"+repr(type(x))+")", logger.DEBUG if silent else logger.ERROR)
        return None
    # Note: the original had an unreachable trailing "return None" here
    # (every if/elif/else branch already returns); it has been removed.
def fixListEncodings(x):
    """Apply fixStupidEncodings to every element of a list.

    Non-list values are returned unchanged; elements that could not be
    converted (None results) are filtered out of the returned list.
    """
    if type(x) != list:
        return x
    else:
        # "is not None" instead of the original "!= None" identity-unsafe test.
        return filter(lambda item: item is not None, map(fixStupidEncodings, x))
def ek(func, *args):
    """Encoding-kludge wrapper: call *func* with system-encoded string args.

    String/unicode arguments are encoded to the detected system encoding
    before the call, and string/list results are decoded back to unicode.
    (The conditional expression below was restored from a garbled
    "i | f ... els | e" line; the redundant "result = None" was dropped.)
    """
    result = func(*[x.encode(sickbeard.SYS_ENCODING) if type(x) in (str, unicode) else x for x in args])
    if type(result) == list:
        return fixListEncodings(result)
    elif type(result) == str:
        return fixStupidEncodings(result)
    else:
        return result
|
bdh1011/wau | venv/lib/python2.7/site-packages/twisted/conch/scripts/ckeygen.py | Python | mit | 7,540 | 0.003714 | # -*- test-case-name: twisted.conch.test.test_ckeygen -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation module for the `ckeygen` command.
"""
import sys, os, getpass, socket
if getpass.getpass == getpass.unix_getpass:
try:
import termios # hack around broken termios
termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
sys.modules['termios'] = None
reload(getpass)
from twisted.conch.ssh import keys
from twisted.python import failure, filepath, log, usage, randbytes
class GeneralOptions(usage.Options):
    # Command-line schema for ckeygen, rendered and parsed by
    # twisted.python.usage. synopsis/longdesc are shown verbatim by --help.
    synopsis = """Usage:    ckeygen [options]
 """

    longdesc = "ckeygen manipulates public/private keys in various ways."

    # Options that take a value: [long, short, default, description].
    optParameters = [['bits', 'b', 1024, 'Number of bits in the key to create.'],
                     ['filename', 'f', None, 'Filename of the key file.'],
                     ['type', 't', None, 'Specify type of key to create.'],
                     ['comment', 'C', None, 'Provide new comment.'],
                     ['newpass', 'N', None, 'Provide new passphrase.'],
                     ['pass', 'P', None, 'Provide old passphrase.']]

    # Boolean flags selecting the action to perform (see run()).
    optFlags = [['fingerprint', 'l', 'Show fingerprint of key file.'],
                ['changepass', 'p', 'Change passphrase of private key file.'],
                ['quiet', 'q', 'Quiet.'],
                ['no-passphrase', None, "Create the key with no passphrase."],
                ['showpub', 'y', 'Read private key file and print public key.']]

    # Shell tab-completion metadata for the -t/--type parameter.
    compData = usage.Completions(
        optActions={"type": usage.CompleteList(["rsa", "dsa"])})
def run():
    """Command-line entry point: parse argv and dispatch to one action.

    Exactly one operation runs per invocation: key generation (-t),
    fingerprint display (-l), passphrase change (-p) or public key
    dump (-y); otherwise usage is printed and the process exits 1.
    """
    options = GeneralOptions()
    try:
        options.parseOptions(sys.argv[1:])
    except usage.UsageError, u:
        print 'ERROR: %s' % u
        options.opt_help()
        sys.exit(1)
    log.discardLogs()
    # Route Twisted's deferred-error reporting through handleError so a
    # logged failure also records a non-zero exit status.
    log.deferr = handleError # HACK
    if options['type']:
        if options['type'] == 'rsa':
            generateRSAkey(options)
        elif options['type'] == 'dsa':
            generateDSAkey(options)
        else:
            sys.exit('Key type was %s, must be one of: rsa, dsa' % options['type'])
    elif options['fingerprint']:
        printFingerprint(options)
    elif options['changepass']:
        changePassPhrase(options)
    elif options['showpub']:
        displayPublicKey(options)
    else:
        # No action selected: show usage and fail.
        options.opt_help()
        sys.exit(1)
def handleError():
    # Installed as log.deferr in run() (see the HACK note there) so that a
    # failure logged by Twisted also flips the module-level exit status.
    global exitStatus
    exitStatus = 2
    log.err(failure.Failure())
    # Re-raise the active exception so the caller still sees the failure.
    raise
def generateRSAkey(options):
    """Generate an RSA key pair of options['bits'] bits and save it."""
    # Imported lazily so PyCrypto is only required when generating keys.
    from Crypto.PublicKey import RSA
    print 'Generating public/private rsa key pair.'
    key = RSA.generate(int(options['bits']), randbytes.secureRandom)
    _saveKey(key, options)
def generateDSAkey(options):
    """Generate a DSA key pair of options['bits'] bits and save it."""
    # Imported lazily so PyCrypto is only required when generating keys.
    from Crypto.PublicKey import DSA
    print 'Generating public/private dsa key pair.'
    key = DSA.generate(int(options['bits']), randbytes.secureRandom)
    _saveKey(key, options)
def printFingerprint(options):
if not options['filename']:
filename = os.path.expanduser('~/.ssh/id_rsa')
options['filename'] = raw_input('Enter file in which the key is (%s): ' % filename)
if os.path.exists(options['filename']+'.pub'):
options['filename'] += '.pub'
try:
key = keys.Key.fromFile(options['filename'])
obj = key.keyObject
print '%s %s %s' % (
obj.size() + 1,
key.fingerprint(),
os.path.basename(options['filename']))
except:
sys.exit('bad key')
def changePassPhrase(options):
if not options['filename']:
filename = os.path.expanduser('~/.ssh/id_rsa')
options['filename'] = raw_input(
'Enter file in which the key is (%s): ' % filename)
try:
key = keys.Key.fromFile(options['filename']).keyObject
except keys.EncryptedKeyError as e:
# Raised if password not supplied for an encrypted key
if not options.get('pass'):
options['pass'] = getpass.getpass('Enter old passphrase: ')
try:
key = keys.Key.fromFile(
options['filename'], passphrase=options['pass']).keyObject
except keys.BadKeyError:
sys.exit('Could not change passphrase: old passphrase error')
except keys.EncryptedKeyError as e:
sys.exit('Could not change passphrase: %s' % (e,))
except keys.BadKeyError as e:
sys.exit('Could not change passphrase: %s' % (e,))
if not options.get('newpass'):
while 1:
p1 = getp | ass.getpass(
'Enter new passphrase (empty for no passphrase): ')
p2 = getpass.getpass('Enter same passphrase again: ')
if p1 == p2:
break
print 'Passphrases do not match. Try again.'
options['newpass'] = p1
try:
newkeydata = keys.Key(key).toString('openssh',
| extra=options['newpass'])
except Exception as e:
sys.exit('Could not change passphrase: %s' % (e,))
try:
keys.Key.fromString(newkeydata, passphrase=options['newpass'])
except (keys.EncryptedKeyError, keys.BadKeyError) as e:
sys.exit('Could not change passphrase: %s' % (e,))
fd = open(options['filename'], 'w')
fd.write(newkeydata)
fd.close()
print 'Your identification has been saved with the new passphrase.'
def displayPublicKey(options):
if not options['filename']:
filename = os.path.expanduser('~/.ssh/id_rsa')
options['filename'] = raw_input('Enter file in which the key is (%s): ' % filename)
try:
key = keys.Key.fromFile(options['filename']).keyObject
except keys.EncryptedKeyError:
if not options.get('pass'):
options['pass'] = getpass.getpass('Enter passphrase: ')
key = keys.Key.fromFile(
options['filename'], passphrase = options['pass']).keyObject
print keys.Key(key).public().toString('openssh')
def _saveKey(key, options):
    """Write *key* to options['filename'] plus a .pub companion file.

    Prompts for a filename (defaulting to ~/.ssh/id_rsa or id_dsa based on
    the key type), for overwrite confirmation, and for a passphrase unless
    one was supplied or --no-passphrase was given.
    """
    if not options['filename']:
        kind = keys.objectType(key)
        kind = {'ssh-rsa':'rsa','ssh-dss':'dsa'}[kind]
        filename = os.path.expanduser('~/.ssh/id_%s'%kind)
        options['filename'] = raw_input('Enter file in which to save the key (%s): '%filename).strip() or filename
    if os.path.exists(options['filename']):
        print '%s already exists.' % options['filename']
        yn = raw_input('Overwrite (y/n)? ')
        # NOTE(review): an empty reply would raise IndexError here.
        if yn[0].lower() != 'y':
            sys.exit()
    if options.get('no-passphrase'):
        options['pass'] = b''
    elif not options['pass']:
        while 1:
            p1 = getpass.getpass('Enter passphrase (empty for no passphrase): ')
            p2 = getpass.getpass('Enter same passphrase again: ')
            if p1 == p2:
                break
            print 'Passphrases do not match. Try again.'
        options['pass'] = p1
    keyObj = keys.Key(key)
    comment = '%s@%s' % (getpass.getuser(), socket.gethostname())
    filepath.FilePath(options['filename']).setContent(
        keyObj.toString('openssh', options['pass']))
    # 33152 == 0o100600: regular file, readable/writable by the owner only.
    os.chmod(options['filename'], 33152)
    filepath.FilePath(options['filename'] + '.pub').setContent(
        keyObj.public().toString('openssh', comment))
    print 'Your identification has been saved in %s' % options['filename']
    print 'Your public key has been saved in %s.pub' % options['filename']
    print 'The key fingerprint is:'
    print keyObj.fingerprint()
if __name__ == '__main__':
    # Allow running the module directly (normally invoked via the
    # "ckeygen" console script).
    run()
|
lowRISC/ibex | vendor/google_riscv-dv/pygen/experimental/riscv_instr_base.py | Python | apache-2.0 | 20,190 | 0.005002 | """Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import utils
import random
import constraint
from bitstring import BitArray, BitStream
class riscv_instr_base:
max_instr_length = 11
# Missing parts: latency
def __init__(self, name=""):
self.name = name
self.instr_group = "Instruction Group"
self.instr_format = "Instruction Format"
self.instr_category = "Instruction Category"
self.instr_name = "Instruction Name"
self.instr_imm_t = "Instruction Immediate Type"
self.instr_src2 = "Instruction Source 2"
self.instr_src1 = "Instruction Source 1"
self.instr_rd = "Instruction Destination"
self.imm = "Instruction Immediate"
self.imm_length = "Instruction Immediate Length"
self.imm_str = ""
self.csr = "CSR"
self.comment = ""
self.has_label = 1
self.label = ""
self.idx = -1
self.atomic = 0 # As of now, we don't support atomic instructions.
self.is_compressed = 0 # As of now, compressed instructions are not supported
self.is_illegal_instr = 0
self.is_local_numeric_label = 0
self.is_pseudo_instr = "Is it a pseudo instruction or not"
self.branch_assigned = 0
self.process_load_store = 1
self.solution = "A random solution which meets given constraints"
self.problem = constraint.Problem(constraint.MinConflictsSolver())
# Convert an instruction to its assembly form.
def convert2asm(self):
asm = name = self.solution[self.instr_name]
format = self.solution[self.instr_format]
category = self.solution[self.instr_category]
src2 = self.solution[self.instr_src2]
src1 = self.solution[self.instr_src1]
destination = self.solution[self.instr_rd]
csr = self.solution[self.csr]
if category != "SYSTEM":
if format == "J_FORMAT" or format == "U_FORMAT":
asm += " {}, {}".format(destination, self.get_imm())
elif format == "I_FORMAT":
if name == "NOP":
asm = "nop"
elif name == "FENCE":
asm = "fence"
elif name == "FENCEI":
asm = "fence.i"
elif category == "LOAD":
asm += " {}, {}({})".format(destination, self.get_imm(), src1)
elif category == "CSR":
asm += " {}, {}, {}".format(destination, hex(csr), self.get_imm())
else:
asm += " {}, {}, {}".format(destination, src1, self.get_im | m())
elif format == "S_FORMAT" or format == "B_FORMAT":
if category == "STORE":
asm += " {}, {}({})".format(src2, self.get_imm(), src1)
else:
asm += " {}, {}, {}".format(src1, src2, self.get_imm())
elif format == "R_FORMAT":
if category == "CSR":
| asm += " {}, {}, {}".format(destination, hex(csr), src1)
else:
asm += " {}, {}, {}".format(destination, src1, src2)
else:
if name == "BREAK":
asm = ".option norvc;ebreak;.option rvc;"
if self.comment != "":
asm += " # {}".format(self.comment)
return asm.lower()
# Instruction to binary format
# TODO: to do
def convert2bin(self, sol):
name = sol[self.instr_name]
format = sol[self.instr_format]
imm = sol[self.imm]
rd = sol[self.instr_rd]
if format == "J_FORMAT":
binary = ""
def post_randomize(self):
imm_length = self.solution[self.imm_length]
imm_t = self.solution[self.instr_imm_t]
imm = self.solution[self.imm]
imm_bit = BitArray(int=imm, length=32)
imm_mask = BitArray(uint=4294967295, length=32)
imm_mask = imm_mask << imm_length
if imm_t == "UIMM" or imm_t == "NZUIMM":
imm_bit = imm_bit & ~imm_mask
imm = imm_bit.int
else:
if imm_bit[-imm_length]:
imm_bit = imm_bit | imm_mask
imm = imm_bit.int
else:
imm_bit = imm_bit & ~imm_mask
imm = imm_bit.int
if (imm_t == "NZIMM" or imm_t == "NZUIMM") and imm == 0:
imm = random.randrange(1, 2**(imm_length - 1) - 1)
if self.imm_str == "":
self.imm_str = int(imm)
def get_imm(self):
return self.imm_str
def problem_definition(self,
no_branch=0,
no_load_store=1,
enable_hint_instr=0,
no_name_c=0):
# Adding variables to the problem
self.problem.addVariable(self.instr_group, utils.riscv_instr_group_t)
self.problem.addVariable(self.instr_format, utils.riscv_instr_format_t)
self.problem.addVariable(self.instr_category, utils.riscv_instr_category_t)
self.problem.addVariable(self.instr_name, utils.riscv_instr_name_t)
self.problem.addVariable(self.instr_imm_t, utils.imm_t)
self.problem.addVariables([self.instr_src2, self.instr_src1, self.instr_rd],
utils.riscv_reg_t)
self.problem.addVariable(self.imm_length, [5, 6, 8, 11, 20])
# problem.addVariable(self.imm, range(0x00000000, 0xffffffff)) # doesn't
# work because: OverflowError: Python int too large to convert to C ssize_t
# Need to change the constraint to a soft constraint, as the default_c in
# the pseudo instruction class is in conflict with this one
if self.imm not in self.problem._variables:
self.problem.addVariable(self.imm, range(0x0000, 0xffff))
self.problem.addVariable(self.csr, range(0x000, 0xfff))
def default_c(is_pseudo_instr):
if not is_pseudo_instr:
return True
def name_c(name, group, format, category, imm_t):
condition = (
# Load instructions
(name == "LB" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM") or
(name == "LH" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM") or
(name == "LW" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM") or
(name == "LBU" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM") or
(name == "LHU" and group == "RV32I" and format == "I_FORMAT" and
category == "LOAD" and imm_t == "IMM")
# Store instructions
or (name == "SB" and group == "RV32I" and format == "S_FORMAT" and
category == "STORE" and imm_t == "IMM") or
(name == "SH" and group == "RV32I" and format == "S_FORMAT" and
category == "STORE" and imm_t == "IMM") or
(name == "SW" and group == "RV32I" and format == "S_FORMAT" and
category == "STORE" and imm_t == "IMM")
# Shift istructions
or (name == "SLL" and group == "RV32I" and format == "R_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SLLI" and group == "RV32I" and format == "I_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SRL" and group == "RV32I" and format == "R_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SRLI" and group == "RV32I" and format == "I_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SRA" and group == "RV32I" and format == "R_FORMAT" and
category == "SHIFT" and imm_t == "IMM") or
(name == "SRAI" and group == "RV32I" and format == "I_FORMAT" and
category == "SHIFT" and imm_t == "IMM")
# Arithmetic instructions
or (name == "ADD" and group == "RV32I" and format == "R_FORMAT" and
category == "ARITHMETIC" and imm_t == "IMM") or
(name == "ADDI" and group == "RV32I" and format == "I_FORMAT" and
category == "ARITHMETIC" and imm_t == "IMM") or
(name == |
def _reset_sys_path():
    """Strip the leading sys.path entries that point at the py2app
    bundle's Resources directory (inserted generically by the bootstrap).

    Robustness fixes: tolerate a missing RESOURCEPATH variable (we are
    then not running from a bundle, so there is nothing to strip) and an
    emptied sys.path (avoids IndexError).
    """
    import sys, os
    resources = os.environ.get('RESOURCEPATH')
    if resources is None:
        return
    while sys.path and sys.path[0] == resources:
        del sys.path[0]
_reset_sys_path()
def _site_packages():
    """Add the external site-packages directories to sys.path.

    py2app bundles ship their own stdlib; this re-adds the interpreter's
    site-packages (and, for framework builds, the per-user Library
    location) so optional packages remain importable inside the bundle.
    """
    import site, sys, os
    # BUGFIX: sys.version[:3] yields "3.1" on Python >= 3.10; build the
    # version string from sys.version_info instead.
    pyver = '%d.%d' % (sys.version_info[0], sys.version_info[1])
    paths = []
    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        paths.append(os.path.join(prefix, 'lib', 'python' + pyver,
            'site-packages'))
    if os.path.join('.framework', '') in os.path.join(sys.prefix, ''):
        home = os.environ.get('HOME')
        if home:
            paths.append(os.path.join(home, 'Library', 'Python',
                pyver, 'site-packages'))
    # Work around for a misfeature in setuptools: easy_install.pth places
    # site-packages way too early on sys.path and that breaks py2app bundles.
    # NOTE: this hacks into an undocumented feature of setuptools and
    # might stop working without warning.
    sys.__egginsert = len(sys.path)
    for path in paths:
        site.addsitedir(path)
_site_packages()
"""
sys.argv emulation
This module starts a basic event loop to collect file- and url-open AppleEvents. Those get
converted to strings and stuffed into sys.argv. When that is done we continue starting
the application.
This is a workaround to convert scripts that expect filenames on the command-line to work
in a GUI environment. GUI applications should not use this feature.
NOTE: This module uses ctypes and not the Carbon modules in the stdlib because the latter
don't work in 64-bit mode and are also not available with python 3.x.
"""
import sys
import os
import time
import ctypes
import struct
class AEDesc (ctypes.Structure):
    # Mirrors Carbon's AEDesc struct: a four-char-code descriptor type
    # plus an opaque handle to the descriptor's data.
    _fields_ = [
        ('descKey', ctypes.c_int),
        ('descContent', ctypes.c_void_p),
    ]
class EventTypeSpec (ctypes.Structure):
    # (eventClass, eventKind) pair used to filter ReceiveNextEvent.
    _fields_ = [
        ('eventClass', ctypes.c_int),
        ('eventKind', ctypes.c_uint),
    ]
def _ctypes_setup():
    """Load the Carbon framework and declare argtypes/restype for every
    AppleEvent function the argv emulator calls, then return the CDLL.

    Without these declarations ctypes would assume int-sized arguments
    and results, which corrupts pointers on 64-bit builds.
    (An unused `timer_func` CFUNCTYPE declaration was removed.)
    """
    carbon = ctypes.CDLL('/System/Library/Carbon.framework/Carbon')

    # Common AppleEvent handler signature: (message, reply, refcon) -> int.
    ae_callback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p,
        ctypes.c_void_p, ctypes.c_void_p)

    carbon.AEInstallEventHandler.argtypes = [
        ctypes.c_int, ctypes.c_int, ae_callback,
        ctypes.c_void_p, ctypes.c_char ]
    carbon.AERemoveEventHandler.argtypes = [
        ctypes.c_int, ctypes.c_int, ae_callback,
        ctypes.c_char ]

    carbon.AEProcessEvent.restype = ctypes.c_int
    carbon.AEProcessEvent.argtypes = [ctypes.c_void_p]

    carbon.ReceiveNextEvent.restype = ctypes.c_int
    carbon.ReceiveNextEvent.argtypes = [
        ctypes.c_long, ctypes.POINTER(EventTypeSpec),
        ctypes.c_double, ctypes.c_char,
        ctypes.POINTER(ctypes.c_void_p)
    ]

    carbon.AEGetParamDesc.restype = ctypes.c_int
    carbon.AEGetParamDesc.argtypes = [
        ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
        ctypes.POINTER(AEDesc)]

    carbon.AECountItems.restype = ctypes.c_int
    carbon.AECountItems.argtypes = [ ctypes.POINTER(AEDesc),
        ctypes.POINTER(ctypes.c_long) ]

    carbon.AEGetNthDesc.restype = ctypes.c_int
    carbon.AEGetNthDesc.argtypes = [
        ctypes.c_void_p, ctypes.c_long, ctypes.c_int,
        ctypes.c_void_p, ctypes.c_void_p ]

    carbon.AEGetDescDataSize.restype = ctypes.c_int
    carbon.AEGetDescDataSize.argtypes = [ ctypes.POINTER(AEDesc) ]

    carbon.AEGetDescData.restype = ctypes.c_int
    carbon.AEGetDescData.argtypes = [
        ctypes.POINTER(AEDesc),
        ctypes.c_void_p,
        ctypes.c_int,
    ]

    carbon.FSRefMakePath.restype = ctypes.c_int
    carbon.FSRefMakePath.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint]

    return carbon
def _run_argvemulator(timeout = 60):
# Configure ctypes
carbon = _ctypes_setup()
# Is the emulator running?
running = [True]
timeout = [timeout]
# Configure AppleEvent handlers
ae_callback = carbon.AEInstallEventHandler.argtypes[2]
kAEInternetSuite, = struct.unpack('>i', b'GURL')
kAEISGetURL, = struct.unpack('>i', b'GURL')
kCoreEventClass, = struct.unpack('>i', b'aevt')
kAEOpenApplication, = struct.unpack('>i', b'oapp')
kAEOpenDocuments, = struct.unpack('>i', b'odoc')
keyDirectObject, = struct.unpack('>i', b'----')
typeAEList, = struct.unpack('>i', b'list')
typeChar, = struct.unpack('>i', b'TEXT')
typeFSRef, = struct.unpack('>i', b'fsrf')
FALSE = b'\0'
TRUE = b'\1'
eventLoopTimedOutErr = -9875
kEventClassAppleEvent, = struct.unpack('>i', b'eppc')
kEventAppleEvent = 1
@ae_callback
def open_app_handler(message, reply, refcon):
# Got a kAEOpenApplication event, which means we can
# start up. On some OSX versions this event is even
# sent when an kAEOpenDocuments or kAEOpenURLs event
# is sent later on.
#
# Therefore don't set running to false, but reduce the
# timeout to at most two seconds beyond the current time.
timeout[0] = min(timeout[0], time.time() - start + 2)
#running[0] = False
return 0
carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenApplication,
open_app_handler, 0, FALSE)
@ae_callback
def open_file_handler(message, reply, refcon):
listdesc = AEDesc()
sts = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList,
ctypes.byref(listdesc))
if sts != 0:
print("argvemulator warning: cannot unpack open document event")
running[0] = False
return
item_count = ctypes.c_long()
sts = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
if sts != 0:
print("argvemulator warning: cannot unpack open document event")
running[0] = False
return
desc = AEDesc()
for i in range(item_count.value):
sts = carbon.AEGetNthDesc(ctypes.byref(listdesc), i+1, typeFSRef, 0, ctypes.byref(desc))
if sts != 0:
print("argvemulator warning: cannot unpack open document event")
running[0] = False
return
sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
buf = ctypes.create_string_buffer(sz)
sts = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
if sts != 0:
print("argvemulator warning: cannot extract open document event")
continue
fsref = buf
buf = ctypes.create_string_buffer(1024)
sts = carbon.FSRefMakePath(ctypes.byref(fsref), buf, 1023)
if sts != 0:
print("argvemulator warning: cannot extract open document event")
continue
if sys.version_info[0] > 2:
sys.argv.append(buf.value.decode('utf-8'))
else:
sys.argv.append(buf.value)
running[0] = False
return 0
carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenDocuments,
open_file_handler, 0, FALSE)
@ae_callback
def open_url_handler(message, reply, refcon):
listdesc = AEDesc()
ok = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList,
ctypes.byref(listdesc))
if ok != 0:
print("argvemulator warning: cannot unpack open document event")
running[0] = False
return
item_count = ctypes.c_long()
sts = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
if sts != 0:
print("argvemulator warning: cannot unpack open url event")
running[0] = False
return
desc = AEDesc()
for i in range(item_count.value):
sts = carbon.AEGetNthDesc(ctypes.byref(listdesc), i+1, typeChar, 0, ctypes.byref(desc))
if sts != 0:
prin |
# template global functions
# make sure not to conflict with built-ins:
# http://jinja.pocoo.org/docs/2.9/templates/#list-of-global-functions
from flask.helpers import url_for as _url_for
from flask_paginate import Pagination
def paginate(page, total, per_page, config):
    """Build a pagination object, pulling record name and display message
    from the app-level MOMO_PAGINATION_* configuration values."""
    return _paginate(
        page=page,
        total=total,
        per_page=per_page,
        record_name=config['MOMO_PAGINATION_RECORD_NAME'],
        display_msg=config['MOMO_PAGINATION_DISPLAY_MSG'],
    )
def _paginate(page, total, per_page, record_name, display_msg):
    """Thin wrapper around flask_paginate.Pagination with the project's
    fixed options (Bootstrap 3, single pages hidden)."""
    options = dict(
        page=page,
        total=total,
        per_page=per_page,
        bs_version=3,
        show_single_page=False,
        record_name=record_name,
        display_msg=display_msg,
    )
    return Pagination(**options)
def get_page(request):
    """Return the requested page number from the ?page= query argument,
    defaulting to 1 when absent or not an int."""
    return request.args.get('page', default=1, type=int)
def toggle_arg(endpoint, request, arg, value, **kwargs):
    """Toggle a request argument and rebuild the URL.

    :param endpoint: endpoint name.
    :param request: request object.
    :param arg: request argument name to toggle (removed if present,
        added with *value* otherwise).
    :param value: initial value for the toggled argument.
    :param kwargs: keyword arguments to preserve.
    """
    params = request.args.to_dict()
    if arg in params:
        del params[arg]
    else:
        params[arg] = value
    # View args and explicit kwargs take precedence over query args.
    params.update(request.view_args)
    params.update(kwargs)
    return _url_for(endpoint, **params)
|
# coding: utf-8
# Copyright (C) 2017 Open Path View, Maison Du Libre
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Benjamin BERNARD <benjamin.bernard@openpathview.fr>
# Email: team@openpathview.fr
# Description: Unit test for ressource-db service.
import pytest
from opv_import.services import RessourceManager
from opv_import.model import RederbroMeta, OrientationAngle, GeoPoint, Lot, ImageSet, CameraImage
from o | pv_api_client import ressources
from unittest.mock import patch, call, MagicMock
from path import Path
from typing import List
ID_MALETTE = 1  # suitcase (malette) id shared by all fixtures below

def cam_img(p, ts):
    """Build a CameraImage fixture for path *p* with a forced timestamp."""
    image = CameraImage(path=Path(p))
    image._ts = ts  # bypass EXIF reading by setting the cached timestamp
    return image
class TestRessourceManager(object):
    @patch("opv_directorymanagerclient.DirectoryManagerClient")
    @patch("opv_api_client.RestClient")
    def test_make_campaign(self, mock_dbrest_client, mock_dm_client):
        """make_campaign should build a Campaign ressource, fill its
        fields and call create() exactly once."""
        # Have the API client's make() return a pre-baked Campaign mock.
        mock_make = MagicMock()
        mock_campaign_create = MagicMock()
        mock_dbrest_client.make = mock_make
        generated_campaign = MagicMock(ressources.Campaign)
        generated_campaign.id_campaign = 42
        generated_campaign.create = mock_campaign_create
        mock_make.return_value = generated_campaign
        ress_man = RessourceManager(opv_api_client=mock_dbrest_client, opv_dm_client=mock_dm_client, id_malette=ID_MALETTE)
        result_campaign = ress_man.make_campaign(name="my campaign", id_rederbro=1, description="my description")
        # NOTE(review): this ASSIGNS into call_args_list instead of
        # asserting equality, so it verifies nothing. Probably meant:
        #   assert mock_make.call_args_list[0] == call(ressources.Campaign)
        mock_make.call_args_list[0] = call(ressources.Campaign)
        assert result_campaign.id_malette == ID_MALETTE, "Wrong id malette on campaign"
        assert result_campaign.name == "my campaign", "Wrong campaign name"
        assert result_campaign.id_rederbro == 1, "Wrong id_rederbro"
        assert result_campaign.description == "my description", "Wrong description"
        assert mock_campaign_create.call_count == 1
@patch("opv_directorymanagerclient.DirectoryManagerClient")
@patch("opv_api_client.RestClient")
def test__model_gp_error_to_db(self, mock_dbrest_client, mock_dm_client):
ress_man = RessourceManager(opv_api_client=mock_dbrest_client, opv_dm_client=mock_dm_client,
id_malette=ID_MALETTE)
assert ress_man._model_gp_error_to_db(bools={0:False, 1:False}) == 0
assert ress_man._model_gp_error_to_db(bools={0: True, 1: False}) == 1
assert ress_man._model_gp_error_to_db(bools={0: False, 1: True}) == 2
assert ress_man._model_gp_error_to_db(bools={0: True, 1: True}) == 3
    @patch("opv_directorymanagerclient.DirectoryManagerClient")
    @patch("opv_api_client.RestClient")
    def test_make_sensors(self, mock_dbrest_client, mock_dm_client):
        """make_sensors should copy GPS/orientation data from a
        RederbroMeta into a new Sensors ressource and create() it once."""
        # mocking a RederbroMeta
        meta = MagicMock(RederbroMeta)
        meta.geopoint = MagicMock(GeoPoint)
        meta.geopoint.coordinates = [0, 1, 2]
        meta.orientation = MagicMock(OrientationAngle)
        meta.orientation.degree = 42.01
        meta.orientation.minutes = 4.04
        meta.get_timestamp = MagicMock()
        meta.get_timestamp.return_value = 1509154772  # Sat Oct 28 01:39:32 2017, UTC timestamp
        meta.gopro_errors = {0: True, 1: False}
        # OPV_client dbrest mock: make() returns a pre-baked Sensors mock.
        mock_make = MagicMock()
        mock_sensors_create = MagicMock()
        mock_dbrest_client.make = mock_make
        generated_sensors = MagicMock(ressources.Sensors)
        generated_sensors.id_sensors = 42
        generated_sensors.create = mock_sensors_create
        mock_make.return_value = generated_sensors
        ress_man = RessourceManager(opv_api_client=mock_dbrest_client, opv_dm_client=mock_dm_client,
                                    id_malette=ID_MALETTE)
        result_sensors = ress_man.make_sensors(meta=meta)
        # NOTE(review): assignment, not an assertion -- verifies nothing.
        # Probably meant:
        #   assert mock_make.call_args_list[0] == call(ressources.Sensors)
        mock_make.call_args_list[0] = call(ressources.Sensors)
        assert result_sensors.id_malette == ID_MALETTE, "Wrong id malette on campaign"
        assert result_sensors.gps_pos.coordinates == meta.geopoint.coordinates
        assert result_sensors.degrees == meta.orientation.degree
        assert result_sensors.minutes == meta.orientation.minutes
        assert mock_sensors_create.call_count == 1
    @patch("opv_directorymanagerclient.DirectoryManagerClient")
    @patch("opv_api_client.RestClient")
    @patch("os.link")
    @patch("path.Path.copy")
    def test_make_picture_path_no_hardlink(self, mock_path_copy, mock_os_link, mock_dbrest_client, mock_dm_client):
        """With use_hardlink=False every picture must be copied (one
        Path.copy per camera image) and os.link never used."""
        # mocking DM Client: Open() is a context manager yielding a
        # known (uuid, local directory) pair.
        dm_ctx = MagicMock()
        dm_ctx.__enter__ = MagicMock()
        dm_ctx.__enter__.return_value = ("uuid-42", "/tmp")
        dm_ctx.__exit__ = MagicMock()
        mock_dm_client.Open.return_value = dm_ctx
        # Image set with one picture per camera (APN0 and APN1).
        img_set = ImageSet(l={
            0: cam_img("picPath/APN0/DCIM/100S3D_L/3D_L0001.JPG", 10),
            1: cam_img("picPath/APN1/DCIM/100S3D_L/3D_L0000.JPG", 15)
        })
        ress_man_no = RessourceManager(opv_api_client=mock_dbrest_client, opv_dm_client=mock_dm_client,
                                       id_malette=ID_MALETTE, use_hardlink=False)
        result_uuid = ress_man_no.make_picture_path(img_set=img_set)
        assert result_uuid == "uuid-42"
        assert mock_path_copy.call_count == 2, "Path copy wasn't call to copy the pictures"
        assert mock_os_link.call_count == 0, "Os.link should be called only in hardlink mode"
@patch("opv_directorymanagerclient.DirectoryManagerClient")
@patch("opv_api_client.RestClient")
@patch("os.link")
@patch("path.Path.copy")
def test_make_picture_path_hardlink(self, mock_path_copy, mock_os_link, mock_dbrest_client, mock_dm_client):
# mocking DM Client
dm_ctx = MagicMock()
dm_ctx.__enter__ = MagicMock()
dm_ctx.__enter__.return_value = ("uuid-42", "/tmp")
dm_ctx.__exit__ = MagicMock()
mock_dm_client.Open.return_value = dm_ctx
# Image set
img_set = ImageSet(l={
0: cam_img("picPath/APN0/DCIM/100S3D_L/3D_L0001.JPG", 10),
1: cam_img("picPath/APN1/DCIM/100S3D_L/3D_L0000.JPG", 15)
})
ress_man_no = RessourceManager(opv_api_client=mock_dbrest_client, opv_dm_client=mock_dm_client,
id_malette=ID_MALETTE, use_hardlink=True)
result_uuid = ress_man_no.make_picture_path(img_set=img_set)
assert result_uuid == "uuid-42"
assert mock_path_copy.call_count == 0, "Path copy wasn't call to copy the pictures"
assert mock_os_link.call_count == 2, "Os.link should be called only in hardlink mode"
@patch("opv_directorymanagerclient.DirectoryManagerClient")
@patch("opv_api_client.RestClient")
def test_make_lot(self, mock_dbrest_client, mock_dm_client):
# mocking a lot
meta = MagicMock(RederbroMeta)
meta.geopoint = MagicMock(GeoPoint)
meta.geopoint.coordinates = [0, 1, 2]
meta.orientation = MagicMock(OrientationAngle)
meta.orientation.degree = 42.01
meta.orientation.minutes = 4.04
meta.get_timestamp = MagicMock()
meta.get_timestamp.return_value = 1509154772 # Sat Oct 28 01:39:32 2017, UTC timestamp
meta.gopro_errors = {0: True, 1: False}
# OPV_client dbrest mock
mock_make = MagicMock()
mock_sensors_create = MagicMock()
mock_dbrest_client.make = mock_make
generated_sensors = MagicMock(ressources.Sensors)
generated_sensors.id_sensors = 42
generated_sensors.create = mock_sensors_create
mock_make.return_value = generated_sensors
ress_ma |
from Screen import Screen
from Components.ActionMap import ActionMap
from Components.Language import language
from Components.config import config
from Components.Sources.List import List
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.language_cache import LANG_TEXT
def _cached(x):
    """Look up pre-generated translation string *x* for the currently
    configured OSD language; empty string when missing."""
    return LANG_TEXT.get(config.osd.language.value, {}).get(x, "")
from Screens.Rc import Rc
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from Tools.LoadPixmap import LoadPixmap
def LanguageEntryComponent(file, name, index):
    """Build an (index, name, flag-pixmap) entry for the language list.

    Falls back to the 'missing' flag image when no country PNG exists
    for *file*. (Fixed ``png == None`` to the idiomatic ``png is None``.)
    """
    png = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "countries/" + file + ".png"))
    if png is None:
        png = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "countries/missing.png"))
    res = (index, name, png)
    return res
class LanguageSelection(Screen):
    """Screen that lets the user pick the OSD language.

    The language active when the screen opens is remembered so that
    cancel() can roll it back; moving the selection applies the language
    locally right away so the UI redraws in the highlighted language.
    """
    def __init__(self, session):
        Screen.__init__(self, session)
        # Remember the current language so cancel() can restore it.
        self.oldActiveLanguage = language.getActiveLanguage()
        self.list = []
        self["languages"] = List(self.list)
        self["languages"].onSelectionChanged.append(self.changed)
        self.updateList()
        self.onLayoutFinish.append(self.selectActiveLanguage)
        self["actions"] = ActionMap(["OkCancelActions"],
        {
            "ok": self.save,
            "cancel": self.cancel,
        }, -1)
    def selectActiveLanguage(self):
        # Move the cursor onto the entry matching the active language.
        activeLanguage = language.getActiveLanguage()
        pos = 0
        for x in self.list:
            if x[0] == activeLanguage:
                self["languages"].index = pos
                break
            pos += 1
    def save(self):
        # OK pressed: persist the selection and close.
        self.run()
        self.close()
    def cancel(self):
        # Restore the language that was active when the screen opened.
        language.activateLanguage(self.oldActiveLanguage)
        self.close()
    def run(self, justlocal = False):
        """Apply the highlighted language. With justlocal=True only the
        OSD config value and the screen title are updated (nothing is
        activated system-wide or marked as selected)."""
        print "updating language..."
        lang = self["languages"].getCurrent()[0]
        config.osd.language.value = lang
        config.osd.language.save()
        self.setTitle(_cached("T2"))
        if justlocal:
            return
        language.activateLanguage(lang)
        config.misc.languageselected.value = 0
        config.misc.languageselected.save()
        print "ok"
    def updateList(self):
        # Rebuild the list of selectable languages (flag, name, locale id).
        languageList = language.getLanguageList()
        if not languageList: # no language available => display only english
            list = [ LanguageEntryComponent("en", "English", "en_EN") ]
        else:
            list = [ LanguageEntryComponent(file = x[1][2].lower(), name = x[1][0], index = x[0]) for x in languageList]
        self.list = list
        self["languages"].list = list
    def changed(self):
        # Selection moved: re-render immediately in the new language.
        self.run(justlocal = True)
class LanguageWizard(LanguageSelection, Rc):
    """First-run variant of LanguageSelection with remote-control help:
    shows which RC keys to use and a translated hint text that is
    refreshed whenever the selection changes."""
    def __init__(self, session):
        LanguageSelection.__init__(self, session)
        Rc.__init__(self)
        self.onLayoutFinish.append(self.selectKeys)
        self["wizard"] = Pixmap()
        self["text"] = Label()
        self.setText()
    def selectKeys(self):
        # Highlight the up/down keys on the on-screen remote control.
        self.clearSelectedKeys()
        self.selectKey("UP")
        self.selectKey("DOWN")
    def changed(self):
        # Also refresh the hint text after applying the new language.
        self.run(justlocal = True)
        self.setText()
    def setText(self):
        # "T1" is the wizard hint in the currently selected language.
        self["text"].setText(_cached("T1"))
#
# Copyright (C) 2010-2017 Samuel Abels
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
from .parser import Parser
import inspect
# Re-export every public, non-module name defined above (currently just
# Parser); imported modules and underscore names are filtered out.
__all__ = [name for name, obj in list(locals().items())
           if not (name.startswith('_') or inspect.ismodule(obj))]
|
from .endpoint import Endpoint
from .exceptions import MissingRequiredFieldError
from .. import ViewItem, PaginationItem
import logging
logger = logging.getLogger('tableau.endpoint.views')
c | lass Views(Endpoint):
@property
def baseurl(self):
return "{0}/s | ites/{1}".format(self.parent_srv.baseurl, self.parent_srv.site_id)
def get(self, req_options=None):
logger.info('Querying all views on site')
url = "{0}/views".format(self.baseurl)
server_response = self.get_request(url, req_options)
pagination_item = PaginationItem.from_response(server_response.content)
all_view_items = ViewItem.from_response(server_response.content)
return all_view_items, pagination_item
def populate_preview_image(self, view_item):
if not view_item.id or not view_item.workbook_id:
error = "View item missing ID or workbook ID."
raise MissingRequiredFieldError(error)
url = "{0}/workbooks/{1}/views/{2}/previewImage".format(self.baseurl,
view_item.workbook_id,
view_item.id)
server_response = self.get_request(url)
view_item._preview_image = server_response.content
logger.info('Populated preview image for view (ID: {0})'.format(view_item.id))
|
import datetime
import itertools
import unittest
from copy import copy
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.deletion import CASCADE, PROTECT
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField,
TextField, TimeField,
)
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
from django.db.models.indexes import Index
from django.db.transaction import atomic
from django.test import (
TransactionTestCase, mock, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils.timezone import UTC
from .fields import (
CustomManyToManyField, InheritedManyToManyField, MediumBlobField,
)
from .models import (
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor,
BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagIndexed,
TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests that the schema-alteration code works correctly.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Note,
Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
]
# Utility functions
    def setUp(self):
        """Reset the per-test list of dynamically created models."""
        # local_models should contain test dependent model classes that will be
        # automatically removed from the app cache on test tear down.
        self.local_models = []
    def tearDown(self):
        """Drop test tables and scrub dynamically created models from the
        test app registry so they don't leak between tests."""
        # Delete any tables made for our models
        self.delete_tables()
        new_apps.clear_cache()
        for model in new_apps.get_models():
            model._meta._expire_cache()
        if 'schema' in new_apps.all_models:
            for model in self.local_models:
                # Also remove any auto-created M2M "through" models that
                # were registered alongside the local model.
                for many_to_many in model._meta.many_to_many:
                    through = many_to_many.remote_field.through
                    if through and through._meta.auto_created:
                        del new_apps.all_models['schema'][through._meta.model_name]
                del new_apps.all_models['schema'][model._meta.model_name]
    def delete_tables(self):
        "Deletes all model tables for our models for a clean test environment"
        converter = connection.introspection.table_name_converter
        with atomic():
            # FK checks are disabled so tables can be dropped in any order.
            connection.disable_constraint_checking()
            table_names = connection.introspection.table_names()
            for model in itertools.chain(SchemaTests.models, self.local_models):
                tbl = converter(model._meta.db_table)
                if tbl in table_names:
                    with connection.schema_editor() as editor:
                        editor.delete_model(model)
                    table_names.remove(tbl)
            connection.enable_constraint_checking()
    def column_classes(self, model):
        """Return {column_name: (field_type, description)} for the model's
        table, normalizing backend quirks along the way."""
        with connection.cursor() as cursor:
            columns = {
                d[0]: (connection.introspection.get_field_type(d[1], d), d)
                for d in connection.introspection.get_table_description(
                    cursor,
                    model._meta.db_table,
                )
            }
        # SQLite has a different format for field_type
        for name, (type, desc) in columns.items():
            if isinstance(type, tuple):
                columns[name] = (type[0], desc)
        # SQLite also doesn't error properly
        if not columns:
            raise DatabaseError("Table does not exist (empty pragma)")
        return columns
    def get_indexes(self, table):
        """
        Get the indexes on the table using a new cursor.
        """
        with connection.cursor() as cursor:
            return connection.introspection.get_indexes(cursor, table)
    def get_constraints(self, table):
        """
        Get the constraints on a table using a new cursor.
        """
        with connection.cursor() as cursor:
            return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details['columns'] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
    def check_added_field_default(self, schema_editor, model, field, field_name, expected_default,
                                  cast_function=None):
        """Add *field* via *schema_editor*, then assert the database-level
        default stored for *field_name* equals *expected_default*.

        *cast_function* converts the raw DB value first, for backends that
        return a different Python type than expected.
        """
        with connection.cursor() as cursor:
            schema_editor.add_field(model, field)
            cursor.execute("SELECT {} FROM {};".format(field_name, model._meta.db_table))
            database_default = cursor.fetchall()[0][0]
            if cast_function and not type(database_default) == type(expected_default):
                database_default = cast_function(database_default)
            self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {'fks': 0, 'uniques': 0, 'indexes': 0}
for c in constraints.values():
if c['columns'] == [column]:
if c['foreign_key'] == fk_to:
counts['fks'] += 1
if c['unique']:
counts['uniques'] += 1
elif c['index']:
counts['indexes'] += 1
return counts
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature('supports_foreign_keys')
def test_fk(self):
"Tests that creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Check that initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=da | tetime.datetime.now(),
)
# Repoint the FK constrain | t
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-07 15:13
from __future__ import unicode_literals
import django.co | ntrib.postgres.fields.jsonb
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds JSON metadata and full-text search
    # support to docs.Document.
    dependencies = [
        ('docs', '0002_extend_lang_field'),
    ]
    operations = [
        # Raw per-page metadata (arbitrary JSON blob).
        migrations.AddField(
            model_name='document',
            name='metadata',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
        ),
        # Search vector column; not editable, maintained outside the admin.
        migrations.AddField(
            model_name='document',
            name='search',
            field=django.contrib.postgres.search.SearchVectorField(editable=False, null=True),
        ),
        # GIN index so full-text lookups on `search` avoid sequential scans.
        migrations.AddIndex(
            model_name='document',
            index=django.contrib.postgres.indexes.GinIndex(fields=['search'], name='docs_docume_search_5dc895_gin'),
        ),
        # Enable the pg_trgm extension for trigram similarity queries.
        TrigramExtension(),
    ]
|
# file openpyxl/writer/straight_worksheet.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Write worksheets to xml representations in an optimized way"""
import datetime
import os
from tempfile import NamedTemporaryFile
from openpyxl.shared.compat import OrderedDict
from openpyxl.cell import get_column_letter, Cell
from openpyxl.worksheet import Worksheet
from openpyxl.shared.xmltools import (XMLGenerator, start_tag, end_tag, tag)
from openpyxl.shared.date_time import SharedDate
from openpyxl.shared.ooxml import MAX_COLUMN, MAX_ROW
from openpyxl.shared import NUMERIC_TYPES
from openpyxl.shared.exc import WorkbookAlreadySaved
from openpyxl.writer.excel import ExcelWriter
from openpyxl.writer.strings import write_string_table
from openpyxl.writer.styles import StyleWriter
from openpyxl.style import Style, NumberFormat
from openpyxl.shared.ooxml import (ARC_SHARED_STRINGS, PACKAGE_WORKSHEETS)
# Map a cell-value category to the cell type and style index used when
# dumping; style '1' is the date number-format registered just below.
STYLES = {'datetime' : {'type':Cell.TYPE_NUMERIC,
                        'style':'1'},
          'string':{'type':Cell.TYPE_STRING,
                    'style':'0'},
          'numeric':{'type':Cell.TYPE_NUMERIC,
                     'style':'0'},
          'formula':{'type':Cell.TYPE_FORMULA,
                     'style':'0'},
          'boolean':{'type':Cell.TYPE_BOOL,
                     'style':'0'},
          }

# Keep at most this many temp-file handles open at once (LRU eviction in
# DumpWorksheet.get_temporary_file).
DESCRIPTORS_CACHE_SIZE = 50

DATETIME_STYLE = Style()
DATETIME_STYLE.number_format.format_code = NumberFormat.FORMAT_DATE_YYYYMMDD2

# Worst-case dimension reference (e.g. 'A1:XFD1048576') reserved in the
# header before the real extent is known.
BOUNDING_BOX_PLACEHOLDER = 'A1:%s%d' % (get_column_letter(MAX_COLUMN), MAX_ROW)
def create_temporary_file(suffix=''):
    """Create a named temporary file on disk and return its path.

    The file is created with ``delete=False`` so it survives the handle
    being closed; callers are responsible for removing it later.
    """
    fobj = NamedTemporaryFile(mode='w+', suffix=suffix,
                              prefix='openpyxl.', delete=False)
    filename = fobj.name
    # Close the handle immediately: only the path is needed here, and
    # leaving it open would leak one file descriptor per call.
    fobj.close()
    return filename
class DumpWorksheet(Worksheet):
    """
    .. warning::

        You shouldn't initialize this yourself, use :class:`openpyxl.workbook.Workbook` constructor instead,
        with `optimized_write = True`.
    """
    def __init__(self, parent_workbook, title):
        """Create the worksheet and the trio of temp files it streams into.

        Header and content XML are written to separate temporary files and
        concatenated into ``_fileobj_name`` when :meth:`close` is called.
        """
        Worksheet.__init__(self, parent_workbook, title)
        # Bounding box of written cells, used for the <dimension> element.
        self._max_col = 0
        self._max_row = 0
        self._parent = parent_workbook
        self._fileobj_header_name = create_temporary_file(suffix='.header')
        self._fileobj_content_name = create_temporary_file(suffix='.content')
        self._fileobj_name = create_temporary_file()
        self._shared_date = SharedDate()
        self._string_builder = self._parent.strings_table_builder
    def get_temporary_file(self, filename):
        """Return an open handle for *filename*, via a bounded LRU cache.

        Raises WorkbookAlreadySaved when *filename* is None, i.e. after
        ``_unset_temp_files`` has run.
        """
        if filename in self._descriptors_cache:
            fobj = self._descriptors_cache[filename]
            # re-insert the value so it does not get evicted
            # from cache soon
            del self._descriptors_cache[filename]
            self._descriptors_cache[filename] = fobj
            return fobj
        else:
            if filename is None:
                raise WorkbookAlreadySaved('this workbook has already been saved '
                                           'and cannot be modified or saved anymore.')
            fobj = open(filename, 'r+')
            self._descriptors_cache[filename] = fobj
            # Evict the least-recently-used handle once the cache is full.
            if len(self._descriptors_cache) > DESCRIPTORS_CACHE_SIZE:
                filename, fileobj = self._descriptors_cache.popitem(last=False)
                fileobj.close()
            return fobj
    @property
    def _descriptors_cache(self):
        """Workbook-wide OrderedDict of open temp-file handles (LRU order).

        Stored on the parent workbook's ``_local_data`` — presumably a
        thread-local object, so each thread gets its own cache (confirm).
        """
        try:
            return self._parent._local_data.cache
        except AttributeError:
            # First access: create the ordered cache lazily.
            self._parent._local_data.cache = OrderedDict()
            return self._parent._local_data.cache
@property
def filename(self):
return self._fileobj_name
@property
def _temp_files(self):
return (self._fileobj_content_name,
self._fileobj_header_name,
self._fileobj_name)
def _unset_temp_files(self):
self._fileobj_header_name = None
self._fileobj_content_name = None
sel | f._fileobj_name = None
def write_header(self):
fobj = self.get_temporary_file(filename=self._fileobj_header_name)
doc = XMLGenerator(fobj, 'utf-8')
start_tag(doc, 'worksheet',
{'xml:space': 'preserve',
| 'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})
start_tag(doc, 'sheetPr')
tag(doc, 'outlinePr',
{'summaryBelow': '1',
'summaryRight': '1'})
end_tag(doc, 'sheetPr')
tag(doc, 'dimension', {'ref': 'A1:%s' % (self.get_dimensions())})
start_tag(doc, 'sheetViews')
start_tag(doc, 'sheetView', {'workbookViewId': '0'})
tag(doc, 'selection', {'activeCell': 'A1',
'sqref': 'A1'})
end_tag(doc, 'sheetView')
end_tag(doc, 'sheetViews')
tag(doc, 'sheetFormatPr', {'defaultRowHeight': '15'})
start_tag(doc, 'sheetData')
    def close(self):
        """Assemble the final worksheet file from the header + content parts."""
        self._close_content()
        self._fileobj = self.get_temporary_file(filename=self._fileobj_name)
        # Order matters: header first, then the streamed row content.
        self._write_fileobj(self._fileobj_header_name)
        self._write_fileobj(self._fileobj_content_name)
        self._fileobj.close()
    def _write_fileobj(self, fobj_name):
        """Append the whole contents of *fobj_name* to the final file."""
        fobj = self.get_temporary_file(filename=fobj_name)
        fobj.flush()
        fobj.seek(0)
        # Copy in 4 KiB chunks to keep memory usage flat for big sheets.
        while True:
            chunk = fobj.read(4096)
            if not chunk:
                break
            self._fileobj.write(chunk)
        fobj.close()
        self._fileobj.flush()
    def _close_content(self):
        """Close the still-open <sheetData> and <worksheet> elements."""
        doc = self._get_content_generator()
        end_tag(doc, 'sheetData')
        end_tag(doc, 'worksheet')
def get_dimensions(self):
if not self._max_col or not self._max_row:
return 'A1'
else:
return '%s%d' % (get_column_letter(self._max_col), (self._max_row))
    def _get_content_generator(self):
        """ XXX: this is ugly, but it allows to resume writing the file
        even after the handle is closed"""
        # when I'll recreate the XMLGenerator, it will start writing at the
        # begining of the file, erasing previously entered rows, so we have
        # to move to the end of the file before adding new tags
        handle = self.get_temporary_file(filename=self._fileobj_content_name)
        handle.seek(0, 2)  # 2 == os.SEEK_END: append rather than overwrite
        doc = XMLGenerator(out=handle)
        return doc
def append(self, row):
"""
:param row: iterable containing values to append
:type row: iterable
"""
doc = self._get_content_generator()
self._max_row += 1
span = len(row)
self._max_col = max(self._max_col, span)
row_idx = self._max_row
attrs = {'r': '%d' % row_idx,
'spans': '1:%d' % span}
start_tag(doc, 'row', attrs)
for col_idx, cell in enumerate(row):
if cell is None:
continue
coordinate = '%s%d' % (get_column_letter(col_idx + 1), ro |
tfroehlich82/erpnext | erpnext/stock/doctype/item/test_item.py | Python | gpl-3.0 | 10,134 | 0.025952 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.test_runner import make_test_objects
from erpnext.controllers.item_variant import (create_variant, ItemVariantExistsError,
InvalidItemAttributeValueError, get_variant)
from erpnext.stock.doctype.item.item import StockExistsForTemplate
from frappe.model.rename_doc import rename_doc
from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry
from erpnext.stock.get_item_details import get_item_details
# Presumably consumed by the frappe test runner: skip auto-creation of BOM
# fixtures and load Warehouse fixtures before these tests — confirm.
test_ignore = ["BOM"]
test_dependencies = ["Warehouse"]
def make_item(item_code, properties=None):
    """Return the Item named *item_code*, creating and inserting it if needed."""
    if frappe.db.exists("Item", item_code):
        return frappe.get_doc("Item", item_code)

    new_item = frappe.get_doc({
        "doctype": "Item",
        "item_code": item_code,
        "item_name": item_code,
        "description": item_code,
        "item_group": "Products"
    })
    if properties:
        new_item.update(properties)

    # Stock items need a warehouse before they can be inserted.
    if new_item.is_stock_item and not new_item.default_warehouse:
        new_item.default_warehouse = "_Test Warehouse - _TC"

    new_item.insert()
    return new_item
class TestItem(unittest.TestCase):
    def setUp(self):
        # Attribute values are cached on frappe.flags; reset between tests.
        frappe.flags.attribute_values = None

    def get_item(self, idx):
        """Return the test Item for record *idx*, inserting it on first use.

        (Repairs a stray extraction artifact in the get_doc call.)
        """
        item_code = test_records[idx].get("item_code")
        if not frappe.db.exists("Item", item_code):
            item = frappe.copy_doc(test_records[idx])
            item.insert()
        else:
            item = frappe.get_doc("Item", item_code)
        return item
def | test_get_item_details(self):
# delete modified item price record and make as per test_records
frappe.db.sql("""delete from `tabItem Price`""")
to_check = {
"item_code": "_Test Item",
"item_name": "_Test Item",
"description": "_Test Item 1",
"warehouse": "_Test Warehouse - _TC",
"income_account": "Sales - _TC",
"expense_account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center 2 - _TC",
"qty": 1.0,
"price_list_rate": 100.0,
"base_price_list_rate": 0.0,
"discount_percentage": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"batch_no": None,
"item_tax_rate": '{}',
"uom": "_Test UOM",
"conversion_factor": 1.0,
}
make_test_objects("Item Price")
print(frappe.get_all("Item Price"))
details = get_item_details({
"item_code": "_Test Item",
"company": "_Test Company",
"price_list": "_Test Price List",
"currency": "_Test Currency",
"doctype": "Sales Order",
"conversion_rate": 1,
"price_list_currency": "_Test Currency",
"plc_conversion_rate": 1,
"order_type": "Sales",
"customer": "_Test Customer",
"conversion_factor": 1,
"price_list_uom_dependant": 1,
"ignore_pricing_rule": 1
})
for key, value in to_check.iteritems():
self.assertEquals(value, details.get(key))
    def test_item_attribute_change_after_variant(self):
        """Removing attribute values that an existing variant uses must fail."""
        frappe.delete_doc_if_exists("Item", "_Test Variant Item-L", force=1)
        variant = create_variant("_Test Variant Item", {"Test Size": "Large"})
        variant.save()
        attribute = frappe.get_doc('Item Attribute', 'Test Size')
        attribute.item_attribute_values = []
        # reset flags
        frappe.flags.attribute_values = None
        self.assertRaises(InvalidItemAttributeValueError, attribute.save)
        frappe.db.rollback()

    def test_make_item_variant(self):
        """A variant may be created once; the same attribute set again errors."""
        frappe.delete_doc_if_exists("Item", "_Test Variant Item-L", force=1)
        variant = create_variant("_Test Variant Item", {"Test Size": "Large"})
        variant.save()

        # doing it again should raise error
        variant = create_variant("_Test Variant Item", {"Test Size": "Large"})
        variant.item_code = "_Test Variant Item-L-duplicate"
        self.assertRaises(ItemVariantExistsError, variant.save)
    def test_copy_fields_from_template_to_variants(self):
        """Fields whitelisted in Item Variant Settings must copy to variants."""
        frappe.delete_doc_if_exists("Item", "_Test Variant Item-XL", force=1)

        fields = [{'field_name': 'item_group'}, {'field_name': 'is_stock_item'}]
        allow_fields = [d.get('field_name') for d in fields]
        set_item_variant_settings(fields)

        # Make sure the 'Extra Large' attribute value exists before using it.
        if not frappe.db.get_value('Item Attribute Value',
                {'parent': 'Test Size', 'attribute_value': 'Extra Large'}, 'name'):
            item_attribute = frappe.get_doc('Item Attribute', 'Test Size')
            item_attribute.append('item_attribute_values', {
                'attribute_value' : 'Extra Large',
                'abbr': 'XL'
            })
            item_attribute.save()

        variant = create_variant("_Test Variant Item", {"Test Size": "Extra Large"})
        variant.item_code = "_Test Variant Item-XL"
        variant.item_name = "_Test Variant Item-XL"
        variant.save()

        # Changing the template must propagate the whitelisted fields.
        template = frappe.get_doc('Item', '_Test Variant Item')
        template.item_group = "_Test Item Group D"
        template.save()

        variant = frappe.get_doc('Item', '_Test Variant Item-XL')
        for fieldname in allow_fields:
            self.assertEquals(template.get(fieldname), variant.get(fieldname))

        # Restore the original item group.
        template = frappe.get_doc('Item', '_Test Variant Item')
        template.item_group = "_Test Item Group Desktops"
        template.save()
    def test_make_item_variant_with_numeric_values(self):
        """Numeric attributes must honour range/increment in variant naming."""
        # cleanup
        for d in frappe.db.get_all('Item', filters={'variant_of':
                '_Test Numeric Template Item'}):
            frappe.delete_doc_if_exists("Item", d.name)
        frappe.delete_doc_if_exists("Item", "_Test Numeric Template Item")
        frappe.delete_doc_if_exists("Item Attribute", "Test Item Length")
        frappe.db.sql('''delete from `tabItem Variant Attribute`
            where attribute="Test Item Length"''')
        frappe.flags.attribute_values = None

        # make item attribute
        frappe.get_doc({
            "doctype": "Item Attribute",
            "attribute_name": "Test Item Length",
            "numeric_values": 1,
            "from_range": 0.0,
            "to_range": 100.0,
            "increment": 0.5
        }).insert()

        # make template item
        make_item("_Test Numeric Template Item", {
            "attributes": [
                {
                    "attribute": "Test Size"
                },
                {
                    "attribute": "Test Item Length",
                    "numeric_values": 1,
                    "from_range": 0.0,
                    "to_range": 100.0,
                    "increment": 0.5
                }
            ],
            "default_warehouse": "_Test Warehouse - _TC",
            "has_variants": 1
        })

        variant = create_variant("_Test Numeric Template Item",
            {"Test Size": "Large", "Test Item Length": 1.1})
        self.assertEquals(variant.item_code, "_Test Numeric Template Item-L-1.1")
        variant.item_code = "_Test Numeric Variant-L-1.1"
        variant.item_name = "_Test Numeric Variant Large 1.1m"
        # 1.1 is not a multiple of the 0.5 increment, so saving must fail.
        self.assertRaises(InvalidItemAttributeValueError, variant.save)

        variant = create_variant("_Test Numeric Template Item",
            {"Test Size": "Large", "Test Item Length": 1.5})
        self.assertEquals(variant.item_code, "_Test Numeric Template Item-L-1.5")
        variant.item_code = "_Test Numeric Variant-L-1.5"
        variant.item_name = "_Test Numeric Variant Large 1.5m"
        variant.save()
    def test_item_merging(self):
        """rename_doc(..., merge=True) must combine bins from both items."""
        create_item("Test Item for Merging 1")
        create_item("Test Item for Merging 2")
        make_stock_entry(item_code="Test Item for Merging 1", target="_Test Warehouse - _TC",
            qty=1, rate=100)
        make_stock_entry(item_code="Test Item for Merging 2", target="_Test Warehouse 1 - _TC",
            qty=1, rate=100)

        rename_doc("Item", "Test Item for Merging 1", "Test Item for Merging 2", merge=True)

        # The source item disappears; both warehouses keep a Bin for the target.
        self.assertFalse(frappe.db.exists("Item", "Test Item for Merging 1"))

        self.assertTrue(frappe.db.get_value("Bin",
            {"item_code": "Test Item for Merging 2", "warehouse": "_Test Warehouse - _TC"}))
        self.assertTrue(frappe.db.get_value("Bin",
            {"item_code": "Test Item for Merging 2", "warehouse": "_Test Warehouse 1 - _TC"}))
def test_item_variant_by_manufacturer(self):
fields = [{'field_name': 'description'}, {'field_name': 'variant_based_on'}]
set_item_variant_settings(fields)
if frappe.db.exists('Item', '_Test Variant Mfg'):
frappe.delete_doc('Item', '_Test Variant Mfg')
if frappe.db.exists('Item', '_Test Variant Mfg-1'):
frappe.delete_doc('Item', '_Test Variant Mfg-1')
if frappe.db.exists('Manufacturer', 'MSG1'):
frappe.delete_doc('Manufacturer', 'MSG1')
template = frappe.get_doc(dict(
doctype='Item',
item_code='_Test Variant Mfg',
has_variant=1,
item_group='Products',
variant_based_on='Manufacturer'
)).insert()
manufacturer = frappe.get_doc(dict(
doctype='Manufacturer',
sh |
davidwaroquiers/abiflows | abiflows/fireworks/utils/databases.py | Python | gpl-2.0 | 10,111 | 0.00267 | # coding: utf-8
"""
Utilities for database insertion
"""
import gridfs
import json
import pymongo
import paramiko
import os
import stat
import shutil
from monty.json import MSONable
class MongoDatabase(MSONable):
    """
    MongoDB database class for access, insertion, update, ... in a MongoDB database
    """

    def __init__(self, host, port, database, username, password, collection, gridfs_collection=None):
        self._host = host
        self._port = port
        self._database = database
        self._username = username
        self._password = password
        self._collection = collection
        self._gridfs_collection = gridfs_collection
        self._connect()

    def _connect(self):
        """Open the client connection, authenticate and bind collections."""
        self.server = pymongo.MongoClient(host=self._host, port=self._port)
        self.database = self.server[self._database]
        if self._username:
            self.database.authenticate(name=self._username, password=self._password)
        self.collection = self.database[self._collection]
        if self._gridfs_collection is not None:
            self.gridfs = gridfs.GridFS(self.database, collection=self._gridfs_collection)
        else:
            self.gridfs = None

    def insert_entry(self, entry, gridfs_msonables=None):
        """Insert *entry* into the collection.

        Each MSONable in *gridfs_msonables* ({entry_key: object}) is
        json-dumped into GridFS and replaced in the entry by its file id.
        """
        if gridfs_msonables is not None:
            for entry_value, msonable_object in gridfs_msonables.items():
                dict_str = json.dumps(msonable_object.as_dict())
                file_obj = self.gridfs.put(dict_str, encoding='utf-8')
                entry[entry_value] = file_obj
        self.collection.insert(entry)

    def get_entry(self, criteria):
        """Return the single document matching *criteria* (raise if 0 or >1)."""
        count = self.collection.find(criteria).count()
        if count == 0:
            raise ValueError("No entry found with criteria ...")
        elif count > 1:
            raise ValueError("Multiple entries ({:d}) found with criteria ...".format(count))
        return self.collection.find_one(criteria)

    def save_entry(self, entry):
        """Save back an entry previously fetched (must carry its '_id')."""
        if '_id' not in entry:
            raise ValueError('Entry should contain "_id" field to be saved')
        self.collection.save(entry)

    def update_entry(self, query, entry_update, gridfs_msonables=None):
        """Update the unique document matching *query* with *entry_update*.

        When a GridFS-backed key is overwritten, the previous file id is
        preserved under "<key>_backup_<N>" (at most 10 backups per key).
        (Repairs two stray extraction artifacts; the logic is unchanged.)
        """
        count = self.collection.find(query).count()
        if count != 1:
            raise RuntimeError("Number of entries != 1, found : {:d}".format(count))
        entry = self.collection.find_one(query)
        entry.update(entry_update)
        if gridfs_msonables is not None:
            for entry_value, msonable_object in gridfs_msonables.items():
                if entry_value in entry:
                    # Find the first free backup slot for this key.
                    backup_current_entry_value = str(entry_value)
                    backup_number = 1
                    while True:
                        if backup_number > 10:
                            raise ValueError('Too many backups (10) for object with entry name "{}"'.format(entry_value))
                        if backup_current_entry_value in entry:
                            backup_current_entry_value = '{}_backup_{:d}'.format(entry_value, backup_number)
                            backup_number += 1
                            continue
                        entry[backup_current_entry_value] = entry[entry_value]
                        break
                dict_str = json.dumps(msonable_object.as_dict())
                file_obj = self.gridfs.put(dict_str, encoding='utf-8')
                entry[entry_value] = file_obj
        self.collection.save(entry)

    def as_dict(self):
        """
        Json-serializable dict representation of a MongoDatabase
        """
        dd = {"@module": self.__class__.__module__,
              "@class": self.__class__.__name__,
              "host": self._host,
              "port": self._port,
              "database": self._database,
              "username": self._username,
              "password": self._password,
              "collection": self._collection,
              "gridfs_collection": self._gridfs_collection}
        return dd

    @classmethod
    def from_dict(cls, d):
        """Rebuild (and reconnect) a MongoDatabase from its as_dict() output."""
        return cls(host=d['host'], port=d['port'], database=d['database'],
                   username=d['username'], password=d['password'], collection=d['collection'],
                   gridfs_collection=d['gridfs_collection'])
class StorageServer(MSONable):
    """
    Storage server class for moving files to/from a given server
    """

    REMOTE_SERVER = 'REMOTE_SERVER'
    LOCAL_SERVER = 'LOCAL_SERVER'

    def __init__(self, hostname, port=22, username=None, password=None, server_type=REMOTE_SERVER):
        """Record connection parameters; no connection is opened here."""
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.server_type = server_type
        # self.connect()

    def connect(self):
        """Open SSH and SFTP sessions (no-op for LOCAL_SERVER)."""
        if self.server_type == self.REMOTE_SERVER:
            self.ssh_client = paramiko.SSHClient()
            self.ssh_client.load_system_host_keys()
            self.ssh_client.connect(hostname=self.hostname, port=self.port,
                                    username=self.username, password=self.password)
            self.sftp_client = self.ssh_client.open_sftp()

    def disconnect(self):
        """Close the SFTP and SSH sessions (no-op for LOCAL_SERVER)."""
        if self.server_type == self.REMOTE_SERVER:
            self.sftp_client.close()
            self.ssh_client.close()
def remotepath_exists(self, path):
try:
self.sftp_client.stat(path)
except IOError as e:
if e[0] == 2:
return False
raise
else:
return True
    def remote_makedirs(self, path):
        """Recursively create *path* on the remote server (like os.makedirs)."""
        head, tail = os.path.split(path)
        if not tail:
            # Path ended with a separator; split again to get the real leaf.
            head, tail = os.path.split(head)
        if head and tail and not self.remotepath_exists(path=head):
            # Create missing parents first (mirrors the os.makedirs recursion).
            self.remote_makedirs(head)
            if tail == '.':
                return
        self.sftp_client.mkdir(path=path)
    def put(self, localpath, remotepath, overwrite=False, makedirs=True):
        """Copy *localpath* to *remotepath* (SFTP for remote, shutil for local).

        Raises IOError on a missing source, an existing destination when
        *overwrite* is False, or a missing remote directory when *makedirs*
        is False.
        """
        if self.server_type == self.REMOTE_SERVER:
            self.connect()
            if not os.path.exists(localpath):
                raise IOError('Local path "{}" does not exist'.format(localpath))
            if not overwrite and self.remotepath_exists(remotepath):
                raise IOError('Remote path "{}" exists'.format(remotepath))
            rdirname, rfilename = os.path.split(remotepath)
            if not rfilename or rfilename in ['.', '..']:
                raise IOError('Remote path "{}" is not a valid filepath'.format(remotepath))
            if not self.remotepath_exists(rdirname):
                if makedirs:
                    self.remote_makedirs(rdirname)
                else:
                    raise IOError('Directory of remote path "{}" does not exists and '
                                  '"makedirs" is set to False'.format(remotepath))
            sftp_stat = self.sftp_client.put(localpath=localpath, remotepath=remotepath)
            self.disconnect()
            return sftp_stat
        elif self.server_type == self.LOCAL_SERVER:
            if not os.path.exists(localpath):
                raise IOError('Source path "{}" does not exist'.format(localpath))
            if os.path.exists(remotepath) and not overwrite:
                raise IOError('Dest path "{}" exists'.format(remotepath))
            if not os.path.isfile(localpath):
                raise NotImplementedError('Only files can be copied in LOCAL_SERVER mode.')
            shutil.copyfile(src=localpath, dst=remotepath)
        else:
            raise ValueError('Server type "{}" is not allowed'.format(self.server_type))
def get(self, remotepath, localpath=None, overwrite=False, makedirs=True):
if self.server_type == self.REMOTE_SERVER:
self.connect()
if not self.remotepath_exists(remotepath):
raise IOError('Remote path "{}" does not exist'.format(remotepath))
if localpath is None:
head, tail = os.path.split(remotepath)
localpath = tail
localpath = os.path.expanduser(localpath)
if not overwrite and os.path.exists(localpath):
raise IOError('Local pat |
michaelnetbiz/mistt-solution | app/controllers/interviews.py | Python | mit | 2,627 | 0.003426 | from app import db
# (Repairs a stray extraction artifact at the start of the model import.)
from app.models.interviews import InterviewSevenDays, InterviewNinetyDays
from flask import Blueprint, make_response, jsonify
from flask_cors import cross_origin
from flask_login import login_required

# instantiate the module's blueprint
interviews = Blueprint('interviews', __name__, template_folder='interviews', url_prefix='/interviews')
@cross_origin()
@interviews.route('/seven/', methods=['GET', 'POST'])
@login_required
def get_interviews_7_days():
    """Return every 7-day interview document as a JSON list.

    (Repairs a stray extraction artifact inside the ``methods`` keyword.)
    """
    return make_response(jsonify([service_plan for service_plan in db.InterviewSevenDays.find()]), 200)
@cross_origin()
@interviews.route('/seven/descriptives/', methods=['GET', 'POST'])
@login_required
def get_interviews_7_days_descriptives():
    # NOTE(review): identical to the plain /seven/ listing — presumably a
    # placeholder until real descriptive statistics are computed; confirm.
    return make_response(jsonify([service_plan for service_plan in db.InterviewSevenDays.find()]), 200)


@cross_origin()
@interviews.route('/ninety/', methods=['GET', 'POST'])
@login_required
def get_interviews_90_days():
    """Return every 90-day interview document as a JSON list."""
    return make_response(jsonify([service_plan for service_plan in db.InterviewNinetyDays.find()]), 200)


@cross_origin()
@interviews.route('/ninety/descriptives/', methods=['GET', 'POST'])
@login_required
def get_interviews_90_days_descriptives():
    # NOTE(review): same placeholder pattern as the 7-day descriptives route.
    return make_response(jsonify([service_plan for service_plan in db.InterviewNinetyDays.find()]), 200)
@cross_origin()
@interviews.route('/seven/scores/<string:_id>/', methods=['GET', 'POST'])
@login_required
def get_interview_7_days_scores(_id):
    """Return one 7-day interview document by id, or 404 if unknown."""
    return make_response(jsonify(db.InterviewSevenDays.find_one_or_404({'_id': _id})), 200)


@cross_origin()
@interviews.route('/ninety/scores/<string:_id>/', methods=['GET', 'POST'])
@login_required
def get_interview_90_days_scores(_id):
    """Return one 90-day interview document by id, or 404 if unknown."""
    return make_response(jsonify(db.InterviewNinetyDays.find_one_or_404({'_id': _id})), 200)
@cross_origin()
@interviews.route('/seven/<string:_id>/', methods=['GET', 'POST'])
@login_required
def get_interview_7_days(_id):
    """Return one 7-day interview document by id, or 404 if unknown.

    Fix: this /seven/ route previously queried InterviewNinetyDays — an
    apparent copy-paste slip — so it served 90-day documents instead.
    """
    return make_response(jsonify(db.InterviewSevenDays.find_one_or_404({'_id': _id})), 200)
@cross_origin()
@interviews.route('/ninety/<string:_id>/', methods=['GET', 'POST'])
@login_required
def get_interview_90_days(_id):
    """Return one 90-day interview document by id, or 404 if unknown."""
    return make_response(jsonify(db.InterviewNinetyDays.find_one_or_404({'_id': _id})), 200)
@cross_origin()
@interviews.route('/seven/count/', methods=['GET', 'POST'])
@login_required
def get_interviews_7_days_count():
    # NOTE(review): accesses the raw collection name instead of the model
    # class used elsewhere; presumably 'interview_7_days' is the backing
    # collection of InterviewSevenDays — confirm.
    return make_response(jsonify(db['interview_7_days'].count()))


@cross_origin()
@interviews.route('/ninety/count/', methods=['GET', 'POST'])
@login_required
def get_interviews_90_days_count():
    # NOTE(review): presumably 'interview_90_days' is the backing collection
    # of InterviewNinetyDays — confirm.
    return make_response(jsonify(db['interview_90_days'].count()))
|
Mariaanisimova/pythonintask | PMIa/2015/Velyan_A_S/task_5_6.py | Python | apache-2.0 | 503 | 0.015015 | # Задача 5. Вариант 6.
# Write a program that, when run, randomly displays the name of one of
# the two moons of Mars.
# Velyan A. S.
# 27.05.2016
import random  # hoisted to the top of the script, per convention

print("\nназвание одного из двух спутников Марса:")
satellite = ["Фобос", "Деймос"]
s = random.choice(satellite)
print(s)
input("Нажмите Enter для выхода")
triump0870/mysana | src/mysana/settings/production.py | Python | mit | 1,793 | 0.000558 | # In production set the environment variable like this:
#    DJANGO_SETTINGS_MODULE=mysana.settings.production
from .base import *  # NOQA
import logging.config

# For security and performance reasons, DEBUG is turned off
DEBUG = False
TEMPLATE_DEBUG = False

# Must mention ALLOWED_HOSTS in production!
# ALLOWED_HOSTS = ["mysana.com"]

# Cache the templates in memory for speed-up
loaders = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    ]),
]
TEMPLATES[0]['OPTIONS'].update({"loaders": loaders})
# Explicit loaders and APP_DIRS are mutually exclusive in Django, so the
# automatic app-directories behaviour must be switched off.
TEMPLATES[0].update({"APP_DIRS": False})

# Define STATIC_ROOT for the collectstatic command
STATIC_ROOT = join(BASE_DIR, '..', 'site', 'static')

# Log everything to the logs directory at the top
LOGFILE_ROOT = join(dirname(BASE_DIR), 'logs')

# Reset logging
# (disable Django's automatic config so dictConfig below is the single
# source of truth)
LOGGING_CONFIG = None
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(pathname)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'proj_log_file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': join(LOGFILE_ROOT, 'project.log'),
            'formatter': 'verbose'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        }
    },
    'loggers': {
        'project': {
            'handlers': ['proj_log_file'],
            'level': 'DEBUG',
        },
    }
}

logging.config.dictConfig(LOGGING)
varnion/genial | genial/utils.py | Python | bsd-3-clause | 6,193 | 0.009527 | from sys import intern
import os
import numpy as np
import pandas as pd
import re
from .exceptions import UnsupportedFile
def sort_intervals(starts, ends):
    """Sort paired interval bounds by their start coordinate.

    Parameters
    ----------
    starts, ends : iterables of int
        Parallel sequences of lower/upper interval bounds.

    Returns
    -------
    tuple of two int64 numpy arrays (starts, ends), sorted by start.
    Empty inputs yield a pair of empty arrays (previously this raised
    ValueError from ``zip(*[])``).
    """
    intervals = sorted(zip(starts, ends), key=lambda tup: tup[0])
    if not intervals:
        return (np.array([], dtype=np.int64), np.array([], dtype=np.int64))
    starts, ends = zip(*intervals)
    starts = np.array(starts, dtype=np.int64)
    ends = np.array(ends, dtype=np.int64)
    return starts, ends
def nice_sort(l):
    """Sort given iterable in the way that humans expect.

    Numeric runs compare as integers, so 'item2' sorts before 'item10'.
    src: http://stackoverflow.com/a/2669120
    """
    def alphanum_key(key):
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in re.split('([0-9]+)', key)]

    return sorted(l, key=alphanum_key)
def detect_mime(path_to_file, uncompress=False):
    """Return the MIME type of *path_to_file*, as reported by libmagic.

    When *uncompress* is True, compressed files are inspected after
    decompression.
    """
    import magic
    detector = magic.Magic(mime=True, uncompress=uncompress)
    return detector.from_file(path_to_file)
def magic_open(path_to_file, mode='rt'):
    """Open a plain-text or gzip-compressed file transparently.

    Symlinks are resolved first; an unsupported MIME type raises
    UnsupportedFile.
    """
    import gzip
    # follow symlinks
    path_to_file = os.path.realpath(path_to_file)
    mime = detect_mime(path_to_file)
    if mime == 'text/plain':
        return open(path_to_file, mode=mode)
    if mime == 'application/gzip':
        return gzip.open(path_to_file, mode=mode)
    raise UnsupportedFile('File %s is type %s' % (path_to_file, mime))
def rand_id():
    """Return a pseudo-random id of the form ``Random_ID_`` + 15 chars.

    Characters are drawn from lowercase ASCII letters and digits.
    """
    import string
    alphabet = list(string.ascii_lowercase + string.digits)
    suffix = ''.join(np.random.choice(alphabet, 15))
    return "Random_ID_" + suffix
def array2str(arr):
    """Serialize an iterable of numbers as a comma-separated string."""
    return ','.join(map(str, arr))
def str2array(string):
    """Parse a comma-separated string of ints back into an int64 array.

    Inverse of ``array2str``.  Uses an explicit split instead of the
    deprecated ``np.fromstring`` text mode, and returns an empty array
    for an empty string (previously an error/warning case).
    """
    if not string:
        return np.array([], dtype=np.int64)
    return np.array([int(x) for x in string.split(',')], dtype=np.int64)
def format_intervals(iterable_with_numbers):
    """Collapse sorted numbers into human-readable interval strings.

    Strictly consecutive runs of length >= 3 are rendered as ``"a-b"``;
    runs of one or two values are emitted individually, e.g.
    ``[1, 2, 3, 5] -> ['1-3', '5']``.
    """
    values = sorted(iterable_with_numbers)
    parts = []

    def emit(lo, hi):
        # A run becomes one value, two values, or a range string.
        if hi == lo:
            parts.append(str(lo))
        elif hi - lo == 1:
            parts.append(str(lo))
            parts.append(str(hi))
        else:
            parts.append('%d-%d' % (lo, hi))

    run_start = 0
    for i in range(1, len(values)):
        # The run continues only while values stay strictly consecutive.
        if values[i] != values[run_start] + (i - run_start):
            emit(values[run_start], values[i - 1])
            run_start = i
    emit(values[run_start], values[-1])
    return parts
def stringfy(obj):
    """Turn *obj* into a string; numpy arrays become comma-separated values.

    One entry point instead of choosing between ``array2str`` (for
    ndarrays) and ``str`` (for everything else) at every call site.
    """
    if isinstance(obj, np.ndarray):
        return ','.join(str(x) for x in obj)
    return str(obj)
def read_extb(filepath):
    """Load an EXTB (EXon TaBle) file into a pandas DataFrame.

    The file is tab-separated with no header row; the 15 columns are, in
    order: organism, assembly_version, coords, strand, biotype, gene_name,
    gene_id, transcript_id, protein_id, exons, introns, cds, phase,
    coords_array, ORF_coords.  ``transcript_id`` is used as the index.
    """
    column_names = ('organism assembly_version coords strand biotype '
                    'gene_name gene_id transcript_id protein_id exons '
                    'introns cds phase coords_array ORF_coords').split()

    table = pd.read_csv(filepath, sep='\t', header=None)
    table.columns = column_names
    table.index = table.transcript_id
    return table
def read_bed(filepath, cols=12):
    """Load a BED file (up to 12 columns) into a pandas DataFrame.

    Parameters
    ----------
    filepath: path to the tab-separated BED file (no header row)
    cols: number of columns present in the file (default BED12)

    The BED12 field names are: chrom, chromStart, chromEnd, name, score,
    strand, thickStart, thickEnd, itemRgb, blockCount, exons (blockSizes),
    blockStart.  The ``name`` column is used as the index.
    """
    bed12_fields = ['chrom', 'chromStart', 'chromEnd', 'name', 'score',
                    'strand', 'thickStart', 'thickEnd', 'itemRgb',
                    'blockCount', 'exons', 'blockStart']

    bed = pd.read_csv(filepath, sep='\t', header=None,
                      names=bed12_fields[:cols])
    bed.index = bed.name
    return bed
class AttribDict(dict):
    """A dict whose keys are also readable/writable as attributes.

    [source](http://goodcode.io/articles/python-dict-object/)
    """

    def __getattr__(self, name):
        if name not in self:
            raise AttributeError("attribute %s doesn't exist" % name)
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        if name not in self:
            raise AttributeError("attribute %s doesn't exist" % name)
        del self[name]
class InternDict(dict):
    """A dict that interns string keys and values to save memory.

    Interning makes equal strings share a single object, so repeated
    keys/values cost one allocation and hit the identity fast-path on
    comparison and lookup.
    """

    def __setitem__(self, key, value):
        # ``sys.intern`` only accepts ``str``; the previous ``key is not
        # None`` guard crashed with TypeError on int/tuple keys, which a
        # plain dict accepts.  Guard by type instead.
        if isinstance(key, str):
            key = intern(key)
        if isinstance(value, str):
            value = intern(value)
        super(InternDict, self).__setitem__(key, value)
|
rspavel/spack | var/spack/repos/builtin/packages/xrefresh/package.py | Python | lgpl-2.1 | 696 | 0.001437 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xrefresh(AutotoolsPackage, XorgPackage):
    """xrefresh - refresh all or part of an X screen.

    (Repairs two stray extraction artifacts: one in the base-class list
    and one splitting the sha256 checksum string.)
    """

    homepage = "http://cgit.freedesktop.org/xorg/app/xrefresh"
    xorg_mirror_path = "app/xrefresh-1.0.5.tar.gz"

    version('1.0.5', sha256='b373cc1ecd37c3d787e7074ce89a8a06ea173d7ba9e73fa48de973c759fbcf38')

    depends_on('libx11')

    depends_on('xproto@7.0.17:', type='build')
    depends_on('pkgconfig', type='build')
    depends_on('util-macros', type='build')
|
jhseu/tensorflow | tensorflow/python/platform/test.py | Python | apache-2.0 | 3,771 | 0.005834 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing.
See the [Testing](https://tensorflow.org/api_docs/python/tf/test) guide.
Note: `tf.compat.v1.test.mock` is an alias to the python `mock` or
`unittest.mock` depending on the python version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order
from tensorflow.python.framework import test_util as _test_util
from tensorflow.python.platform import googletest as _googletest
# pylint: disable=unused-import
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available
from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
# pylint: enable=unused-import,g-bad-import-order
import sys
from tensorflow.python.util.tf_export import tf_export
# `tf.compat.v1.test.mock` is the third-party ``mock`` on Python 2 and the
# stdlib ``unittest.mock`` on Python 3.
if sys.version_info.major == 2:
  import mock  # pylint: disable=g-import-not-at-top,unused-import
else:
  from unittest import mock  # pylint: disable=g-import-not-at-top,g-importing-member

tf_export(v1=['test.mock'])(mock)

# Import Benchmark class
Benchmark = _googletest.Benchmark  # pylint: disable=invalid-name

# Import StubOutForTesting class
StubOutForTesting = _googletest.StubOutForTesting  # pylint: disable=invalid-name
@tf_export('test.main')
def main(argv=None):
"""Runs all unit tests."""
_test_util.InstallStackTraceHandler()
return _googletest.main(argv)
@tf_export(v1=['test.get_temp_dir'])
def get_temp_dir():
"""Returns a temporary directory for use during tests.
There is no need to delete the directory after the test.
Returns:
The temporary directory.
| """
return _googletest.GetTempDir()
@tf_export(v1=['test.test_src_dir_path'])
def test_src_dir_path(relative_path):
"""Creates an absolute test srcdir path given a relative path.
Args:
relative_path: a path relative to tensorflow root.
e.g. "core/platform".
Returns:
An absolute path to the linked | in runfiles.
"""
return _googletest.test_src_dir_path(relative_path)
@tf_export('test.is_built_with_cuda')
def is_built_with_cuda():
"""Returns whether TensorFlow was built with CUDA (GPU) support."""
return _test_util.IsGoogleCudaEnabled()
@tf_export('test.is_built_with_rocm')
def is_built_with_rocm():
"""Returns whether TensorFlow was built with ROCm (GPU) support."""
return _test_util.IsBuiltWithROCm()
@tf_export('test.is_built_with_gpu_support')
def is_built_with_gpu_support():
"""Returns whether TensorFlow was built with GPU (i.e. CUDA or ROCm) support."""
return is_built_with_cuda() or is_built_with_rocm()
@tf_export('test.is_built_with_xla')
def is_built_with_xla():
"""Returns whether TensorFlow was built with XLA support."""
return _test_util.IsBuiltWithXLA()
|
astrand/webkom | set_version.py | Python | gpl-2.0 | 1,081 | 0.004625 | #!/usr/bin/env python2
# -*- coding: iso-8859-1 -*-
# WebKOM - a web based LysKOM client
#
# Copyright (C) 2000 by Peter Åstrand
#
# This program is free software; you can redistribute it and/or
# modi | fy it under the terms of the GNU General Publ | ic License
# as published by the Free Software Foundation; version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from webkom_constants import VERSION
template = open("webkom.spec.template")
new = open("webkom.spec", "w")
while 1:
line = template.readline()
if not line:
break
if line.find("Version:") != -1:
line = "Version: " + VERSION + "\n"
new.write(line)
|
wjbeksi/rgbd-covariance-descriptors | utilities/generate_random_paths.py | Python | bsd-2-clause | 932 | 0.006438 | #!/usr/bin/env python
#
# Generate test and training lists of absolute paths to files within
# subdirectories for a given top level directory. Each test path is randomly
# selected for a given input directory. The test and training paths are written
# out to separate files.
#
import os
import sys
import random
if len(sys.argv) != 2:
print "Usage: generate_random_paths path_to_top_level_dir"
sys.exit()
dir = sys.argv[1]
train_paths = open('train_paths.txt', 'w')
test_paths = open('test_paths.txt', 'w')
for path, dirs, files in os.walk(dir):
file_count = len(files)
if file_count:
r = random.randint(1, file_count)
i = 1;
for f in files:
file_path = os.path.abspath(os.path.join(path, f))
if i == r:
test_paths.write(f | ile_path + '\n')
else:
train_paths.write(file_path + '\n' | )
i = i + 1
train_paths.close()
test_paths.close()
|
laurentb/weboob | modules/avendrealouer/pages.py | Python | lgpl-3.0 | 6,825 | 0.0022 | # -*- coding: utf-8 -*-
# Copyright(C) 2017 ZeHiro
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import datetime
from weboob.browser.pages import HTMLPage, JsonPage, pagination
from weboob.browser.elements import ItemElement, ListElement, method, DictElement
from weboob.browser.filters.html import Attr, AbsoluteLink, Link
from weboob.browser.filters.json import Dict
from weboob.browser.filters.standard import CleanDecimal, CleanText, Date, Regexp, Async, AsyncLoad
from weboob.capabilities.housing import City, Housing, UTILITIES, HousingPhoto
from weboob.capabilities.base import NotAvailable, Currency
from weboob.tools.capabilities.housing.housing import PricePerMeterFilter
class CitiesPage(JsonPage):
@method
class iter_cities(DictElement):
class item(ItemElement):
klass = City
obj_id = Dict('Value')
obj_name = Dict('Name')
class AvendreAlouerItem(ItemElement):
klass = Housing
_url = AbsoluteLink('.//a[has-class("linkCtnr")]')
load_details = _url & AsyncLoad
obj_url = _url
obj_id = Async('details') & CleanText(Regexp(CleanText('//p[has-class("property-reference")]'), r'\:(.*)$', default=''))
obj_title = CleanText('.//a//ul')
obj_area = CleanDecimal(
CleanText('.//a//ul//li[has-class("first")]//following-sibling::li[2]'),
default=NotAvailable
)
obj_cost = CleanDecimal(
CleanText('.//span[has-class("price")]')
)
obj_price_per_meter = PricePerMeterFilter()
obj_currency = CleanText(
Regexp(
CleanText('.//span[has-class("price")]'),
r'[\d\ ]+(.*)'
)
)
obj_location = CleanText('.//span[has-class("loca")]')
obj_text = CleanText('.//p[has-class("propShortDesc")]')
obj_date = Async('details') & Date(
Regexp(
CleanText('//div[has-class("property-description-main")]'),
r'Mise à jour le ([\d\\]+)', default=datetime.today()
)
)
def obj_details(self):
page_doc = Async('details').loaded_page(self).doc
return {
'GES': CleanText('//span[@id="gassymbol"]', '')(page_doc),
'DPE': CleanText('//span[@id="energysymbol"]', '')(page_doc),
}
def obj_utilities(self):
price = CleanText('//span[has-class("price-info")]')(self)
if 'CC' in price:
return UTILITIES.INCLUDED
elif 'HC' in price:
return UTILITIES.EXCLUDED
else:
return UTILITIES.UNKNOWN
obj_station = 'Test'
obj_bedrooms = Async('details') & CleanDecimal(
CleanText('.//td//span[contains(text(), "Chambre")]//following-sibling::span[has-class("r")]'),
default=NotAvailable
)
obj_rooms = Async('details') & CleanDecimal(
CleanText('.//td//span[contains(text(), "Pièce")]//following-sibling::span[has-class("r")]'),
default=NotAvailable
)
def obj_photos(self):
page_doc = Async('details').loaded_page(self).doc
photos = []
for photo in page_doc.xpath('//div[@id="bxSliderContainer"]//ul//li//img'):
url = Attr('.', 'src')(photo)
if url[0] != '/':
photos.append(HousingPhoto(url))
return photos
def validate(self, obj):
return obj.id != ''
class SearchPage(HTMLPage):
@pagination
@method
class iter_housings(ListElement):
item_xpath = './/li[@data-tranid="1"]'
next_page = AbsoluteLink('./ul[has-class("pagination")]/li/a[has-class("next")]')
class item(AvendreAlouerItem):
obj_phone = CleanText(Attr('.', 'data-infos'))
def get_housing_url(self):
return Link('.//a[has-class("picCtnr")]')(self.doc)
class HousingPage(HTMLPage):
@method
class get_housing(ItemElement):
klass = Housing
obj_id = Regexp(CleanText('//p[has-class("property-reference")]'), r'\:(.*)$')
def obj_url(self):
return self.page.url
obj_area = CleanDecimal(
Regexp(
CleanText('//table[@id="table"]//span[contains(text(), "Surface")]//following-sibling::span[has-class("r")]'),
r'([\d\ ]+)m'
),
default=NotAvailable
)
obj_title = CleanText('//span[has-class("mainh1")]')
obj_cost = CleanDecimal('//span[has-class("price-info")]')
obj_currency = Currency.get_currency(u'€')
obj_rooms = CleanDecimal('//table[@id="table"]//span[contains(text(), "Pièce")]//following-sibling::span[has-class("r")]')
obj_bedrooms = CleanDecimal('//table[@id="table"]//span[contains(text(), "Chambre")]//following-sibling::span[has-class("r")]')
obj_location = CleanText(Regexp(CleanText('//span[has-class | ("mainh1")]'), r',(.+)$'))
obj_text = CleanText('//div[has-class("property-description-main")]')
obj_date = Date(
Regexp(
CleanText('//div[has-class("property-description-main")]'),
r'Mise à jour le ([\d\\]+)', default=datetime.today()
)
)
obj_phone = Attr('//button[@id="display-phonenumber-1"]', 'data-phone-number')
def obj_photos(self):
photos = []
for photo in self.xpath('//div[@id="bx | SliderContainer"]//ul//li//img'):
url = Attr('.', 'src')(photo)
if url[0] != '/':
photos.append(HousingPhoto(url))
return photos
def obj_details(self):
return {
'GES': CleanText('//span[@id="gassymbol"]', '')(self),
'DPE': CleanText('//span[@id="energysymbol"]', '')(self),
}
def obj_utilities(self):
price = CleanText('//span[has-class("price-info")]')(self)
if 'CC' in price:
return UTILITIES.INCLUDED
elif 'HC' in price:
return UTILITIES.EXCLUDED
else:
return UTILITIES.UNKNOWN
obj_station = NotAvailable
obj_price_per_meter = PricePerMeterFilter()
|
googleapis/python-automl | samples/beta/list_models.py | Python | apache-2.0 | 1,782 | 0 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def list_models(project_id):
"""List models."""
# [START automl_list_models_beta]
from google.cloud import automl_v1beta1 as automl
# TODO(developer): Uncomment and set the following variables
# project_id = "YOUR_PROJECT_ID"
client = automl.AutoMlClient()
# A resource that represents Google Cloud Platform location.
project_location = f"projects/{project_id}/locations/us-central1"
request = automl.ListModelsRequest(parent=project_location, filter="")
response = client.list_models(request=request)
print("List of models:")
for model i | n response:
# Display the model information.
if (
model.deployment_state
== automl.Model.DeploymentState.DEPLOYED
):
deployment_state = "deployed"
else:
deployment_state = "undeployed"
print("Model name: {}".format(model.name))
print("Model id: {}".format(model.name.split("/")[-1]))
print("Model display name: {}" | .format(model.display_name))
print("Model create time: {}".format(model.create_time))
print("Model deployment state: {}".format(deployment_state))
# [END automl_list_models_beta]
|
databricks/spark-sklearn | python/spark_sklearn/random_search.py | Python | apache-2.0 | 10,095 | 0.000396 | """
Class for parallelizing RandomizedSearchCV jobs in scikit-learn
"""
from sklearn.model_selection import ParameterSampler
from spark_sklearn.base_search import SparkBaseSearchCV
class RandomizedSearchCV(SparkBaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all combinations of parameter values are tried
out, but rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used for all parameters.
It is highly recommended to use continuous distributions for continuous
parameters.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Not used; exists for scikit-learn compatibility.
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_se | lection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
re | fit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_....|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.8, 0.7, 0.8, 0.9],
'split1_test_score' : [0.82, 0.5, 0.7, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.82],
'std_test_score' : [0.02, 0.01, 0.03, 0.03],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.8, 0.9, 0.7],
'split1_train_score' : [0.82, 0.5, 0.7],
'mean_train_score' : [0.81, 0.7, 0.7],
'std_train_score' : [0.03, 0.03, 0.04],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.007, 0.06, 0.04, 0.04],
'std_score_time' : [0.001, 0.002, 0.003, 0.005],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
See Also
--------
:class:`GridSearchCV`:
Does ex |
egtaonline/GameAnalysis | profile/display.py | Python | apache-2.0 | 4,344 | 0 | """Aggregate profiling data and generate an rst file"""
import itertools
import tabulate
def update(means, new, count, num=1):
"""Recursively update mean dictionary"""
for key, val in new.items():
if isinstance(val, dict):
update(means.setdefault(key, {}), val, count)
else:
value = means.get(key, 0)
means[key] = value + (val - value) * num / count
def write_file(results, fil):
"""Write file with results"""
# Compute normalized speeds
for game in next(iter(results.values())):
min_speed = min(g[game]['speed'] for g in results.values())
for games in results.values():
games[game]['norm_speed'] = games[game]['speed'] / min_speed
# Aggregate info over all games
agg_results = {}
for method, game_info in results.items():
agg_info = {}
game_count = 0
for info in game_info.values():
count = info.pop('count')
game_count += count
update(agg_info, info, game_count, count)
agg_results[method] = agg_info
fil.write(""".. _profile_nash:
Nash Equilibrium Methods Comparison
===================================
For each method available for Nash equilibrium finding, this lists various
information about the performance across different game types and starting
locations. "Fraction of Eqa" is the mean fraction of all equilibria found via
that method or starting location. "Weigted Fraction (of Eqa)" is the same,
except each equilibrium is down weighted by the number of methods that found
it, thus a larger weighted fraction indicates that this method found more
unique equilibria. "Time" is the average time in seconds it took to run this
method for every starting location. "Normalized Time" sets the minimum time for
each game type and sets it to one, thus somewhat mitigating the | fact that
certain games may be more difficult than others. It also provides an easy
comparison metric to for baseline timing.
""")
fil.write(
'Comparisons Between Methods\n'
'----------------------------------\n\n')
fil.write(tabulate.tabulate(
sorted(([m.title(), v['card | '], v['weight'], v['speed'],
v['norm_speed']]
for m, v in agg_results.items()),
key=lambda x: x[1], reverse=True),
headers=['Method', 'Fraction of Eqa', 'Weighted Fraction',
'Time (sec)', 'Normalized Time'],
tablefmt='rst'))
fil.write('\n\n')
for method, game_info in results.items():
title = method.title()
fil.write(title)
fil.write('\n')
fil.writelines(itertools.repeat('-', len(title)))
fil.write('\n\n')
agg_info = agg_results[method]
fil.write(
'Initial Profile Rates\n'
'^^^^^^^^^^^^^^^^^^^^^\n\n')
fil.write(tabulate.tabulate(
sorted(([k.capitalize(), v, agg_info['profweight'][k]]
for k, v in agg_info['profcard'].items()),
key=lambda x: x[1], reverse=True),
headers=['Starting Type', 'Fraction of Eqa',
'Weighted Fraction'], tablefmt='rst'))
fil.write('\n\n')
fil.write(
'Compared to Other Methods\n'
'^^^^^^^^^^^^^^^^^^^^^^^^^\n\n')
fil.write(tabulate.tabulate(
sorted(([m.title(), v,
agg_info['norm_speed'] / agg_results[m]['norm_speed']]
for m, v in agg_info['pair'].items()),
key=lambda x: x[1], reverse=True),
headers=['Method', 'Fraction of Eqa', 'Time Ratio'],
tablefmt='rst'))
fil.write('\n\n')
fil.write(
'By Game Type\n'
'^^^^^^^^^^^^\n\n')
for game, info in game_info.items():
fil.write(game.capitalize())
fil.write('\n')
fil.writelines(itertools.repeat('"', len(game)))
fil.write('\n\n')
fil.write(tabulate.tabulate([
['Fraction of Eqa', info['card']],
['Weighted Fraction of Eqa', info['weight']],
['Time (sec)', info['speed']],
['Normalized Time', info['norm_speed']],
], headers=['Metric', 'Value'], tablefmt='rst'))
fil.write('\n\n')
|
globus/globus-release-tools | share/python/repo/advisories.py | Python | apache-2.0 | 3,857 | 0.001296 | #! /usr/bin/python
import json
import os
import re
import datetime
class Advisories(object):
today = datetime.datetime.now().strftime("%Y-%m-%d")
def __init__(self, initial_advisories_path=None, format="txt"):
self.advisories = []
self.added_packages = {}
if initial_advisories_path is not None:
f = open(initial_advisories_path, "r")
if format == 'json':
s = f.read()
if s.startswith("advisories ="):
s = s.replace("advisories = ", "", 1)
s = s.rstrip(";\n")
self.advisories = json.loads(s)
else:
for line in f:
self.parse_line(line)
f.close()
def parse_line(self, line):
line = line.strip()
if line.startswith("#") or line == "":
return
d, p, v, | f, desc = line.split(";", 4)
pkgs = p.split(",")
flags = f.split(" ")
desc = desc.replace("\"", "\\\"")
obj = {
"date": d,
"packages": pkgs,
"toolkit_version": v,
"flags": flags,
"description": desc,
}
self.advisories.append(obj)
def add_advisories(self, packages):
for p in packages:
if p.arch == 'src' and p.name not in self.added_p | ackages and \
".src.rpm" in p.path:
pfd = os.popen('rpm -q -p "%s" --changelog' % p.path)
pfd.readline() # Date line
changelog = ""
for l in pfd:
if l.startswith("*"):
break
else:
if l.startswith("- "):
l = l.replace("- ", "", 1)
changelog += l
pfd.close()
changelog = changelog.strip().replace("\n", "<br />")
pfd = os.popen('rpm -q -p "%s" -l' % p.path)
files = []
for l in pfd:
if ".tar.gz" in l:
l = l.replace(".tar.gz", "").strip()
matches = re.match(l, r"([a-z-]+)(-[0-9.]+)")
if matches is not None:
l = matches.group(1).replace("-", "_") + \
matches.group(2)
files.append(l.replace(".tar.gz", "").strip())
pfd.close()
if len(files) > 0:
obj = {
"date": Advisories.today,
"packages": files,
"toolkit_version": "6.0",
"flags": ["bug"],
"description": changelog
}
self.advisories.append(obj)
self.added_packages[p.name] = obj
def to_json(self):
return json.dumps(self.advisories)
def new_to_text(self):
s = ""
for k in self.added_packages:
a = self.added_packages[k]
date = a['date']
pkgs = " ".join(a['packages'])
toolkit_version = a['toolkit_version']
flags = " ".join(a['flags'])
desc = a['description'].replace("\\\"", "\"")
s += "%s;%s;%s;%s;%s\n" % \
(date, pkgs, toolkit_version, flags, desc)
return s
def to_text(self):
s = ""
for a in self.advisories:
date = a['date']
pkgs = " ".join(a['packages'])
toolkit_version = a['toolkit_version']
flags = " ".join(a['flags'])
desc = a['description'].replace("\\\"", "\"")
s += "%s;%s;%s;%s;%s\n" % \
(date, pkgs, toolkit_version, flags, desc)
return s
|
jclgoodwin/bustimes.org.uk | fares/migrations/0004_alter_tariff_access_zones.py | Python | mpl-2.0 | 414 | 0 | # Generated by Django 3.2.5 on 2021-08-17 19:45
from django. | db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fares', '0003_auto_20210602_1534'),
]
operations = [
migrations.AlterField(
model_name='tariff',
name='access_zones',
field=models.Many | ToManyField(blank=True, to='fares.FareZone'),
),
]
|
haticeerturk/Github-Repositories | Github-Repositories.py | Python | gpl-3.0 | 957 | 0.036573 | import urllib2 , re
name = raw_input("Enter GitHub Account: ")
project_name = raw_input("Enter the project name: ")
url = "https://github.com/" + name + "? | tab=repositories"
searching = urllib2.urlopen(url)
reading = searching.read()
string = '<a href="/' + name + '/(.*?)" class="css-truncate css-truncate-target">'
project = re.findall(string, reading)
for i in range(len(project)) :
if project[i] == projec | t_name :
print "Project Name: ",project[i]
#Get Description...
_url = "https://github.com/" + name + "/" + project_name
_searching = urllib2.urlopen(_url)
_reading = _searching.read()
description = re.findall('<div class="repository-description">\n <p>(.*?)</p>', _reading)
if description :
print "Description: ",description[0]
else :
print "Description: Description not found."
break
downlink = "http://github.com/"+name+"/"+project_name+"/archive/master.zip"
print "Download Link: ",downlink
|
kjs73/pele | pele/potentials/test_functions/__init__.py | Python | gpl-3.0 | 75 | 0.013333 | from _beale imp | ort Beale, Be | aleSystem
from _booth import Booth, BoothSystem |
skywalkerytx/oracle | src/main/python/xgbpredict.py | Python | mit | 138 | 0.014493 | import tensorflow as tf
im | port numpy as np
#import allguess as al
|
from datetime import datetime
a = str(datetime.now())[11:19]
print(a) |
OpenSourceHelpCommunity/OpenSourceHelpCommunity.github.io | oshc/oshc/settings/dev.py | Python | mit | 71 | 0 | # dev set | tings
from .base import *
DEBUG = os.getenv("DEBUG" | , False)
|
wakalixes/sqldataplot | plugins/pluginTransformI2B2a.py | Python | gpl-2.0 | 2,418 | 0.016956 | #--------------------------------------------------
# Revision = $Rev: 20 $
# Date = $Date: 2011-08-05 20:42:24 +0200 (Fri, 05 Aug 2011) $
# Author = $Author: stefan $
#--------------------------------------------------
from pluginInterfaces import PluginTransform
import numpy as np
import csv
import os,sys
class PluginTransformI2B2a(PluginTransform):
def __init__(self):
self.pathname = os.path.realpath(os.path.dirname(sys.argv[0]))
def importData(csvFile):
datacsv = csv.reader(open(self.pathname+"/plugins/feshbachdata/"+csvFile, 'rb'), delimiter=',', quotechar='|')
data = []
for row in datacsv:
try:
data.append([float(row[0]), float(row[1])])
except ValueError:
pass
data = np.array(data)
return data
self.swaves = importData("swaves.csv")
self.d0to60 = importData("data0-60.csv")
self.d490to510 = importData("data490-510.csv")
self.d540to560 = importData("data540-560.csv")
self.d750to950 = importData("data750-950.csv")
def transform(self,data, axes, constants):
def convB2a(bval):
if bval >= 0 and bval <= 60:
t = self.d0to60
elif bval >= 490 and bval <= 510:
t = self.d490to510
elif bval >= 540 and bval <= 560:
t = self.d540to560
elif bval >= 750 and bval <= 950:
t = self.d750to950
else:
t = self.swaves
return np.interp(bval, t[:,0], t[:,1])
A0 = constants[0]
COILCONVERSION = constants[1]
FESHBACHRES_CENTER = constants[2]
Bdata = (data[0,:] - A0)*COILCONVERSION + FESHBACHRES_CENTER
data[0,:] = map(convB2a, Bdata)
return data
def getConstants(self):
| return ["A0", "Coil-Conversion", "Feshbach-resonance center"]
def getTransformModelStr(self):
return "x-axis to a/a0"
def requieredInputParameters(self):
return None
def getInfoStr(self):
return "This plugin uses the equation B = (I-'A0')*'Coil-Conversion'" \
"+'Freshbarch-resonance center' to convert the | current in a coil" \
" to magnetic-field-strength B. The conversion will touch whatever " \
"x-axis is selected and will leave all other axes untouched"
|
xiangrufan/astro-NLP | hobby2vec/gensim_test.py | Python | gpl-3.0 | 558 | 0.005618 | import gensim
import numpy as np
from gensim.models import word2vec
import jieba
from TextSta_v2 import TextSta
from gensim.corpora.dictionary impor | t Dictionary
path = u"C:\\Users\\xiangrufan\\Desktop\\NLP\\Astro_NLP\\resource\\复旦分类语料\\answer\\C3-Art\\C3-Art0002.txt"
text = TextSta(path,encoding="GBK")
sentense_file = text.sen(all_return=True)
word_li | st = jieba.lcut(sentense_file)
tmp_dic = Dictionary()
tmp_dic(word_list)
# sentences = word2vec.Text8Corpus() # 加载语料
# model = word2vec.Word2Vec(sentences, size=10) # 默认window=5 |
jawilson/home-assistant | homeassistant/components/ebusd/__init__.py | Python | apache-2.0 | 3,733 | 0.000804 | """Support for Ebusd daemon for communication with eBUS heating systems."""
import logging
import socket
import ebusdpy
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PORT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from .const import DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "ebusd"
DEFAULT_PORT = 8888
CONF_CIRCUIT = "circuit"
CACHE_TTL = 900
SERVICE_EBUSD_WRITE = "ebusd_write"
def verify_ebusd_config(config):
"""Verify eBusd config."""
circuit = config[CONF_CIRCUIT]
for condition in config[CONF_MONITORED_CONDITIONS]:
if condition not in SENSOR_TYPES[circuit]:
raise vol.Invalid(f"Condition '{condition}' not in '{circuit}'.")
return config
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
vol.All(
{
vol.Required(CONF_CIRCUIT): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[]): cv.ensure_list,
},
verify_ebusd_config,
)
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the eBusd component."""
_LOGGER.debug("Integration setup started")
conf = config[DOMAIN]
name = conf[CONF_NAME]
circuit = conf[CONF_CIRCUIT]
monitored_conditions = conf.get(CONF_MONITORED_CONDITIONS)
server_address = (conf.get(CONF_HOST), conf.get(CONF_PORT))
try:
ebusdpy.init(server_address)
hass.data[DOMAIN] = EbusdData(server_address, circuit)
sensor_config = {
CONF_MONITORED_CONDITIONS: monitored_conditions,
"client_name": name,
"sensor_types": SENSOR_TYPES[circuit],
}
load_platform(hass, "sensor", DOMAIN, sensor_config, config)
hass.services.register(DOMAIN, SERVICE_EBUSD_WRITE, hass.data[DOMAIN].write)
_LOGGER.debug("Ebusd integration setup completed")
return True
except (socket.timeout, OSError):
return False
class EbusdData:
    """Get the latest data from Ebusd."""

    def __init__(self, address, circuit):
        """Initialize the data object."""
        self._circuit = circuit      # ebusd circuit name used for all reads/writes
        self._address = address      # (host, port) tuple of the ebusd daemon
        self.value = {}              # last successfully read value per condition name

    def update(self, name, stype):
        """Call the Ebusd API to update the data.

        On success stores the result in self.value[name]; ebusd-level errors
        (strings containing "ERR:") are logged but not raised.  Transport-level
        RuntimeErrors are logged and re-raised for the caller.
        """
        try:
            _LOGGER.debug("Opening socket to ebusd %s", name)
            # Fixed: line was garbled by a stray ' | ' artifact.
            command_result = ebusdpy.read(
                self._address, self._circuit, name, stype, CACHE_TTL
            )
            if command_result is not None:
                if "ERR:" in command_result:
                    _LOGGER.warning(command_result)
                else:
                    self.value[name] = command_result
        except RuntimeError as err:
            _LOGGER.error(err)
            raise RuntimeError(err) from err

    def write(self, call):
        """Call write method on ebusd."""
        name = call.data.get("name")
        value = call.data.get("value")
        try:
            _LOGGER.debug("Opening socket to ebusd %s", name)
            # Fixed: 'name' argument was garbled by a stray ' | ' artifact.
            command_result = ebusdpy.write(self._address, self._circuit, name, value)
            if command_result is not None and "done" not in command_result:
                _LOGGER.warning("Write command failed: %s", name)
        except RuntimeError as err:
            # Best-effort write: failures are logged, not propagated.
            _LOGGER.error(err)
|
kittiu/sale-workflow | sale_order_margin_percent/__manifest__.py | Python | agpl-3.0 | 647 | 0 | # -*- coding: utf-8 -*-
# Copyright 2017 PESOL (http://pesol.es)
# Luis Adan Jimenez Hernandez (luis.jimenez@pesol.es)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo module manifest (fixed: stray '|' artifact before 'sale_margin').
{
    "name": "Sale Order Margin Percent",
    "summary": "Show Percent in sale order",
    "version": "10.0.1.0.0",
    "category": "Sales",
    "website": "http://www.pesol.es",
    "author": "PESOL, Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    "depends": [
        'sale',
        'sale_margin'
    ],
    "data": [
        'views/sale_order_margin_percent_view.xml',
    ]
}
|
fantopop/post-production-scripts | Session info to table/modules/html.py | Python | mit | 1,144 | 0.011364 | #!/usr/bin/python
tab = ' '
class Tag():
    """Context manager that writes '<name>' on entry and '</name>' on exit,
    adjusting the owning HTML accumulator's indent level in between."""

    def __init__(self, name, HTML):
        self.name = name    # tag name, e.g. 'html' or 'body'
        self.HTML = HTML    # owning HTML document accumulator

    def __enter__(self):
        # Emit the opening tag at the current indent, then indent children.
        self.HTML.content += tab * self.HTML.indent + '<' + self.name + '>\n'
        self.HTML.indent += 1

    def __exit__(self, exc_type, exc_value, traceback):
        # Dedent and emit the matching closing tag.
        # Fixed: 'self.name' was garbled by a stray ' | ' artifact.
        self.HTML.indent -= 1
        self.HTML.content += tab * self.HTML.indent + '</' + self.name + '>\n'
class HTML():
    """Minimal HTML document builder: accumulates text in self.content and
    hands out Tag context managers for the common structural elements."""

    def __init__(self):
        self.indent = 0                      # current nesting depth
        # Fixed: assignment was garbled by a stray '|' artifact.
        self.content = '<!DOCTYPE html>\n'   # accumulated document text

    def __str__(self):
        return self.content

    def add(self, text):
        # Append each line of *text*, indented to the current depth.
        for line in text.split('\n'):
            self.content += tab * self.indent + line + '\n'

    def html(self):
        return Tag('html', self)

    def body(self):
        return Tag('body', self)

    def head(self):
        return Tag('head', self)

    def title(self):
        return Tag('title', self)

    def h1(self):
        return Tag('h1', self)

    def h2(self):
        return Tag('h2', self)

    def style(self):
        return Tag('style', self)
def main():
    # No standalone behavior; the module is meant to be imported for Tag/HTML.
    pass
# Standard import guard so the module is a no-op when run directly.
if __name__ == '__main__':
    main()
|
alivecor/tensorflow | tensorflow/python/framework/function_test.py | Python | apache-2.0 | 48,059 | 0.011049 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import time
import numpy as np
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _OptimizerOptions():
  """Yield ConfigProtos covering all 8 combinations of CSE, function
  inlining and constant folding (opt_level fixed at L0)."""
  # Fixed: 'GraphOptions' and 'do_common_subexpression_elimination' were
  # garbled by stray ' | ' artifacts.
  for cse in [False, True]:
    for inline in [False, True]:
      for cfold in [False, True]:
        yield config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
            optimizer_options=config_pb2.OptimizerOptions(
                opt_level=config_pb2.OptimizerOptions.L0,
                do_common_subexpression_elimination=cse,
                do_function_inlining=inline,
                do_constant_folding=cfold)))
class FunctionTestMethods(object):
"""Test methods for verifying Function support.
These test methods are used as mix-ins in two test cases: with
and without C API support.
"""
def testIdentity(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], sess.run(call))
def testIdentityOutputName(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity", out_names=["my_result_name"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], sess.run(call))
def testTooManyOutputNames(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity",
out_names=["my_result1", "my_result2"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError, (r"Length of out_names \(2\) does not match number of "
r"outputs \(1\): my_result1, my_result2")):
MyIdentityFunc([18.0])
def testDefineFunction2Args(self):
@function.Defun(dtypes.float32, dtypes.float32, func_name="APlus2B")
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testValueErrorOnFunctionWithNoOutput(self):
# TODO(iga): Remove this restriction and this test
@function.Defun(dtypes.float32, dtypes.float32)
def APlus2B(a, b):
print(a + b * 2) # Create some ops to have nodes in the body
# Using 'print' to make lint happy
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
"Function can not return None"):
APlus2B([1.0], [2.0])
def testDefineFunction2ArgsOutputName(self):
@function.Defun(
dtypes.float32,
dtypes.float32,
func_name="APlus2B",
out_names=["my_result_name"])
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testDefineFunctionDuplicateOutputs(self):
@function.Defun(dtypes.float32, func_name="Duplicate")
def Duplicate(a):
b = a + 1.0
return b, b
g = ops.Graph()
with g.as_default():
Duplicate([3.0])
func_sig = g.as_graph_def().library.function[0].signature
# The names given to both outputs should be different
# even though the same tensor is emitted to both.
out_names = [a.name for a in func_sig.output_arg]
self.assertEqual(2, len(out_names))
self.assertNotEqual(out_names[0], out_names[1])
def testGradientFunc(self):
@function.Defun(dtypes.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
return x * x + 1.0
@function.Defun(dtypes.float32, dtypes.float32)
def XSquarePlusOneGrad(x, dy):
dx = functional_ops._symbolic_gradient(
input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
return dx
g = ops.Graph()
with g.as_default():
call_f = XSquarePlusOne([2.0])
call_g = XSquarePlusOneGrad([2.0], [0.1])
with session.Session() as sess:
self.assertAllClose([5.0], sess.run(call_f))
self.assertAllClose([0.4], sess.run(call_g))
def testTanhSymGrad(self):
@function.Defun(dtypes.float32)
def Forward(x):
return math_ops.reduce_sum(math_ops.tanh(x))
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y = Forward(x)
dx = gradients_impl.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True)))
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
@test_util.disable_c_api # Function gradients don't work with C API
def testCustomGradient(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def XentLossGrad(logits, labels, dloss):
dlogits = array_ops.reshape(dloss, [-1, 1]) * (
nn_ops.softmax(logits) - labels)
dlabels = array_ops.zeros_like(labels)
# Takes exp(dlogits) to differentiate it from the "correct" gradient.
return math_ops.exp(dlogits), dlabels
@function.Defun(dtype, dtype, grad_func=XentLossGrad)
def XentLoss(logits, labels):
return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
1)
g = ops.Graph()
with g.as_default():
logits = array_ops.placeholder(dtype)
labels = array_ops.placeholder(dtype)
loss = XentLoss(logits, labels)
dlogits = gradients_impl.gradients([loss], [logits])
x = np |
davelab6/pyfontaine | fontaine/charsets/noto_glyphs/notosansdevanagari_regular.py | Python | gpl-3.0 | 38,139 | 0.02365 | # -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansDevanagari-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0065) #uni0961
glyphs.append(0x0178) #glyph00376
glyphs.append(0x02D8) #glyph00728
glyphs.append(0x0179) #glyph00377
glyphs.append(0x02EF) #four
glyphs.append(0x024D) #glyph00589
glyphs.append(0x0310) #uniFEFF
glyphs.append(0x0176) #glyph00374
glyphs.append(0x01AC) #glyph00428
glyphs.append(0x01AD) #glyph00429
glyphs.append(0x01AA) #glyph00426
glyphs.append(0x01AB) #glyph00427
glyphs.append(0x01A8) #glyph00424
glyphs.append(0x0177) #glyph00375
glyphs.append(0x01A6) #glyph00422
glyphs.append(0x01A7) #glyph00423
glyphs.append(0x01A4) #glyph00420
glyphs.append(0x01A5) #glyph00421
glyphs.append(0x030A) #quotedblright
glyphs.append(0x0354) #uni1CEC
glyphs.append(0x0172) #glyph00370
glyphs.append(0x01A9) #glyph00425
glyphs.append(0x02A7) #glyph00679
glyphs.append(0x0183) #glyph00387
glyphs.append(0x0182) #glyph00386
glyphs.append(0x0181) #glyph00385
glyphs.append(0x0180) #glyph00384
glyphs.append(0x017F) #glyph00383
glyphs.append(0x0173) #glyph00371
glyphs.append(0x017D) #glyph00381
glyphs.append(0x017C) #glyph00380
glyphs.append(0x0185) #glyph00389
glyphs.append(0x0184) #glyph00388
glyphs.append(0x008D) #glyph00141
glyphs.append(0x008C) #glyph00140
glyphs.append(0x008F) #glyph00143
glyphs.append(0x008E) #glyph00142
glyphs.append(0x0091) #glyph00145
glyphs.append(0x0090) #glyph00144
glyphs.append(0x0093) #glyph00147
glyphs.append(0x0092) #glyph00146
glyphs.append(0x0095) #glyph00149
glyphs.append(0x0094) #glyph00148
glyphs.append(0x0277) #glyph00631
glyphs.append(0x0276) #glyph00630
glyphs.append(0x027D) #glyph00637
glyphs.append(0x027C) #glyph00636
glyphs.append(0x027B) #glyph00635
glyphs.append(0x027A) #glyph00634
glyphs.append(0x02DC) #glyph00732
glyphs.append(0x02DD) #glyph00733
glyphs.append(0x02DA) #glyph00730
glyphs.append(0x02DB) #glyph00731
glyphs.append(0x02C4) #glyph00708
glyphs.append(0x0353) #uni1CEB
glyphs.append(0x0136) #glyph00310
glyphs.append(0x02EB) #zero
glyphs.append(0x0137) #glyph00311
glyphs.append(0x0374) #glyph00884
glyphs.append(0x0375) #glyph00885
glyphs.append(0x0376) #glyph00886
glyphs.append(0x0139) #glyph00313
glyphs.append(0x0370) #glyph00880
glyphs.append(0x0371) #glyph00881
glyphs.append(0x0372) #glyph00882
glyphs.append(0x0373) #glyph00883
glyphs.append(0x013A) #glyph00314
glyphs.append(0x0378) #glyph00888
glyphs.append(0x0379) #glyph00889
glyphs.append(0x02E2) #quotesingle
glyphs.append(0x013C) #glyph00316
glyphs.append(0x013D) #glyph00317
glyphs.append(0x0307) #quoteleft
glyphs.append(0x0239) #glyph00569
glyphs.append(0x0238) #glyph00568
glyphs.append(0x0237) #glyph00567
glyphs.append(0x0236) #glyph00566
glyphs.append(0x0235) #glyph00565
glyphs.append(0x0234) #glyph00564
glyphs.append(0x0233) #glyph00563
glyphs.append(0x0232) #glyph00562
glyphs.append(0x0231) #glyph00561
glyphs.append(0x0230) #glyph00560
glyphs.append(0x00CE) #glyph00206
glyphs.append(0x00CF) #glyph00207
glyphs.append(0x00CC) #glyph00204
glyphs.append(0x00CD) #glyph00205
glyphs.append(0x00CA) #glyph00202
glyphs.append(0x00CB) #glyph00203
glyphs.append(0x00C8) #glyph00200
glyphs.append(0x00C9) #glyph00201
glyphs.append(0x0169) #glyph00361
glyphs.append(0x0168) #glyph00360
glyphs.append(0x016B) #glyph00363
glyphs.append(0x016A) #glyph00362
glyphs.append(0x016D) #glyph00365
glyphs.append(0x016C) #glyph00364
glyphs.append(0x00D0) #glyph00208
glyphs.append(0x00D1) #glyph00209
glyphs.append(0x021C) #glyph00540
glyphs.append(0x030D) #divide
glyphs.append(0x02BF) #glyph00703
glyphs.append(0x0303) #asciitilde
glyphs.append(0x037C) #glyph00892
glyphs.append(0x0206) #glyph00518
glyphs.append(0x0207) #glyph00519
glyphs.append(0x0377) #glyph00887
glyphs.append(0x0200) #glyph00512
glyphs.append(0x0201) #glyph00513
glyphs.append(0x01FE) #glyph00510
glyphs.append(0x01FF) #glyph00511
glyphs.append(0x0204) #glyph00516
glyphs.append(0x0205) #glyph00517
glyphs.append(0x0202) #glyph00514
glyphs.append(0x0203) #glyph00515
glyphs.append(0x000C) #uni0908
glyphs.append(0x000D) #uni0909
glyphs.append(0x0008) #uni0904
glyphs.append(0x0009) #uni0905
glyphs.append(0x000A) #uni0906
glyphs.append(0x000B) #uni0907
glyphs.append(0x0004) #uni0900
glyphs.append(0x0005) #uni0901
glyphs.append(0x0006) #uni0902
glyphs.append(0x0007) #uni0903
glyphs.append(0x01A3) #glyph00419
glyphs.append(0x0244) #glyph00580
glyphs.append(0x0011) #uni090D
glyphs.append(0x0012) #uni090E
glyphs.append(0x0013) #uni090F
glyphs.append(0x000E) #uni090A
glyphs.append(0x000F) #uni090B
glyphs.append(0x0010) #uni090C
glyphs.append(0x02A5) #glyph00677
glyphs.append(0x02A4) #glyph00676
glyphs.append(0x02A3) #glyph00675
glyphs.append(0x02A2) #glyph00674
glyphs.append(0x02A1) #glyph00673
glyphs.append(0x02A0) #glyph00672
glyphs.append(0x029F) #glyph00671
glyphs.append(0x029E) #glyph00670
glyphs.append(0x02FE) #asciicircum
glyphs.append(0x01A2) #glyph00418
glyphs.append(0x02A6) #glyph00678
glyphs.append(0x01D7) #glyph00471
glyphs.append(0x01D6) #glyph00470
glyphs.append(0x01D9) #glyph00473
glyphs.append(0x01D8) #glyph00472
glyphs.append(0x01DB) #glyph00475
glyphs.append(0x01DA) #glyph00474
glyphs.append(0x01DD) #glyph00477
glyphs.append(0x01DC) #glyph00476
glyphs.append(0x01DF) #glyph00479
glyphs.append(0x01DE) #glyph00478
glyphs.append(0x02F4) #nine
glyphs.append(0x02FC) #backslash
glyphs.append(0x017E) #glyph00382
glyphs.append(0x025A) #glyph00602
glyphs.append(0x025B) #glyph00603
glyphs.append(0x0258) #glyph00600
glyphs.append(0x0259) #glyph00601
glyphs.append(0x025E) #glyph00606
glyphs.append(0x025F) #glyph00607
glyphs.append(0x025C) #glyph00604
glyphs.append(0x025D) #glyph00605
glyphs.append(0x0260) #glyph00608
glyphs.append(0x0261) #glyph00609
glyphs.append(0x030E) #minus
glyphs.append(0x0225) #glyph00549
glyphs.append(0x021A) #glyph00538
glyphs.append(0x021B) #glyph00539
glyphs.append(0x0145) #glyph00325
glyphs.append(0x0144) #glyph00324
glyphs.append(0x0147) #glyph00327
glyphs.append(0x0146) #glyph00326
glyphs.append(0x0141) #glyph00321
glyphs.append(0x0140) #glyph00320
glyphs.append(0x0143) #glyph00323
glyphs.append(0x0142) #glyph00322
glyphs.append(0x0149) #glyph00329
gl | yphs.append(0x0148) #glyph00328
glyphs.append(0x0224) #glyph00548
glyphs.append(0x02BB) #glyph00699
glyphs.append(0x02BA) #glyph00698
glyphs.append(0x027E) #glyph00638
glyphs.append(0x031F) #uniA8E3
glyphs.append(0x02B3) # | glyph00691
glyphs.append(0x02B2) #glyph00690
glyphs.append(0 |
tethysplatform/tethys_dataset_services | tethys_dataset_services/tests/e2e_tests/ckan_engine_e2e_tests.py | Python | bsd-2-clause | 10,844 | 0.001752 | import os
import random
import string
import unittest
import requests
from tethys_dataset_services.engines import CkanDatasetEngine
try:
from tethys_dataset_services.tests.test_config import TEST_CKAN_DATASET_SERVICE
except ImportError:
print('ERROR: To perform tests, you must create a file in the "tests" package called "test_config.py". In this file'
'provide a dictionary called "TEST_CKAN_DATASET_SERVICE" with keys "API_ENDPOINT" and "APIKEY".')
exit(1)
def random_string_generator(size):
    """Return a random string of *size* lowercase letters and digits."""
    alphabet = string.ascii_lowercase + string.digits
    picks = [random.choice(alphabet) for _ in range(size)]
    return ''.join(picks)
class TestCkanDatasetEngine(unittest.TestCase):
def setUp(self):
# Auth
self.endpoint = TEST_CKAN_DATASET_SERVICE['ENDPOINT']
self.apikey = TEST_CKAN_DATASET_SERVICE['APIKEY']
self.username = TEST_CKAN_DATASET_SERVICE['USERNAME']
# Files
self.tests_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.files_root = os.path.join(self.tests_root, 'files')
self.support_root = os.path.join(self.tests_root, 'support')
# Create Test Engine
self.engine = CkanDatasetEngine(endpoint=self.endpoint,
apikey=self.apikey)
# Create Test Organization
self.test_org = random_string_generator(10)
data_dict = {
'name': self.test_org,
'users': [{'name': self.username}]
}
url, data, headers = self.engine._prepare_request(
'organization_create', data_dict=data_dict, apikey=self.apikey
)
status_code, response_text = self.engine._execute_request(url, data, headers)
if status_code != 200:
raise requests.RequestException('Unable to create group: {}'.format(response_text))
# Create Test Dataset
self.test_dataset_name = random_string_generator(10)
dataset_result = self.engine.create_dataset(name=self.test_dataset_name, version='1.0', owner_org=self.test_org)
if not dataset_result['success']:
raise requests.RequestException('Unable to create test dataset: {}'.format(dataset_result['error']))
self.test_dataset = dataset_result['result']
# Create Test Resource
self.test_resource_name = random_string_generator(10)
self.test_resource_url = 'http://home.byu.edu'
resource_result = self.engine.create_resource(self.test_dataset_name,
url=self.test_resource_url, format='zip')
if not resource_result['success']:
raise requests.RequestException('Unable to create test resource: {}'.format(resource_result['error']))
self.test_resource = resource_result['result']
def tearDown(self):
pass
# Delete test resource and dataset
self.engine.delete_dataset(dataset_id=self.test_dataset_name)
def test_create_dataset(self):
# Setup
new_dataset_name = random_string_generator(10)
# Execute
result = self.engine.create_dataset(name=new_dataset_name, owner_org=self.test_org)
# Verify Success
self.assertTrue(result['success'])
# Should return the new one
self.assertEqual(new_dataset_name, result['result']['name'])
# TEST search_datasets
result = self.engine.search_datasets(query={'name': new_dataset_name}, console=False)
# Verify Success
self.assertTrue(result['success'])
# Check search results
search_results = result['result']['results']
self.assertIn(new_dataset_name, search_results[0]['name'])
self.assertIn(self.test_org, search_results[0]['organization']['name'])
# TEST list_datasets
# Execute
result = self.engine.list_datasets()
# Verify Success
self.assertTrue(result['success'])
self.assertIn(new_dataset_name, result['result'])
# Delete
result = self.engine.delete_dataset(dataset_id=new_dataset_name)
# Check if success
self.assertTrue(result['success'])
def test_create_resource_file(self):
# Prepare
file_name = 'upload_test.txt'
save_name = random_string_generator(10)
file_to_upload = os.path.join(self.support_root, file_name)
# Execute
result = self.engine.create_resource(dataset_id=self.test_dataset_name,
name=save_name,
file=file_to_upload)
# Verify Success
self.assertTrue(result['success'])
# Verify name and url_type (which should be upload if file upload)
self.assertIn(save_name, result['result']['name'])
self.assertEqual(result['result']['url_type'], 'upload')
# TEST search resource
# Execute
result = self.engine.search_resources(query={'name': save_name})
# Verify Success
self.assertTrue(result['success'])
self.assertIn(save_name, result['result']['results'][-1]['name'])
# Delete
result = self.engine.delete_resource(resource_id=result['result']['results'][-1]['id'])
self.assertTrue(result['success'])
def test_create_resource_url(self):
# Prepare
new_resource_name = random_string_generator(10)
new_resource_url = 'http://home.byu.edu/'
# Execute
result = self.engine.create_resource(dataset_id=self.test_dataset_name,
url=new_resource_url,
name=new_resource_name)
# Verify Success
self.assertTrue(result['success'])
# Verify name and url_type (which should be upload if file upload)
self.assertIn(new_resource_name, result['result']['name'])
self.assertEqual(result['result']['url'], new_resource_url)
# TEST search resource
# Execute
result = self.engine.search_resources(query={'name': new_resource_name})
# Verify Success
self.assertTrue(result['success'])
self.assertIn(new_resource_name, result['result']['results'][-1]['name'])
self.assertIn(new_resource_url, result['result']['results'][-1]['url'])
# Delete
result = self.engine.delete_resource(resource_id=result['result']['results'][-1]['id'])
self.assertTrue(result['success'])
def test_update_dataset(self):
# Setup
notes = random_string_generator(10)
author = random_string_generator(5)
# Execute
result = self.engine.update_dataset(dataset_id=self.test_dataset_name,
author=author, notes=notes)
# Verify Success
self.assertTrue(result['success'])
# Verify new property
self.assertEqual(result['result']['author'], author)
self.assertEqual(result['result']['notes'], notes)
# TEST get_dataset
# Execute
result = self.engine.get_dataset(dataset_id=self.test_dataset_name)
# Verify Success
self.assertTrue(result['success'])
# Verify Name
self.assertEqual(result['result']['name'], self.test_dataset_name)
self.assertEqual(result['result']['author'], author)
self.assertEqual(result['result']['notes'], notes)
# TEST download_dataset
location = self.files_root
result = self.engine.download_dataset(self.test_dataset_name,
location=location)
# Result will return list of the file with .zip at the end. Check here
self.assertIn('.zip', result[0][-4:].lower())
download_file = os.path.basename(result[0])
location_final = os.path.join(self.files_root, download_file)
# Delete the file
if os.path.isfile(location_final):
os.remove(location_final)
else:
raise AssertionError('No file has been downloaded')
        # TEST delete_dataset
# Execute
result = self | .engine.delete_dataset(data |
cheesyc/2014KStateProgrammingCompetitionAnswers | 2011/1A.py | Python | mit | 386 | 0.007772 | num = input("What is the numerator")
dem = input("What is the denominator")
counta = 2
countb = 2
def math (num,dem):
| remainsa = 1
remainsb = 1
remains = remainsa - remainsb
while r | emains > 0:
a = num / counta
b = dem / countb
remainsa = num % counta
remainsb = num % countb
remains = remainsa - remainsb
if remains =
|
erkanay/django | tests/migrations/test_operations.py | Python | bsd-3-clause | 70,029 | 0.002956 | from __future__ import unicode_literals
import unittest
try:
import sqlparse
except ImportError:
sqlparse = None
from django import test
from django.test import override_settings
from django.db import connection, migrations, models, router
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.db.models.fields import NOT_PROVIDED
from django.db.transaction import atomic
from django.db.utils import IntegrityError, DatabaseError
from .test_base import MigrationTestBase
class OperationTestBase(MigrationTestBase):
    """
    Common functions to help test operations.
    """

    def apply_operations(self, app_label, project_state, operations):
        """Apply *operations* to the database; return the resulting state."""
        migration = Migration('name', app_label)
        migration.operations = operations
        # Fixed: 'schema_editor' was garbled by a stray ' | ' artifact.
        with connection.schema_editor() as editor:
            return migration.apply(project_state, editor)

    def unapply_operations(self, app_label, project_state, operations):
        """Reverse *operations* against the database; return the state."""
        migration = Migration('name', app_label)
        migration.operations = operations
        with connection.schema_editor() as editor:
            # Fixed: 'migration' was garbled by a stray ' | ' artifact.
            return migration.unapply(project_state, editor)

    def make_test_state(self, app_label, operation, **kwargs):
        """
        Makes a test state using set_up_test_model and returns the
        original state and the state after the migration is applied.
        """
        project_state = self.set_up_test_model(app_label, **kwargs)
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        return project_state, new_state

    def set_up_test_model(self, app_label, second_model=False, third_model=False, related_model=False, mti_model=False, proxy_model=False, unique_together=False, options=False):
        """
        Creates a test model state and database table.
        """
        # Delete the tables if they already exist
        with connection.cursor() as cursor:
            # Start with ManyToMany tables
            try:
                cursor.execute("DROP TABLE %s_pony_stables" % app_label)
            except DatabaseError:
                pass
            try:
                cursor.execute("DROP TABLE %s_pony_vans" % app_label)
            except DatabaseError:
                pass
            # Then standard model tables
            try:
                cursor.execute("DROP TABLE %s_pony" % app_label)
            except DatabaseError:
                pass
            try:
                cursor.execute("DROP TABLE %s_stable" % app_label)
            except DatabaseError:
                pass
            try:
                cursor.execute("DROP TABLE %s_van" % app_label)
            except DatabaseError:
                pass
        # Make the "current" state
        model_options = {
            "swappable": "TEST_SWAP_MODEL",
            "unique_together": [["pink", "weight"]] if unique_together else [],
        }
        if options:
            model_options["permissions"] = [("can_groom", "Can groom")]
        operations = [migrations.CreateModel(
            "Pony",
            [
                ("id", models.AutoField(primary_key=True)),
                ("pink", models.IntegerField(default=3)),
                ("weight", models.FloatField()),
            ],
            options=model_options,
        )]
        if second_model:
            operations.append(migrations.CreateModel(
                "Stable",
                [
                    ("id", models.AutoField(primary_key=True)),
                ]
            ))
        if third_model:
            operations.append(migrations.CreateModel(
                "Van",
                [
                    ("id", models.AutoField(primary_key=True)),
                ]
            ))
        if related_model:
            operations.append(migrations.CreateModel(
                "Rider",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("pony", models.ForeignKey("Pony")),
                    ("friend", models.ForeignKey("self"))
                ],
            ))
        if mti_model:
            operations.append(migrations.CreateModel(
                "ShetlandPony",
                fields=[
                    ('pony_ptr', models.OneToOneField(
                        auto_created=True,
                        primary_key=True,
                        to_field='id',
                        serialize=False,
                        to='Pony',
                    )),
                    ("cuteness", models.IntegerField(default=1)),
                ],
                bases=['%s.Pony' % app_label],
            ))
        if proxy_model:
            operations.append(migrations.CreateModel(
                "ProxyPony",
                fields=[],
                options={"proxy": True},
                bases=['%s.Pony' % app_label],
            ))
        return self.apply_operations(app_label, ProjectState(), operations)
class OperationTests(OperationTestBase):
"""
Tests running the operations and making sure they do what they say they do.
Each test looks at their state changing, and then their database operation -
both forwards and backwards.
"""
def test_create_model(self):
"""
Tests the CreateModel operation.
Most other tests use this operation as part of setup, so check failures here first.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
self.assertEqual(operation.describe(), "Create model Pony")
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(len(definition[1]), 2)
self.assertEqual(len(definition[2]), 0)
self.assertEqual(definition[1][0], "Pony")
def test_create_model_with_unique_after(self):
"""
Tests the CreateModel operation directly followed by an
AlterUniqueTogether (bug #22844 - sqlite remake issues)
"""
operation1 = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
operation2 = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("number", models.IntegerField(default=1)),
("pony", models.ForeignKey("test_crmoua.Pony")),
],
)
operation3 = migrations.AlterUniqueTogether(
"Rider",
[
("number", "pony"),
],
)
# Test the database alteration
project_state = ProjectState()
self.assertTableNotExists("test_crmoua_pony")
self.assertTableNotExists("test_crmoua_rider")
with connection.schema_editor() as editor:
new_state = project_state.clone()
operation1.state_forwards("test_crmoua", new_state)
operation1.database_forwards("test_crmoua", editor, project_state, new_state)
project_state, new_state |
rogeriofalcone/treeio | events/hmodule.py | Python | mit | 444 | 0.002252 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Events: Hardtree module definition
"""
# Module registration metadata consumed by the Hardtree/Treeio core.
PROPERTIES = {
    'title': 'Calendar',
    'details': 'Manage events and calendars',
    'url': '/calendar/',
    'system': True,
    'type': 'minor',
}
# URL regexes owned by this module.
# Fixed: pattern was garbled by a stray ' | ' artifact ('^/ | calendar/').
URL_PATTERNS = [
    '^/calendar/',
]
|
dryan/django-session-activity | session_activity/urls.py | Python | mit | 397 | 0 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from django.conf.urls import patterns, url
from .views import session_list_view, sign_out_other_view
# URL routes for the session-activity views.
# Fixed: 'session_list_view' and the line ending were garbled by stray
# ' | ' artifacts.
urlpatterns = [
    url(r"^$", session_list_view, name="session_activity_list"),
    url(r"^sign-out-other/", sign_out_other_view, name="session_sign_out"),
]
|
jacobmarks/QTop | src/tests/test_DSP.py | Python | gpl-3.0 | 976 | 0.033811 | #
# QTop
#
# Copyright (c) 2016 Jacob Marks (jacob.marks@yale.edu)
#
# This file is part of QTop.
#
# QTop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
import sys
sys.path.append('../')
from src import color_codes, error_models, visualization
sys.path.append('decoders/')
from dsp import *
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
################## Testing ##################
# L: lattice size, d: code parameter, p: physical error rate —
# TODO(review): confirm parameter meanings against color_codes.Color_6_6_6.
L, d, p = 13, 2, 0.08
code = color_codes.Color_6_6_6(L,d)
model = error_models.PhaseError()
# Run one error-correction cycle with phase errors at rate p.
code = code.CodeCycle(model, p)
visualization.PlotPlaquette(code, "Before Decoding", 1)
decoder = DSP_decoder()
code = decoder(code)
# Report whether decoding left a logical error behind.
if code.hasLogicalError():
    print "ERROR"
else:
    print "GOOD JOB!"
visualization.PlotPlaquette(code, "After Decoding", 3)
plt.show()
|
Nocturnal42/runmyrobot | test/gpio_test.py | Python | apache-2.0 | 566 | 0.014134 | import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
#GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
#GPIO.setup(25, GPIO.OUT, initial=GPIO.LOW)
#GPIO.add_event_detect(4, GPIO.BOTH)
#def my_callback():
# GPIO.output(25, GPIO.input(4))
#GPIO.add_event_callback(4, my_callback)
def call( | x):
print "something happened:", x
#for i in range(0, 26):
# Configure pin 17 as an input (range kept so more pins are easy to add back).
for i in range(17, 18):
#    print i
    GPIO.setup(i, GPIO.IN)
#    #GPIO.add_event_detect(i, GPIO.BOTH)
#    GPIO.add_event_callback(i, call)
# Poll and print the pin level once per second, forever.
while True:
    print GPIO.input(17)
    time.sleep(1)
|
freyes/juju | acceptancetests/jujucharm.py | Python | agpl-3.0 | 6,215 | 0 | """Helpers to create and manage local juju charms."""
from contextlib import contextmanager
import logging
import os
import re
import subprocess
import pexpect
import yaml
from utility import (
ensure_deleted,
JujuAssertionError,
)
__metaclass__ = type
log = logging.getLogger("jujucharm")
class Charm:
"""Representation of a juju charm."""
DEFAULT_MAINTAINER = "juju-qa@lists.canonical.com"
DEFAULT_SERIES = ("bionic", "xenial", "trusty")
DEFAULT_DESCRIPTION = "description"
NAME_REGEX = re.compile('^[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*$')
def __init__(self, name, summary, maintainer=None, series=None,
description=None, storage=None, ensure_valid_name=True):
if ensure_valid_name and Charm.NAME_REGEX.match(name) is None:
raise JujuAssertionError(
'Invalid Juju Charm Name, "{}" does not match "{}".'.format(
name, Charm.NAME_REGEX.pattern))
self.metadata = {
"name": name,
"summary": summary,
"maintainer": maintainer or self.DEFAULT_MAINTAINER,
"series": series or self.DEFAULT_SERIES,
"description": description or self.DEFAULT_DESCRIPTION
}
| if storage is not None:
self.metadata["storage"] = storage
self._hook_scripts = {}
def to_dir(self, directory):
"""Serialize charm into a new directory."""
with open(os.path.join(directory, "metadata.yaml"), "w") as f:
| yaml.safe_dump(self.metadata, f, default_flow_style=False)
if self._hook_scripts:
hookdir = os.path.join(directory, "hooks")
os.mkdir(hookdir)
for hookname in self._hook_scripts:
with open(os.path.join(hookdir, hookname), "w") as f:
os.fchmod(f.fileno(), 0o755)
f.write(self._hook_scripts[hookname])
def to_repo_dir(self, repo_dir):
"""Serialize charm into a directory for a repository of charms."""
charm_dir = os.path.join(
repo_dir, self.default_series, self.metadata["name"])
os.makedirs(charm_dir)
self.to_dir(charm_dir)
return charm_dir
@property
def default_series(self):
series = self.metadata.get("series", self.DEFAULT_SERIES)
if series and isinstance(series, (tuple, list)):
return series[0]
return series
def add_hook_script(self, name, script):
self._hook_scripts[name] = script
def local_charm_path(charm, juju_ver, series=None, repository=None,
platform='ubuntu'):
"""Create either Juju 1.x or 2.x local charm path."""
if juju_ver.startswith('1.'):
if series:
series = '{}/'.format(series)
else:
series = ''
local_path = 'local:{}{}'.format(series, charm)
return local_path
else:
charm_dir = {
'ubuntu': 'charms',
'win': 'charms-win',
'centos': 'charms-centos'}
abs_path = charm
if repository:
abs_path = os.path.join(repository, charm)
elif os.environ.get('JUJU_REPOSITORY'):
repository = os.path.join(
os.environ['JUJU_REPOSITORY'], charm_dir[platform])
abs_path = os.path.join(repository, charm)
return abs_path
class CharmCommand:
default_api_url = 'https://api.jujucharms.com/charmstore'
def __init__(self, charm_bin, api_url=None):
"""Simple charm command wrapper."""
self.charm_bin = charm_bin
self.api_url = sane_charm_store_api_url(api_url)
def _get_env(self):
return {'JUJU_CHARMSTORE': self.api_url}
@contextmanager
def logged_in_user(self, user_email, password):
"""Contextmanager that logs in and ensures user logs out."""
try:
self.login(user_email, password)
yield
finally:
try:
self.logout()
except Exception as e:
log.error('Failed to logout: {}'.format(str(e)))
default_juju_data = os.path.join(
os.environ['HOME'], '.local', 'share', 'juju')
juju_data = os.environ.get('JUJU_DATA', default_juju_data)
token_file = os.path.join(juju_data, 'store-usso-token')
cookie_file = os.path.join(os.environ['HOME'], '.go-cookies')
log.debug('Removing {} and {}'.format(token_file, cookie_file))
ensure_deleted(token_file)
ensure_deleted(cookie_file)
def login(self, user_email, password):
log.debug('Logging {} in.'.format(user_email))
try:
command = pexpect.spawn(
self.charm_bin, ['login'], env=self._get_env())
command.expect(r'(?i)Login to Ubuntu SSO')
command.expect(r'(?i)Press return to select.*\.')
command.expect(r'(?i)E-Mail:')
command.sendline(user_email)
command.expect(r'(?i)Password')
command.sendline(password)
command.expect(r'(?i)Two-factor auth')
command.sendline()
command.expect(pexpect.EOF)
if command.isalive():
raise AssertionError(
'Failed to log user in to {}'.format(
self.api_url))
except (pexpect.TIMEOUT, pexpect.EOF) as e:
raise AssertionError(
'Failed to log user in: {}'.format(e))
def logout(self):
log.debug('Logging out.')
self.run('logout')
def run(self, sub_command, *arguments):
try:
output = subprocess.check_output(
[self.charm_bin, sub_command] + list(arguments),
env=self._get_env(),
stderr=subprocess.STDOUT)
return output
except subprocess.CalledProcessError as e:
log.error(e.output)
raise
def sane_charm_store_api_url(url):
"""Ensure the store url includes the right parts."""
if url is None:
return CharmCommand.default_api_url
return '{}/charmstore'.format(url)
|
amurzeau/streamlink-debian | src/streamlink/plugins/app17.py | Python | bsd-2-clause | 1,952 | 0.001025 | import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
from streamlink.stream.http import HTTPStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://17\.live/.+/live/(?P<channel>[^/&?]+)"
))
class App17(Plugin):
def _get_streams(self):
channel = self.match.group("channel")
self.session.http.headers.update({"Referer": self.url})
data = self.session.http.post(
f"https://wap-api.17app.co/api/v1/lives/{channel}/viewers/alive",
data={"liveStreamID": channel},
schema=validate.Schema(
validate.parse_json(),
validate.any(
{"rtmpUrls": [{
validate.optional("provider"): validate.any(int, None),
"url": validate.url(path=validate.endswith(".flv")),
}]},
{"errorCode": int, "errorMessage": str},
),
),
acceptable_status=(200, 403, 404, 420))
log.trace(f"{data!r}")
if data.get("errorCode"):
log.error(f"{data['errorCode']} - {data['errorMessage'].replace('Something wrong: ', '')}")
return
flv_url = data["rtmpUrls"][0]["url"]
yield "live", HTTPStream(self.session, flv_url)
if "wansu-" in flv_url:
hls_url = flv_url.replace(".flv", "/playlist.m3u8")
else:
hls_url = flv_url.replace( | "live-hdl", "live-hls").replace(".flv", ".m3u8")
s = HLSStream.parse_variant_playlist(self.session, hls_url)
if not s:
yield "live", HLSStream(self.session, hls_url)
else:
if | len(s) == 1:
for _n, _s in s.items():
yield "live", _s
else:
yield from s.items()
__plugin__ = App17
|
codecakes/cloudroutes-service | src/web/reactions.py | Python | agpl-3.0 | 5,105 | 0 | ######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# Reaction Class
######################################################################
import rethinkdb as r
class Reaction(object):
def __init__(self, rid=None):
''' Create a reaction object and set attributes as None for now '''
self.rid = rid
self.name = None
self.rtype = None
self.uid = None
self.trigger = None
self.lastrun = None
self.frequency = None
self.data = {}
def createReaction(self, rdb):
''' This will create a reaction with the supplied information '''
reactdata = {
'name': self.name,
'rtype': self.rtype,
'uid': self.uid,
'trigger': self.trigger,
'frequency': self.frequency,
'lastrun': 0,
'data': self.data}
if self.exists(reactdata['name'], reactdata['uid'], rdb):
return 'exists'
else:
results = r.table('reactions').insert(reactdata).run(rdb)
if results['inserted'] == 1:
qdata = {}
qdata['item'] = reactdata
qdata['action'] = 'create'
qdata['type'] = 'reaction'
qdata['item']['rid'] = results['generated_keys'][0]
q1 = r.table('dc1queue').insert(qdata).run(rdb)
q2 = r.table('dc2queue').insert(qdata).run(rdb)
return results['generated_keys'][0]
else:
return False
def editReaction(self, rdb):
''' This will edit a reaction with the supplied information '''
reactdata = {
'name': self.name,
'rtype': self.rtype,
'uid': self.uid,
'trigger': self.trigger,
'frequency': self.frequency,
'lastrun': self.lastrun,
'data': self.data}
results = r.table('reactions').get(self.rid).update(reactdata).run(rdb)
if results['replaced'] == 1:
qdata = {}
qdata['item'] = reactdata
qdata['action'] = 'edit'
qdata['type'] = 'reaction'
qdata['item']['rid'] = self.rid
q1 = r.table('dc1queue').insert(qdata).run(rdb)
q2 = r.table('dc2queue').insert(qdata).run(rdb)
return "edit true"
| else:
return "edit failed"
def deleteReaction(self, uid, rid, rdb):
''' This will delete a specified reaction '''
reaction = r.table('reactions').get(rid).run(rdb)
if reaction['uid'] == uid:
delete = r.table('reactions').get(rid).delete().run(rdb)
if delete['deleted'] == 1:
qdata = {}
qdat | a['item'] = reaction
qdata['action'] = 'delete'
qdata['type'] = 'reaction'
qdata['item']['rid'] = rid
q1 = r.table('dc1queue').insert(qdata).run(rdb)
q2 = r.table('dc2queue').insert(qdata).run(rdb)
return True
else:
return False
else:
return False
def exists(self, name, uid, rdb):
'''
This will check to see if the
specified reactions already exists or not
'''
result = r.table('reactions').filter(
{'name': name, 'uid': uid}).count().run(rdb)
if result >= 1:
return True
else:
return False
def getRID(self, searchstring, rdb):
'''
This will lookup a reaction by name and uid (name:uid)
and return the rid
'''
strings = searchstring.split(":")
result = r.table('reactions').filter(
{'name': strings[0], 'uid': strings[1]}).run(rdb)
xdata = {}
for x in result:
key = '{0}:{1}'.format(x['name'], x['uid'])
xdata[key] = x['id']
return xdata[searchstring]
def get(self, method, lookup, rdb):
'''
This will return a reactions
information based on the data provided
'''
if method == 'rid':
rid = lookup
else:
rid = self.getRID(lookup, rdb)
results = r.table('reactions').get(rid).run(rdb)
if results:
self.rid = rid
self.name = results['name']
self.rtype = results['rtype']
self.uid = results['uid']
self.trigger = results['trigger']
self.frequency = results['frequency']
self.lastrun = results['lastrun']
self.data = results['data']
return self
else:
return False
def count(self, uid, rdb):
''' This will return the numerical count of reactions by user id '''
result = r.table('reactions').filter({'uid': uid}).count().run(rdb)
return result
if __name__ == '__main__': # pragma: no cover
pass # pragma: no cover
|
BoxeePlay/svtplay3 | tv.boxeeplay.svtplay3/async_task.py | Python | mit | 995 | 0.003015 | import threading
from logger import BPLog, Level
class AsyncTask(threading.Thread):
def __init__(self, target, args=(), kwargs={}):
threading.Thread.__init__(self)
self.target = target
self.args = args
self.kwargs = kwargs
self.setDaemon(True)
self.returned = None
self.exc_info = None
def run(self):
try:
self.returned = self.target(*self.args, **self.kwargs)
except Exception:
import sys
self.exc_info= sys.exc_info()
def get_result(self):
if sel | f.isAlive():
raise RuntimeError("Not finished yet")
if self.exc_info:
raise self.exc_info[1], None, self.exc_info[2]
return self.returned
class AsyncError(Exception):
def __init__(self, message, e):
Exception.__init__(self, message)
self.log(message, e)
def log(self, message, e):
BPLog("AsyncTask: %s! %s" % (message, str(e)), Level.ERROR) | |
Pekkoo/OhSiHAPP | ohsiha/urls.py | Python | mit | 568 | 0.001761 | from django.conf.urls import url, include
from django.contrib impor | t admin
import ohsihapp.views
import django.contrib.auth.views
import comment.urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^login/$', django.contrib.auth.views.login, {'template_name': 'ohsihapp/login.html'}),
url(r'^register/$', ohsihapp.views.register),
url(r'^soundcloud/$', ohsihapp.views.soundcloud),
url(r'^logou | t/$', django.contrib.auth.views.logout, {'next_page': '/'}),
url(r'^$', ohsihapp.views.index),
url(r'^comment/', include(comment.urls))
]
|
tomjmul/wemo | wemo/examples/Randomize.py | Python | bsd-3-clause | 1,078 | 0.008349 |
import random
import datetime
import time
import wemo
from wemo.environment import Environment
# http://pydoc.net/Python/wemo/0.7.3/wemo.examples.watch/
if __name__ == "__main__":
print("")
print("WeMo Randomizer")
print("---------------")
env = Environment()
# TODO: run from 10am to 10pm
try:
env.star | t()
env.discover(100)
print(env.list_switches())
print(env.list_motions())
print("---------------")
while True:
# http://stackoverflow.com/questions/306400/how-do-i-randomly-select-an-item-from-a-list-using-python
switchRND = env.get_switch( random.choice( env.list_switches() ) )
print(switchRND)
switchRND.toggle()
env.wait(90)
except (KeyboardInterrupt, SystemExit):
| print("---------------")
print("Goodbye!")
print("---------------")
# Turn off all switches
for switch in ( env.list_switches() ):
print("Turning Off: " + switch)
env.get_switch( switch ).off()
|
TrevorLowing/PyGames | pysollib/tk/selectcardset.py | Python | gpl-2.0 | 15,583 | 0.003915 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##---------------------------------------------------------------------------##
##
## Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
## Copyright (C) 2003 Mt. Hood Playing Card Co.
## Copyright (C) 2005-2009 Skomoroh
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##---------------------------------------------------------------------------##
__all__ = ['SelectCardsetDialogWithPreview']
# imports
import os
import Tkinter
# PySol imports
from pysollib.mfxutil import KwStruct
from pysollib.util import CARDSET
from pysollib.resource import CSI
# Toolkit imports
from tkutil import loadImage
from tkwidget import MfxDialog, MfxScrolledCanvas
from tkcanvas import MfxCanvasImage
from selecttree import SelectDialogTreeLeaf, SelectDialogTreeNode
from selecttree import SelectDialogTreeData, SelectDialogTreeCanvas
# ************************************************************************
# * Nodes
# ************************************************************************
class SelectCardsetLeaf(SelectDialogTreeLeaf):
pass
class SelectCardsetNode(SelectDialogTreeNode):
def _getContents(self):
contents = []
for obj in self.tree.data.all_objects:
if self.select_func(obj):
node = SelectCardsetLeaf(self.tree, self, text=obj.name, key=obj.index)
contents.append(node)
return contents or self.tree.data.no_contents
# ************************************************************************
# * Tree database
# ************************************************************************
class SelectCardsetData(SelectDialogTreeData):
def __init__(self, manager, key):
SelectDialogTreeData.__init__(self)
self.all_objects = manager.getAllSortedByName()
self.all_objects = [obj for obj in self.all_objects if not obj.error]
self.no_contents = [ SelectCardsetLeaf(None, None, _("(no cardsets)"), key=None), ]
#
select_by_type = None
items = CSI.TYPE.items()
items.sort(lambda a, b: cmp(a[1], b[1]))
nodes = []
for key, name in items:
if manager.registered_types.get(key):
nodes.append(SelectCardsetNode(None, name, lambda cs, key=key: key == cs.si.type))
if nodes:
select_by_type = SelectCardsetNode(None, _("by Type"), tuple(nodes), expanded=1)
#
select_by_style = No | ne
| items = CSI.STYLE.items()
items.sort(lambda a, b: cmp(a[1], b[1]))
nodes = []
for key, name in items:
if manager.registered_styles.get(key):
nodes.append(SelectCardsetNode(None, name, lambda cs, key=key: key in cs.si.styles))
if nodes:
nodes.append(SelectCardsetNode(None, _("Uncategorized"), lambda cs: not cs.si.styles))
select_by_style = SelectCardsetNode(None, _("by Style"), tuple(nodes))
#
select_by_nationality = None
items = CSI.NATIONALITY.items()
items.sort(lambda a, b: cmp(a[1], b[1]))
nodes = []
for key, name in items:
if manager.registered_nationalities.get(key):
nodes.append(SelectCardsetNode(None, name, lambda cs, key=key: key in cs.si.nationalities))
if nodes:
nodes.append(SelectCardsetNode(None, _("Uncategorized"), lambda cs: not cs.si.nationalities))
select_by_nationality = SelectCardsetNode(None, _("by Nationality"), tuple(nodes))
#
select_by_date = None
items = CSI.DATE.items()
items.sort(lambda a, b: cmp(a[1], b[1]))
nodes = []
for key, name in items:
if manager.registered_dates.get(key):
nodes.append(SelectCardsetNode(None, name, lambda cs, key=key: key in cs.si.dates))
if nodes:
nodes.append(SelectCardsetNode(None, _("Uncategorized"), lambda cs: not cs.si.dates))
select_by_date = SelectCardsetNode(None, _("by Date"), tuple(nodes))
#
self.rootnodes = filter(None, (
SelectCardsetNode(None, _("All Cardsets"), lambda cs: 1, expanded=len(self.all_objects)<=12),
SelectCardsetNode(None, _("by Size"), (
SelectCardsetNode(None, _("Tiny cardsets"), lambda cs: cs.si.size == CSI.SIZE_TINY),
SelectCardsetNode(None, _("Small cardsets"), lambda cs: cs.si.size == CSI.SIZE_SMALL),
SelectCardsetNode(None, _("Medium cardsets"), lambda cs: cs.si.size == CSI.SIZE_MEDIUM),
SelectCardsetNode(None, _("Large cardsets"), lambda cs: cs.si.size == CSI.SIZE_LARGE),
SelectCardsetNode(None, _("XLarge cardsets"), lambda cs: cs.si.size == CSI.SIZE_XLARGE),
), expanded=1),
select_by_type,
select_by_style,
select_by_date,
select_by_nationality,
))
class SelectCardsetByTypeData(SelectDialogTreeData):
def __init__(self, manager, key):
SelectDialogTreeData.__init__(self)
self.all_objects = manager.getAllSortedByName()
self.no_contents = [ SelectCardsetLeaf(None, None, _("(no cardsets)"), key=None), ]
#
items = CSI.TYPE.items()
items.sort(lambda a, b: cmp(a[1], b[1]))
nodes = []
for key, name in items:
if manager.registered_types.get(key):
nodes.append(SelectCardsetNode(None, name, lambda cs, key=key: key == cs.si.type))
select_by_type = SelectCardsetNode(None, _("by Type"), tuple(nodes), expanded=1)
#
self.rootnodes = filter(None, (
select_by_type,
))
# ************************************************************************
# * Canvas that shows the tree
# ************************************************************************
class SelectCardsetTree(SelectDialogTreeCanvas):
data = None
class SelectCardsetByTypeTree(SelectDialogTreeCanvas):
data = None
# ************************************************************************
# * Dialog
# ************************************************************************
class SelectCardsetDialogWithPreview(MfxDialog):
Tree_Class = SelectCardsetTree
TreeDataHolder_Class = SelectCardsetTree
TreeData_Class = SelectCardsetData
def __init__(self, parent, title, app, manager, key=None, **kw):
kw = self.initKw(kw)
MfxDialog.__init__(self, parent, title, kw.resizable, kw.default)
top_frame, bottom_frame = self.createFrames(kw)
self.createBitmaps(top_frame, kw)
#
if key is None:
key = manager.getSelected()
self.manager = manager
self.key = key
#padx, pady = kw.padx, kw.pady
padx, pady = 5, 5
if self.TreeDataHolder_Class.data is None:
self.TreeDataHolder_Class.data = self.TreeData_Class(manager, key)
#
self.top.wm_minsize(400, 200)
if self.top.winfo_screenwidth() >= 800:
w1, w2 = 216, 400
else:
w1, w2 = 200, 300
paned_window = Tkinter.PanedWindow(top_frame)
paned_window.pack(expand=True, fill='both')
left_frame = Tkinter.Frame(paned_window)
right_frame = Tkinter.Frame(paned_window)
paned_window.add(left_frame)
paned_window.add(right_frame)
font = app.getFont("default")
self.tree = self.Tree_Class(self, left_frame, key=key,
default=kw.default,
|
SAP/lumira-extension-da-linkedin | source/LinkedinExtractor.py | Python | apache-2.0 | 6,132 | 0.012231 | # -*- coding: utf-8 -*-
#imports
from linkedin import linkedin
import easygui
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import requests
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
Mode = enum('PREVIEW', 'EDIT', 'REFRESH')
mode = 0
paramslist = []
key = ''
i = 0
msg = "Enter Required Information"
title = "Linkedin Extractor"
fieldNames = ["Consumer Key","Consumer Secret",
"User Key","User Secret"]
fieldValues = [] # we start with blanks for the values
for i in range(4):
fieldValues.append(i)
for i in range(len(sys.argv)):
if str(sys.argv[i]).lower() == "-mode" and (i + 1) < len(sys.argv):
if str(sys.argv[i + 1]).lower() == "preview":
mode = Mode.PREVIEW
elif str(sys.argv[i + 1]).lower() == "edit":
mode = Mode.EDIT
elif str(sys.argv[i + 1]).lower() == "refresh":
mode = Mode.REFRESH
elif str(sys.argv[i]).lower() == "-size":
size = int(sys.argv[i + 1])
elif str(sys.argv[i]).lower() == "-params":
params = str(sys.argv[i + 1])
paramslist = params.split(';')
i += 1
def setArgs(fieldValues):
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
USER_TOKEN = ''
USER_SECRET = ''
RETURN_URL = '' # Not required for developer authentication
fieldValues[0] = ''
fieldValues[1] = ''
fieldValues[2] = ''
fieldValues[3] = ''
return fieldValues
def parseArgs(fieldValues):
#if paramslist is None: break
for i in range(len(paramslist)):
if paramslist[i].split('=')[0].lower() == 'consumer_key':
try:
fieldValues[0] = paramslist[i].split('=')[1].decode('hex')
except:
fieldValues[0] = 'ENTER_CONSUMER_KEY'
elif paramslist[i].split('=')[0].lower() == 'consumer_secret':
try:
fieldValues[1] = paramslist[i].split('=')[1].decode('hex')
except:
fieldValues[1] = 'ENTER_CONSUMER_SECRET'
elif paramslist[i].split('=')[0].lower() == 'user_token':
try:
fieldValues[2] = paramslist[i].split('=')[1].decode('hex')
except:
fieldValues[2] = 'ENTER_USER_TOKEN'
elif paramslist[i].split('=')[0].lower() == 'user_secret':
try:
fieldValues[3] = paramslist[i].split('=')[1].decode('hex')
except:
fieldValues[3] = 'ENTER_USER_SECRET'
i += 1
return fieldValues
def getScreenInput(fieldValues):
fieldValues = easygui.multenterbox(msg = msg, title = title, fields = fieldNames, values = fieldValues )
# make sure that none of the fields was left blank
while 1:
if fieldValues == None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg += ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "":
break # no problems found
fieldValues = easygui.multenterbox(errmsg, title, fieldNames, fieldValues)
return fieldValues
def printData(fieldValues):
if fieldValues != None:
CONSUMER_KEY = fieldValues[0]
CONSUMER_SECRET = fieldValues[1]
USER_TOKEN = fieldValues[2]
USER_SECRET = fieldValues[3]
RETURN_URL = ''
print "beginDSInfo"
print """fileName;#;true
csv_first_row_has_column_names;true;true;
csv_separator;|;true
csv_number_grouping;,;true
csv_number_decimal;.;true
csv_date_format;d.M.yyyy;true"""
print ''.join(['consumer_key;', str(fieldValues[0]).encode('hex'), ';true'])
print ''.join(['consumer_secret;', str(fieldValues[1]).encode('hex'), ';true'])
print ''.join(['user_token;', str(fieldValues[2]).encode('hex'), ';true'])
print ''.join(['user_secret;', str(fieldValues[3]).encode('hex'), ';true'])
print "endDSInfo"
print "beginData"
print 'First_Name, Last_Name, Location'
#try:
# Instantiate the developer authentication class
auth = linkedin.LinkedInDeveloperAuthentication(CONSUMER_KEY, CONSUMER_SECRET,
USER_TOKEN, USER_SECRET,
RETURN_URL,
permissions=linkedin.PERMISSIONS.enums.values())
# Pass it in to the app...
app = linkedin.LinkedInApplication(auth)
try:
connections = app.get_connections()
except requests.ConnectionError:
easygui.msgbox('Connection Error, Extension Doesnt Support Proxies Yet')
#print connections
for c in connections['values']:
#if c.has_key('location')]
| try:
print ''.join([c['firstName'].replace(',', ''), ',']),
except:
print ''.join(['None', ', ']),
try:
print ''.join([c['lastName'].replace(',', ''), ',']),
except:
| print ''.join(['None', ', ']),
try:
print ''.join([c['location']['name'].replace(',', '')])
except:
print ''.join(['None'])
print "endData"
else:
print "beginDSInfo"
print "endDSInfo"
print "beginData"
print """Error
User Cancelled"""
print "endData"
if mode == Mode.PREVIEW:
fieldValues = setArgs(fieldValues)
#easygui.textbox(msg = 'preview1', text = sys.argv)
fieldValues = getScreenInput(fieldValues)
#easygui.textbox(msg = 'preview2', text = fieldValues)
printData(fieldValues)
elif mode == Mode.EDIT:
#easygui.textbox(msg = 'edit1', text = sys.argv)
fieldValues = parseArgs(fieldValues)
#easygui.textbox(msg = 'edit2', text = fieldValues)
fieldValues = getScreenInput(fieldValues)
#easygui.textbox(msg = 'edit2', text = fieldValues)
printData(fieldValues)
elif mode == Mode.REFRESH:
fieldValues = parseArgs(fieldValues)
#easygui.textbox(msg = 'refresh1', text = sys.argv)
printData(fieldValues)
|
emilybache/KataMedicineClash | Python/medicine.py | Python | mit | 228 | 0.017544 |
class Medicine(object):
def __init__(self, name):
self.name = name
self.prescriptions = []
def add_prescription(self, prescription):
self.prescriptions.append(prescription)
| ||
timonweb/djow | project_name/apps/custom_comments/models.py | Python | mit | 138 | 0.014493 | from django.db import | models
from django_comments.models import Comment
# Create your models here.
class CustomComment(Comment):
| pass |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/core/tests/test_run.py | Python | apache-2.0 | 13,433 | 0.001489 | # encoding: utf-8
"""Tests for code execution (%run and related), which is particularly tricky.
Because of how %run manages namespaces, and the fact that we are trying here to
verify subtle object deletion and reference counting issues, the %run tests
will be kept in this separate file. This makes it easier to aggregate in one
place the tricks needed to handle it; most other magics are much easier to test
and we do so in a common test_magic file.
"""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import functools
import os
import random
import sys
import tempfile
import textwrap
import unittest
import nose.tools as nt
from nose import SkipTest
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
from IPython.utils import py3compat
from IPython.utils.tempdir import TemporaryDirectory
from IPython.core import debugger
#-----------------------------------------------------------------------------
# Test functions begin
#-----------------------------------------------------------------------------
def doctest_refbug():
"""Very nasty problem with references held by multiple runs of a script.
See: https://github.com/ipython/ipython/issues/141
In [1]: _ip.clear_main_mod_cache()
# random
In [2]: %run refbug
In [3]: call_f()
lowercased: hello
In [4]: %run refbug
In [5]: call_f()
lowercased: hello
lowercased: hello
"""
def doctest_run_builtins():
r"""Check that %run doesn't damage __builtins__.
In [1]: import tempfile
In [2]: bid1 = id(__builtins__)
In [3]: fname = tempfile.mkstemp('.py')[1]
In [3]: f = open(fname,'w')
In [4]: dummy= f.write('pass\n')
In [5]: f.flush()
In [6]: t1 = type(__builtins__)
In [7]: %run $fname
In [7]: f.close()
In [8]: bid2 = id(__builtins__)
In [9]: t2 = type(__builtins__)
In [10]: t1 == t2
Out[10]: True
In [10]: bid1 == bid2
Out[10]: True
In [12]: try:
....: os.unlink(fname)
....: except:
....: pass
....:
"""
def doctest_run_option_parser():
r"""Test option parser in %run.
In [1]: %run print_argv.py
[]
In [2]: %run print_argv.py print*.py
['print_argv.py']
In [3]: %run -G print_argv.py print*.py
['print*.py']
"""
@dec.skip_win32
def doctest_run_option_parser_for_posix():
r"""Test option parser in %run (Linux/OSX specific).
You need double quote to escape glob in POSIX systems:
In [1]: %run print_argv.py print\\*.py
['print*.py']
You can't use quote to escape glob in POSIX systems:
In [2]: %run print_argv.py 'print*.py'
['print_argv.py']
"""
@dec.skip_if_not_win32
def doctest_run_option_parser_for_windows():
r"""Test option parser in %run (Windows specific).
In Windows, you can't escape ``*` `by backslash:
In [1]: %run print_argv.py print\\*.py
['print\\*.py']
You can use quote to escape glob:
In [2]: %run print_argv.py 'print*.py'
['print*.py']
"""
@py3compat.doctest_refactor_print
def doctest_reset_del():
"""Test that resetting doesn't cause errors in __del__ methods.
In [2]: class A(object):
...: def __del__(self):
...: print str("Hi")
...:
In [3]: a = A()
In [4]: get_ipython().reset()
Hi
In [5]: 1+1
Out[5]: 2
"""
# For some tests, it will be handy to organize them in a class with a common
# setup that makes a temp file
class TestMagicRunPass(tt.TempFileMixin):
def setup(self):
"""Make a valid python temp file."""
self.mktmp('pass\n')
def run_tmpfile(self):
_ip = get_ipython()
# This fails on Windows if self.tmpfile.name has spaces or "~" in it.
# See below and ticket https://bugs.launchpad.net/bugs/366353
_ip.magic('run %s' % self.fname)
def run_tmpfile_p(self):
_ip = get_ipython()
# This fails on Windows if self.tmpfile.name has spaces or "~" in it.
# See below and ticket https://bugs.launchpad.net/bugs/366353
_ip.magic('run -p %s' % self.fname)
def test_builtins_id(self):
"""Check that %run doesn't damage __builtins__ """
| _ip = get_ipython()
# Test that the id of __builtins__ is not modified by %run
bid1 = id(_ip.user_ns['__builtins__'])
self.run_tmpfile()
bid2 = id(_ip.user_ns['__builtins__'])
n | t.assert_equal(bid1, bid2)
def test_builtins_type(self):
"""Check that the type of __builtins__ doesn't change with %run.
However, the above could pass if __builtins__ was already modified to
be a dict (it should be a module) by a previous use of %run. So we
also check explicitly that it really is a module:
"""
_ip = get_ipython()
self.run_tmpfile()
nt.assert_equal(type(_ip.user_ns['__builtins__']),type(sys))
def test_prompts(self):
"""Test that prompts correctly generate after %run"""
self.run_tmpfile()
_ip = get_ipython()
p2 = _ip.prompt_manager.render('in2').strip()
nt.assert_equal(p2[:3], '...')
def test_run_profile( self ):
"""Test that the option -p, which invokes the profiler, do not
crash by invoking execfile"""
_ip = get_ipython()
self.run_tmpfile_p()
class TestMagicRunSimple(tt.TempFileMixin):
def test_simpledef(self):
"""Test that simple class definitions work."""
src = ("class foo: pass\n"
"def f(): return foo()")
self.mktmp(src)
_ip.magic('run %s' % self.fname)
_ip.run_cell('t = isinstance(f(), foo)')
nt.assert_true(_ip.user_ns['t'])
def test_obj_del(self):
"""Test that object's __del__ methods are called on exit."""
if sys.platform == 'win32':
try:
import win32api
except ImportError:
raise SkipTest("Test requires pywin32")
src = ("class A(object):\n"
" def __del__(self):\n"
" print 'object A deleted'\n"
"a = A()\n")
self.mktmp(py3compat.doctest_refactor_print(src))
if dec.module_not_available('sqlite3'):
err = 'WARNING: IPython History requires SQLite, your history will not be saved\n'
else:
err = None
tt.ipexec_validate(self.fname, 'object A deleted', err)
def test_aggressive_namespace_cleanup(self):
"""Test that namespace cleanup is not too aggressive GH-238
Returning from another run magic deletes the namespace"""
# see ticket https://github.com/ipython/ipython/issues/238
class secondtmp(tt.TempFileMixin): pass
empty = secondtmp()
empty.mktmp('')
# On Windows, the filename will have \users in it, so we need to use the
# repr so that the \u becomes \\u.
src = ("ip = get_ipython()\n"
"for i in range(5):\n"
" try:\n"
" ip.magic(%r)\n"
" except NameError as e:\n"
" print(i)\n"
" break\n" % ('run ' + empty.fname))
self.mktmp(src)
_ip.magic('run %s' % self.fname)
_ip.run_cell('ip == get_ipython()')
nt.assert_equal(_ip.user_ns['i'], 4)
def test_run_second(self):
"""Test that running a second file doesn't clobber the first, gh-3547
"""
self.mktmp("avar = 1\n"
"def afunc():\n"
" return avar\n")
empty = tt.TempFileMixin()
empty.mktmp("")
_ip.magic('run %s' % self.fname)
_ip.magic('run %s' % empty.fname)
nt.assert_equal(_ip.user_ns['afunc'](), 1)
@dec.skip_win32
def test_tclass(self):
mydir = os.path.dirname(__file__)
tc = os.path.join(mydir, 'tclass')
src = ("%%run '%s' C-first\n"
|
cloudcopy/seahub | tests/api/test_auth.py | Python | apache-2.0 | 2,115 | 0.002364 | #coding: UTF-8
"""
Test auth related api, such as login/logout.
"""
import random
import re
from urllib import urlencode, quote
from tests.common.common import USERNAME, PASSWORD, SEAFILE_BASE_URL
from tests.common.utils import randstring, urljoin
from tests.api.urls import (
AUTH_PING_URL, TOKEN_URL, DOWNLOAD_REPO_URL, LOGOUT_DEVICE_URL
)
from tests.api.apitestbase import ApiTestBase
def fake_ccnet_id():
    """Produce a random 40-character string shaped like a ccnet peer id."""
    peer_id = randstring(length=40)
    return peer_id
class AuthTest(ApiTestBase):
    """This test involves creating/deleting api tokens, so for this test we use
    a specific auth token so that it won't affect other test cases.
    """
    def test_logout_device(self):
        """End-to-end check that logging out a device invalidates its API token."""
        # Obtain a desktop-style token and prove it is accepted.
        token = self._desktop_login()
        self._do_auth_ping(token, expected=200)
        with self.get_tmp_repo() as repo:
            # Derive a repo sync token from the login token and use it once.
            sync_token = self._clone_repo(token, repo.repo_id)
            self._get_repo_info(sync_token, repo.repo_id)
            self._logout(token)
            # After logout the API token must be rejected.
            self._do_auth_ping(token, expected=403)
            # self._get_repo_info(sync_token, repo.repo_id, expected=400)

    def _desktop_login(self):
        """Log in as a (fake) windows desktop client; return the API token."""
        data = {
            'username': USERNAME,
            'password': PASSWORD,
            'platform': 'windows',
            'device_id': fake_ccnet_id(),
            'device_name': 'fake-device-name',
            'client_version': '4.1.0',
            'platform_version': '',
        }
        return self.post(TOKEN_URL, data=data, use_token=False).json()['token']

    def _do_auth_ping(self, token, **kwargs):
        # `expected=` is forwarded via kwargs so callers can assert the status.
        return self.get(AUTH_PING_URL, token=token, **kwargs)

    def _clone_repo(self, token, repo_id):
        """Request a repo-sync token for repo_id using the API token."""
        return self.get(DOWNLOAD_REPO_URL % repo_id, token=token).json()['token']

    def _get_repo_info(self, sync_token, repo_id, **kwargs):
        """Hit the seafile permission-check endpoint using a repo sync token."""
        headers = {
            'Seafile-Repo-Token': sync_token
        }
        url = urljoin(SEAFILE_BASE_URL,
                      'repo/%s/permission-check/?op=upload' % repo_id)
        self.get(url, use_token=False, headers=headers, **kwargs)

    def _logout(self, token):
        # Server-side invalidation of this device's auth token.
        self.post(LOGOUT_DEVICE_URL, token=token)
|
alexcasgarcia/CCI | TreesAndGraphs/DFS.py | Python | mit | 2,154 | 0.021356 | class Queue:
def __init__(self, values):
    # Backing list for the FIFO; kept by reference, head is index 0.
    self.values = values
def __str__(self):
    # Human-readable form: just the underlying list.
    return str(self.values)
def add(self,value):
self.values = self.values + [value]
def remove(self):
beginningOfQueue = self.values[0]
self.values = self.values[1:]
return beginningOfQueue
def peek(self):
    # Return the head element without removing it.
    return self.values[0]
def isEmpty(self):
return eval('len(self.values) == 0')
class Node:
    """A tree/graph node supporting depth-first and breadth-first traversal."""

    def __init__(self, value, children):
        self.value = value
        self.children = children   # list of Node
        self.visited = False       # set by visit() during traversal
        self.marked = False        # set when enqueued during BFS

    def __str__(self):
        return str(self.value)

    def addChild(self, value):
        # BUG FIX: the original did `self.children += Node(value, [])`,
        # which raises TypeError because a Node is not iterable; the
        # intent is to append a new leaf node.
        self.children.append(Node(value, []))

    def getChildren(self):
        return self.children

    def visit(self):
        # Print-on-visit preserves the original observable behaviour.
        # (Parenthesized form works on both Python 2 and 3.)
        print(str(self))
        self.visited = True

    def mark(self):
        self.marked = True

    def wasVisited(self):
        return self.visited

    def depthfirstsearch(self):
        """Recursive pre-order DFS, visiting (printing) each node once."""
        if self is None:  # defensive guard kept from the original
            return
        self.visit()
        for child in self.getChildren():
            if not child.wasVisited():
                child.depthfirstsearch()

    def breadthfirstsearch(self):
        """Level-order BFS using the Queue class defined above."""
        q = Queue([])
        self.mark()
        q.add(self)
        while not q.isEmpty():
            node = q.remove()
            node.visit()
            # Enqueue children exactly as the original did: gated on the
            # visited flag (set at dequeue time), then marked.
            for child in node.getChildren():
                if not child.wasVisited():
                    child.mark()
                    q.add(child)
# Demo 1: breadth-first traversal of a two-level tree rooted at 1.
n4 = Node(4, [])
n5 = Node(5, [])
n6 = Node(6, [])
n7 = Node(7, [])
n8 = Node(8, [])
n9 = Node(9, [])
n10 = Node(10, [])
n2 = Node(2, [n4, n5, n6])
n3 = Node(3, [n7, n8, n9, n10])
n1 = Node(1, [n2, n3])
print "Breadth First Search"
n1.breadthfirstsearch()
# Demo 2: rebuild the same tree (nodes carry visited/marked state from
# the first traversal) and run DFS on the fresh copy.
n4 = Node(4, [])
n5 = Node(5, [])
n6 = Node(6, [])
n7 = Node(7, [])
n8 = Node(8, [])
n9 = Node(9, [])
n10 = Node(10, [])
n2 = Node(2, [n4, n5, n6])
n3 = Node(3, [n7, n8, n9, n10])
n1 = Node(1, [n2, n3])
print "Depth First Search"
n1.depthfirstsearch()
|
recap/pumpkin | examples/tracula/injector.py | Python | mit | 2,720 | 0.013235 | __author__ = 'reggie'
###START-CONF
##{
##"object_name": "injector",
##"object_poi": "injector-9192",
##"auto-load": true,
##"remoting" : true,
##"parameters": [
## {
## "name": "inject",
## "type": "Composite",
## "state" : "ALL_RAW",
## "format" : "MRIFile,DTIFile"
## }
## ],
##"return": [
## {
## "name": "token",
## "type": "Composite",
## "state" : "MRI_RAW&DTI_RAW"
## }
##
## ] }
##END-CONF
from pumpkin import *
import os.path
class injector(PmkSeed.Seed):
    """Pumpkin seed that splits a combined (MRI, DTI) input packet into two
    downstream packets: one for the MRI pipeline (MRI_RAW) and one for the
    DTI pipeline (DTI_RAW).  Incoming paths are relative to $HOME.
    """

    def __init__(self, context, poi=None):
        PmkSeed.Seed.__init__(self, context, poi)
        # Base directory all incoming relative paths are resolved against.
        self.home = os.path.expanduser("~")

    def _check_input(self, path, found_msg):
        """Log whether *path* exists on disk; return True when it does."""
        if os.path.isfile(path):
            self.logger.info(found_msg)
            return True
        self.logger.warn("Input file " + path + " not found yet...retry later")
        return False

    def run(self, pkt, data):
        # data layout: [mri, dti, subjectID, outputDir, license],
        # all paths relative to the user's home directory.
        mri_file = self.home + "/" + data[0]
        dti_file = self.home + "/" + data[1]
        subjectID = self.home + "/" + data[2]
        outputDir = self.home + "/" + data[3]
        lic = self.home + "/" + data[4]

        # The original repeated this check five times inline; the log
        # messages below are preserved byte-for-byte.
        checks = [
            (mri_file, "Found input file: " + mri_file),
            (dti_file, "Found input file: " + dti_file),
            (subjectID, "Found subjectID file"),
            (outputDir, "Found outputDir file"),
            (lic, "Found license file"),
        ]
        results = [self._check_input(path, msg) for path, msg in checks]
        found = all(results)
        # TODO(review): `found` is computed but never acted upon -- the
        # packets below are dispatched even when inputs are missing, as in
        # the original code.  Confirm with the framework's retry semantics
        # before gating the dispatch on it.

        # Payloads forward the original *relative* paths, '|,|'-separated.
        mri_input = data[0] + "|,|" + data[2] + "|,|" + data[3] + "|,|" + data[4]
        dti_input = data[1]

        self.fork_dispatch(pkt, mri_input, "MRI_RAW")
        self.fork_dispatch(pkt, dti_input, "DTI_RAW")
|
iarspider/pyscm | main.py | Python | gpl-3.0 | 834 | 0.003597 | #!python | 3
import argparse
import scmFile
if __name__ == "__main__":
    # CLI: exactly one of -r/-w selects the conversion direction, -f the
    # SCM dialect (F or C, case-insensitive), and `file` the input path.
    parser = argparse.ArgumentParser()
    g = parser.add_mutually_exclusive_group(required=True)
    g.add_argument('-r', '--read', action="store_const", dest="mode", const="read",
                   help="Convert SCM to one or more CSV")
    g.add_argument('-w', '--write', action="store_const", dest="mode", const="write",
                   help="Convert CSV files back to SCM")
    parser.add_argument('-f', '--format', action="store", choices=['F', 'f', 'C', 'c'], required=True)
    parser.add_argument('file')
    args = parser.parse_args()

    # 'f'/'F' selects the F-dialect handler, 'c'/'C' the C-dialect one.
    if args.format.lower() == 'f':
        scm = scmFile.scmFileF()
    else:
        scm = scmFile.scmFileC()

    # mode is guaranteed non-None: the exclusive group is required=True.
    if args.mode == 'write':
        scm.CSV2SCM(args.file)
    else:
        scm.SCM2CSV(args.file)
|
Seldaiendil/meyeOS | devtools/qooxdoo-1.5-sdk/tool/pylib/generator/code/clazz/MClassResources.py | Python | agpl-3.0 | 5,779 | 0.006056 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2011 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Thomas Herchenroeder (thron7)
#
################################################################################
##
# generator.code.Class Mixin: class resources
##
import sys, os, types, re, string, copy
from generator.resource.AssetHint import AssetHint
from generator.resource.CombinedImage import CombinedImage
from generator import Context
from misc import util
from misc.securehash import sha_construct
class MClassResources(object):
# --------------------------------------------------------------------------
# Resource Support
# --------------------------------------------------------------------------
def getAssets(self, assetMacros={}):
    """Return this class' #asset hints as a list of AssetHint objects,
    with asset macros (e.g. ${theme}) expanded and glob patterns
    compiled to regexes.  Results are memoized per macro set.

    NOTE(review): the mutable default for assetMacros is shared across
    calls; it appears to be read-only here, but confirm before mutating.
    """
    # Memoizing needs assetMacros in the key, otherwise you get wrong
    # results with multiple builds in one generator run.
    macroskey = util.toString(assetMacros)
    macroskey = sha_construct(macroskey).hexdigest()
    if macroskey not in self._assetRegex:
        # prepare a regex encompassing all asset hints, asset macros resolved
        classAssets = self.getHints()['assetDeps'][:]
        iresult = []  # [AssetHint]
        for res in classAssets:
            # expand file glob into regexp
            res = re.sub(r'\*', ".*", res)
            # expand macros
            if res.find('${') > -1:
                expres = self._expandMacrosInMeta(assetMacros, res)
            else:
                expres = [res]
            # collect resulting asset objects (dedup by AssetHint equality)
            for e in expres:
                assethint = AssetHint(res)
                assethint.clazz = self
                assethint.expanded = e
                assethint.regex = re.compile(e)
                if assethint not in iresult:
                    iresult.append(assethint)
        self._assetRegex[macroskey] = iresult

    return self._assetRegex[macroskey]
##
# expand asset macros in asset strings, like "qx/decoration/${theme}/*"
def _expandMacrosInMeta(self, assetMacros, res):
    """Expand every ${macro} occurrence in *res* against assetMacros,
    returning the list of all concrete variants (one per combination of
    configured macro values).  Unknown macros are stripped with a warning.
    """
    def expMacRec(rsc):
        # Recursively replace the first ${...}, fanning out over all
        # configured values for that macro, then recurse on the rest.
        if rsc.find('${') == -1:
            return [rsc]
        result = []
        nres = rsc[:]
        mo = re.search(r'\$\{(.*?)\}', rsc)
        if mo:
            themekey = mo.group(1)
            if themekey in assetMacros:
                # create an array with all possibly variants for this replacement
                iresult = []
                for val in assetMacros[themekey]:
                    iresult.append(nres.replace('${' + themekey + '}', val))
                # for each variant replace the remaining macros
                for ientry in iresult:
                    result.extend(expMacRec(ientry))
            else:
                nres = nres.replace('${' + themekey + '}', '')  # just remove '${...}'
                nres = nres.replace('//', '/')  # get rid of '...//...'
                result.append(nres)
                console.warn("Warning: (%s): Cannot replace macro '%s' in #asset hint" % (self.id, themekey))
        else:
            # '${' present but never closed by '}' (Python 2 raise syntax).
            raise SyntaxError, "Non-terminated macro in string: %s" % rsc
        return result

    console = self.context['console']
    result = expMacRec(res)
    return result
##
# Map resources to classes.
# Takes a list of Library's and a list of Class'es, and modifies the
# classes' .resources member to hold suitable resources from the Libs.
@staticmethod
def mapResourcesToClasses(libs, classes, assetMacros={}):
    """Attach matching library resources to each class.

    Takes a list of Library's and a list of Class'es, and modifies the
    classes' .resources member to hold suitable resources from the Libs.
    Warns for every #asset hint that matched nothing.
    """
    # Resource list
    resources = []
    for libObj in libs:
        resources.extend(libObj.getResources())  # weightedness of same res id through order of script.libraries
    # remove unwanted files
    exclpatt = re.compile("\.(?:meta|py)$", re.I)
    for res in resources[:]:
        if exclpatt.search(res.id):
            resources.remove(res)

    # Asset pattern list -- this is basically an optimization, to condense
    # asset patterns
    #assetMacros = self._genobj._job.get('asset-let',{})
    assetHints = []
    for clazz in classes:
        assetHints.extend(clazz.getAssets(assetMacros))
        clazz.resources = set()  #TODO: they might be filled by previous jobs, with different libs

    # Go through resources and asset patterns
    for res in resources:
        for hint in assetHints:
            # add direct matches
            if hint.regex.match(res.id):
                hint.seen = True
                hint.clazz.resources.add(res)
            # add matches of embedded images
            if isinstance(res, CombinedImage):
                for embed in res.embeds:
                    if hint.regex.match(embed.id):
                        hint.seen = True
                        hint.clazz.resources.add(res)

    # Now that the resource mapping is done, check if we have unfullfilled hints
    for hint in assetHints:
        if not hint.seen:
            Context.console.warn("No resource matched #asset(%s) (%s)" % (hint.source, hint.clazz.id))

    return classes
|
nuobit/odoo-addons | account_asset_invoice_line_link/models/account_move.py | Python | agpl-3.0 | 659 | 0 | # Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import fields, models
class AccountMove(models.Model):
    _inherit = "account.move"

    def _prepare_asset_vals(self, aml):
        """Extend the asset values with a link back to the move and line."""
        vals = super()._prepare_asset_vals(aml)
        vals.update(move_id=aml.move_id, move_line_id=aml)
        return vals
class AccountInvoiceLine(models.Model):
    _inherit = "account.move.line"

    # Assets generated from this invoice line (inverse of
    # account.asset.move_line_id, which _prepare_asset_vals fills in).
    asset_ids = fields.One2many(
        comodel_name="account.asset",
        inverse_name="move_line_id",
        string="Assets",
    )
|
thjwhite/personal_website | personal_website/recipes/admin.py | Python | mit | 700 | 0.001429 | from django.contrib import admin
from .models import Recipe, IngredientRequirement, Ingredient
# Branding for the Django admin: page header, browser title, index caption.
admin.site.site_header = 'TJ White\'s website admin panel'
admin.site.site_title = 'TJ White\'s personal website'
admin.site.index_title = 'Administration'
class IngredientRequirementInline(admin.TabularInline):
    """Inline editor so a recipe's ingredient rows are edited on the Recipe page."""
    model = IngredientRequirement
    extra = 1  # number of blank rows offered by default
class RecipeAdmin(admin.ModelAdmin):
    """Admin configuration for Recipe: grouped fields plus inline ingredients."""
    fieldsets = [
        ('General Information', {'fields': ['name', 'title']}),
        ('Publishing Information', {'fields': ['date_published']})
    ]
    inlines = [IngredientRequirementInline]
    search_fields = ['title']  # admin search box matches recipe titles
# Expose the models in the admin; Ingredient uses the default ModelAdmin.
admin.site.register(Recipe, RecipeAdmin)
admin.site.register(Ingredient)
|
mochrul/zorp | tests/zorpctl/test_szig.py | Python | gpl-2.0 | 3,271 | 0.006726 | #!/usr/bin/env python2.7
############################################################################
##
## Copyright (c) 2000-2015 BalaBit IT Ltd, Budapest, Hungary
## Copyright (c) 2015-2018 BalaSys IT Ltd, Budapest, Hungary
##
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
############################################################################
import unittest
from HandlerMock import HandlerMock
from zorpctl.szig import SZIG
class TestSzig(unittest.TestCase):
    """Unit tests for the SZIG client, backed by HandlerMock instead of a
    live Zorp instance.

    Fixes over the original: the deprecated assertEquals alias (removed in
    Python 3.12) is replaced by assertEqual, and test_coredump's bare
    try/except -- which also swallowed its own failing assertTrue(False),
    making the test unable to fail -- is expressed with assertRaises.
    """

    def setUp(self):
        self.szig = SZIG("", HandlerMock)

    def test_get_value(self):
        # Unknown / non-leaf keys yield None; leaves yield typed values.
        self.assertEqual(self.szig.get_value(""), None)
        self.assertEqual(self.szig.get_value("service"), None)
        self.assertEqual(self.szig.get_value("info.policy.file"), "/etc/zorp/policy.py")
        self.assertEqual(self.szig.get_value("stats.thread_number"), 5)
        self.assertEqual(self.szig.get_value("service.service_http_transparent.sessions_running"), 0)

    def test_get_sibling(self):
        self.assertEqual(self.szig.get_sibling("conns"), "info")
        self.assertEqual(self.szig.get_sibling("stats.threads_running"), "stats.thread_rate_max")
        self.assertEqual(self.szig.get_sibling("stats.thread_rate_max"), "stats.audit_number")
        # The last sibling in a subtree has no successor.
        self.assertEqual(self.szig.get_sibling("stats.thread_number"), None)

    def test_get_child(self):
        self.assertEqual(self.szig.get_child(""), "conns")
        self.assertEqual(self.szig.get_child("info"), "info.policy")
        self.assertEqual(self.szig.get_child("info.policy"), "info.policy.reload_stamp")
        # Leaf nodes have no child.
        self.assertEqual(self.szig.get_child("info.policy.reload_stamp"), None)

    def test_get_set_loglevel(self):
        loglevel = 6
        self.szig.loglevel = loglevel
        self.assertEqual(self.szig.loglevel, loglevel)

    def test_get_set_logspec(self):
        logspec = "this is a logspec"
        self.szig.logspec = logspec
        self.assertEqual(self.szig.logspec, logspec)

    def test_get_set_deadlockcheck(self):
        deadlockcheck = False
        self.szig.deadlockcheck = deadlockcheck
        self.assertEqual(self.szig.deadlockcheck, deadlockcheck)

    def test_reload_and_reload_result(self):
        self.szig.reload()
        self.assertEqual(self.szig.reload_result(), True)

    def test_coredump(self):
        # coredump is expected to raise until the feature is repaired.
        with self.assertRaises(Exception):
            self.szig.coredump()
if __name__ == '__main__':
unittest.main()
|
mumuwoyou/vnpy-dev | vn.trader/ctaStrategy/strategy/strategyTmm3.py | Python | mit | 19,435 | 0.006349 | # encoding: UTF-8
from ctaBase import *
from ctaTemplate import CtaTemplate
import talib
import numpy as np
import math
import copy
from datetime import datetime
########################################################################
class Tmm3Strategy(CtaTemplate):
    """Tmm3 strategy: builds 5-minute bars from ticks and derives a 'Tmm'
    candle series from them (see procecssBar/onBar)."""
    className = 'Tmm3Strategy'
    author = u'用Python的交易员'

    barDbName = MINUTE_5_DB_NAME

    # Strategy parameters
    initDays = 10       # days of history replayed during initialization
    fixedSize = 1       # order size when opening a position
    zCount = 0          # number of direction reversals seen

    # Strategy variables
    bar = None          # 1-minute bar currently under construction
    m5bar = None        # 5-minute bar currently under construction
    barMinute = EMPTY_STRING    # minute stamp of the bar being built

    bufferSize = 200    # capacity of the ring buffers below
    bufferCount = 0     # number of bars buffered so far
    UporDownCount = 0   # length of the current up/down streak
    highArray = np.zeros(bufferSize)    # bar highs
    lowArray = np.zeros(bufferSize)     # bar lows
    closeArray = np.zeros(bufferSize)   # bar closes
    openArray = np.zeros(bufferSize)    # bar opens

    # Tmm bars (derived candle series)
    H1Array = np.zeros(bufferSize)      # Tmm bar highs
    L1Array = np.zeros(bufferSize)      # Tmm bar lows
    C1Array = np.zeros(bufferSize)      # Tmm bar closes
    O1Array = np.zeros(bufferSize)      # Tmm bar opens
    UPorDOWNArray = np.zeros(bufferSize)  # direction per bar: 1 = up, 0 = down
    H1Value = 0
    L1Value = 0
    C1Value = 0
    O1Value = 0
    UPorDOWNValue = 0

    orderList = []      # ids of outstanding orders (re-created in __init__)

    # Parameter names exposed/persisted by the CTA engine
    paramList = ['name',
                 'className',
                 'author',
                 'vtSymbol']

    # Variable names exposed/persisted by the CTA engine
    varList = ['inited',
               'trading',
               'pos',
               'ydPos',
               'H1Value',
               'L1Value',
               'O1Value',
               'C1Value',
               'UPorDOWNValue']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
    """Constructor."""
    super(Tmm3Strategy, self).__init__(ctaEngine, setting)

    # Note: mutable object attributes (typically list/dict) must be
    # re-created here at init time rather than shared as class
    # attributes, otherwise all strategy instances would share the same
    # data -- a latent logic-error risk.  Declaring them at class level
    # (above) is purely for readability; it is a coding-style choice.
    #self.isPrePosHaved = False
    #self.isAlreadyTraded = False
#----------------------------------------------------------------------
def onInit(self):
    """Initialize the strategy (must be implemented by the user)."""
    self.writeCtaLog(u'%s策略初始化' % self.name)

    # Load historical bars and replay them through onBar so the
    # indicator buffers are warmed up before live trading starts.
    initData = self.loadBar(self.initDays)
    for bar in initData:
        self.onBar(bar)

    self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
    """Start the strategy (must be implemented by the user)."""
    self.writeCtaLog(u'%s策略启动' % self.name)
    self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
    """Stop the strategy (must be implemented by the user)."""
    self.writeCtaLog(u'%s策略停止' % self.name)
    self.putEvent()
    #----------------------------------------------------------------------
def onTick(self, tick):
    """Handle a pushed tick (must be implemented by the user):
    aggregate ticks into 1-minute bars and hand finished bars that fall
    inside trading hours to procecssBar."""
    # Build the 1-minute bar
    #super(Tmm3Strategy, self).onTick(tick)
    tickMinute = tick.datetime.minute

    if tickMinute != self.barMinute:
        # Minute rolled over: finalize the previous bar, if any.
        if self.bar:
            self.bar.datetime = tick.datetime.replace(second=0, microsecond=0)
            self.bar.date = tick.date
            self.bar.time = tick.time
            # self.writeCtaLog(u'记录分钟线数据%s,时间:%s, O:%s, H:%s, L:%s, C:%s'
            #                  % (self.bar.vtSymbol, self.bar.time, self.bar.open, self.bar.high,
            #                     self.bar.low, self.bar.close))
            # Only bars inside trading hours feed the 5-minute builder.
            if self.barInTime(tick):
                self.procecssBar(self.bar)

        # Start a new minute bar seeded with this tick.
        bar = CtaBarData()
        bar.vtSymbol = tick.vtSymbol
        bar.symbol = tick.symbol
        bar.exchange = tick.exchange

        bar.open = tick.lastPrice
        bar.high = tick.lastPrice
        bar.low = tick.lastPrice
        bar.close = tick.lastPrice

        bar.date = tick.date
        bar.time = tick.time
        bar.datetime = tick.datetime  # bar timestamp = first tick's time
        self.bar = bar                # local name saves one attribute hop
        self.barMinute = tickMinute   # remember the current minute
    else:
        # Same minute: merge the tick into the bar under construction.
        bar = self.bar  # again bound locally for speed
        bar.high = max(bar.high, tick.lastPrice)
        bar.low = min(bar.low, tick.lastPrice)
        bar.close = tick.lastPrice
#----------------------------------------------------------------------
def procecssBar(self, bar):
    """Aggregate finished 1-minute bars into 5-minute bars and forward
    each completed 5-minute bar to onBar.

    NOTE(review): the name keeps the original typo ('procecssBar')
    because onTick calls it by this exact name.
    """
    # A new 5-minute bar starts when none exists yet or at minutes with
    # minute % 5 == 1 (i.e. the first minute of each 5-minute window).
    if not self.m5bar or bar.datetime.minute % 5 == 1:
        m5bar = CtaBarData()
        m5bar.vtSymbol = bar.vtSymbol
        # NOTE(review): symbol is seeded from vtSymbol, not bar.symbol --
        # confirm this is intended.
        m5bar.symbol = bar.vtSymbol
        m5bar.exchange = bar.exchange
        m5bar.open = bar.open
        m5bar.high = bar.high
        m5bar.low = bar.low
        m5bar.close = bar.close
        m5bar.date = bar.date
        m5bar.time = bar.time
        m5bar.datetime = bar.datetime
        m5bar.volume = bar.volume
        m5bar.openInterest = bar.openInterest
        self.m5bar = m5bar
    else:
        # Merge this minute into the 5-minute bar under construction.
        m5bar = self.m5bar
        m5bar.high = max(m5bar.high, bar.high)
        m5bar.low = min(m5bar.low, bar.low)
        m5bar.close = bar.close
        m5bar.volume = m5bar.volume + bar.volume
        m5bar.openInterest = bar.openInterest

    if bar.datetime.minute % 5 == 0:
        # Window complete: emit a copy so later mutation of self.m5bar
        # cannot alter the bar handed to onBar.
        newBar = copy.copy(m5bar)
        newBar.datetime = bar.datetime.replace(second=0, microsecond=0)
        newBar.date = bar.date
        newBar.time = bar.time
        self.onBar(newBar)
        # self.writeCtaLog(u'记录3分钟线数据%s,时间:%s, O:%s, H:%s, L:%s, C:%s'
        #                  % (newBar.vtSymbol, newBar.time, newBar.open, newBar.high,
        #                     newBar.low, newBar.close))
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
#super(Tmm3Strategy, self).onBar(bar)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
# 保存K线数据
self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
self.openArray[0:self.bufferSize - 1] = self.openArray[1:self.bufferSize]
self.C1Array[0:self.bufferSize - 1] = self.C1Array[1:self.bufferSize]
self.H1Array[0:self.bufferSize - 1] = self.H1Array[1:self.bufferSize]
self.L1Array[0:self.bufferSize - 1] = self.L1Array[1:self.bufferSize]
self.O1Array[0:self.bufferSize - 1] = self.O1Array[1:self.bufferSize]
self.UPorDOWNArray[0:self.bufferSize - 1] = self.UPorDOWNArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.openArray[-1] = bar.open
self.bufferCount += 1
if self.bufferCount == 1: #第一天特殊处理
if self.closeArray[-1] >= self.openArray[-1]: #上涨
self.O1Array[-1] = self.openArray[-1]
self.L1Array[-1] = self.openArray[-1]
self.H1Array[-1] = self.closeArray[-1]
self.C1Array[-1] = self.closeArray[-1]
self.UPorDOWNArray[-1] = 1
else: #下跌
self.O1Array[-1] = self.openArray[-1]
self.H1Array[-1] = self.openArray[-1]
self.L1Array[-1] = self.closeArray[-1]
self.C1Array[-1] = self.closeArray[-1]
self.UPorDOWNArray[-1] = 0
self.UporDownCount = 1
self.zCount += 1
return
|
Azure/azure-sdk-for-python | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_integration_runtime_nodes_operations.py | Python | mit | 15,893 | 0.004216 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
    integration_runtime_name: str,
    node_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET HttpRequest for a single integration runtime node.

    NOTE: this module is generated by AutoRest; manual edits are lost on
    regeneration -- keep changes to comments only.
    """
    api_version = "2021-06-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/integrationRuntimes/{integrationRuntimeName}/nodes/{nodeName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "integrationRuntimeName": _SERIALIZER.url("integration_runtime_name", integration_runtime_name, 'str'),
        "nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_update_request(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
    integration_runtime_name: str,
    node_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PATCH HttpRequest that updates an integration runtime node.

    NOTE: this module is generated by AutoRest; manual edits are lost on
    regeneration -- keep changes to comments only.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    api_version = "2021-06-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/integrationRuntimes/{integrationRuntimeName}/nodes/{nodeName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "integrationRuntimeName": _SERIALIZER.url("integration_runtime_name", integration_runtime_name, 'str'),
        "nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when the caller supplied a body.
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
    integration_runtime_name: str,
    node_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE HttpRequest for an integration runtime node.

    NOTE: this module is generated by AutoRest; manual edits are lost on
    regeneration -- keep changes to comments only.
    """
    api_version = "2021-06-01-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/integrationRuntimes/{integrationRuntimeName}/nodes/{nodeName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "integrationRuntimeName": _SERIALIZER.url("integration_runtime_name", integration_runtime_name, 'str'),
        "nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class IntegrationRuntimeNodesOperations(object):
"""IntegrationRuntimeNodesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Wired up by the generated service client; not meant to be
    # constructed directly by users.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
workspace_name: str,
integration_runtime_name: str,
node_name: str,
**kwargs: Any |
) -> "_models.SelfHostedIntegrationRuntimeNode":
"""Get integration runtime node.
Get an integration runtime node.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param integration_runtime_name: Integration runtime name.
:type integration_runtime_name: str
:param node_name: Integration runtime node name.
:type node_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SelfHost |
closeio/tasktiger | tasktiger/redis_semaphore.py | Python | mit | 3,150 | 0.000317 | """Redis Semaphore lock."""
import os
import time
SYSTEM_LOCK_ID = 'SYSTEM_LOCK'
class Semaphore(object):
    """Semaphore lock using Redis ZSET."""

    def __init__(self, redis, name, lock_id, timeout, max_locks=1):
        """
        Semaphore lock.

        Semaphore logic is implemented in the lua/semaphore.lua script.
        Individual locks within the semaphore are managed inside a ZSET
        using scores to track when they expire.

        Arguments:
            redis: Redis client
            name: Name of lock. Used as ZSET key.
            lock_id: Lock ID
            timeout: Timeout in seconds
            max_locks: Maximum number of locks allowed for this semaphore
        """
        self.redis = redis
        self.name = name
        self.lock_id = lock_id
        self.max_locks = max_locks
        self.timeout = timeout

        # Load and register the Lua script that implements the actual
        # acquire/renew logic server-side.
        script_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'lua/semaphore.lua',
        )
        with open(script_path) as script_file:
            self._semaphore = self.redis.register_script(script_file.read())

    @classmethod
    def get_system_lock(cls, redis, name):
        """
        Get system lock timeout for the semaphore.

        Arguments:
            redis: Redis client
            name: Name of lock. Used as ZSET key.

        Returns: Time system lock expires or None if lock does not exist
        """
        return redis.zscore(name, SYSTEM_LOCK_ID)

    @classmethod
    def set_system_lock(cls, redis, name, timeout):
        """
        Set system lock for the semaphore.

        Sets a system lock that will expire in timeout seconds.  This
        overrides all other locks: existing locks cannot be renewed and
        no new locks are permitted until the system lock expires.

        Arguments:
            redis: Redis client
            name: Name of lock. Used as ZSET key.
            timeout: Timeout in seconds for system lock
        """
        expires_at = time.time() + timeout
        pipe = redis.pipeline()
        pipe.zadd(name, {SYSTEM_LOCK_ID: expires_at})
        # Key lives a bit longer than the lock itself, for troubleshooting.
        pipe.expire(name, timeout + 10)
        pipe.execute()

    def release(self):
        """Release semaphore."""
        self.redis.zrem(self.name, self.lock_id)

    def acquire(self):
        """
        Obtain a semaphore lock.

        Returns: Tuple (acquired, locks): True/False whether the lock was
        acquired, and the number of locks held in the semaphore.
        """
        raw_acquired, lock_count = self._semaphore(
            keys=[self.name],
            args=[self.lock_id, self.max_locks, self.timeout, time.time()],
        )
        # The Lua script reports 1/0; normalize to a Python bool.
        return raw_acquired == 1, lock_count

    def renew(self):
        """
        Attempt to renew semaphore.

        Technically this doesn't know the difference between losing the lock
        but then successfully getting a new lock versus renewing your lock
        before the timeout.  Both will return True.
        """
        return self.acquire()
|
dhamaniasad/mythbox | resources/lib/twisted/twisted/internet/endpoints.py | Python | gpl-2.0 | 15,010 | 0.001666 | # -*- test-case-name: twisted.internet.test.test_endpoints -*-
# Copyright (c) 2007-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementations of L{IStreamServerEndpoint} and L{IStreamClientEndpoint} that
wrap the L{IReactorTCP}, L{IReactorSSL}, and L{IReactorUNIX} interfaces.
@since: 10.1
"""
from zope.interface import implements, directlyProvides
from twisted.internet import interfaces, defer, error
from twisted.internet.protocol import ClientFactory, Protocol
__all__ = ["TCP4ServerEndpoint", "TCP4ClientEndpoint",
"UNIXServerEndpoint", "UNIXClientEndpoint",
"SSL4ServerEndpoint", "SSL4ClientEndpoint"]
class _WrappingProtocol(Protocol):
    """
    Wrap another protocol in order to notify my user when a connection has
    been made.
    @ivar _connectedDeferred: The L{Deferred} that will callback
        with the C{wrappedProtocol} when it is connected.
    @ivar _wrappedProtocol: An L{IProtocol} provider that will be
        connected.
    """
    def __init__(self, connectedDeferred, wrappedProtocol):
        """
        @param connectedDeferred: The L{Deferred} that will callback
            with the C{wrappedProtocol} when it is connected.
        @param wrappedProtocol: An L{IProtocol} provider that will be
            connected.
        """
        self._connectedDeferred = connectedDeferred
        self._wrappedProtocol = wrappedProtocol
        # Advertise the half-close interface only when the wrapped protocol
        # actually provides it, so transports can detect the capability.
        if interfaces.IHalfCloseableProtocol.providedBy(
            self._wrappedProtocol):
            directlyProvides(self, interfaces.IHalfCloseableProtocol)
    def connectionMade(self):
        """
        Connect the C{self._wrappedProtocol} to our C{self.transport} and
        callback C{self._connectedDeferred} with the C{self._wrappedProtocol}.
        """
        self._wrappedProtocol.makeConnection(self.transport)
        self._connectedDeferred.callback(self._wrappedProtocol)
    def dataReceived(self, data):
        """
        Proxy C{dataReceived} calls to our C{self._wrappedProtocol}.
        """
        return self._wrappedProtocol.dataReceived(data)
    def connectionLost(self, reason):
        """
        Proxy C{connectionLost} calls to our C{self._wrappedProtocol}.
        """
        return self._wrappedProtocol.connectionLost(reason)
    def readConnectionLost(self):
        """
        Proxy L{IHalfCloseableProtocol.readConnectionLost} to our
        C{self._wrappedProtocol}.
        """
        self._wrappedProtocol.readConnectionLost()
    def writeConnectionLost(self):
        """
        Proxy L{IHalfCloseableProtocol.writeConnectionLost} to our
        C{self._wrappedProtocol}.
        """
        self._wrappedProtocol.writeConnectionLost()
class _WrappingFactory(ClientFactory):
    """
    Wrap a factory in order to wrap the protocols it builds.
    @ivar _wrappedFactory: A provider of I{IProtocolFactory} whose
        buildProtocol method will be called and whose resulting protocol
        will be wrapped.
    @ivar _onConnection: An L{Deferred} that fires when the protocol is
        connected
    """
    protocol = _WrappingProtocol
    def __init__(self, wrappedFactory, canceller):
        """
        @param wrappedFactory: A provider of I{IProtocolFactory} whose
            buildProtocol method will be called and whose resulting protocol
            will be wrapped.
        @param canceller: An object that will be called to cancel the
            L{self._onConnection} L{Deferred}
        """
        self._wrappedFactory = wrappedFactory
        self._onConnection = defer.Deferred(canceller=canceller)
    def buildProtocol(self, addr):
        """
        Proxy C{buildProtocol} to our C{self._wrappedFactory} or errback
        the C{self._onConnection} L{Deferred}.
        @return: An instance of L{_WrappingProtocol} or C{None}
        """
        try:
            proto = self._wrappedFactory.buildProtocol(addr)
        except Exception:
            # Catch Exception rather than a bare except so SystemExit and
            # KeyboardInterrupt still propagate; errback() with no argument
            # wraps the active exception in a Failure.
            self._onConnection.errback()
        else:
            return self.protocol(self._onConnection, proto)
    def clientConnectionFailed(self, connector, reason):
        """
        Errback the C{self._onConnection} L{Deferred} when the
        client connection fails.
        """
        self._onConnection.errback(reason)
class TCP4ServerEndpoint(object):
    """
    TCP server endpoint with an IPv4 configuration
    @ivar _reactor: An L{IReactorTCP} provider.
    @type _port: int
    @ivar _port: The port number on which to listen for incoming connections.
    @type _backlog: int
    @ivar _backlog: size of the listen queue
    @type _interface: str
    @ivar _interface: the hostname to bind to, defaults to '' (all)
    """
    implements(interfaces.IStreamServerEndpoint)
    def __init__(self, reactor, port, backlog=50, interface=''):
        """
        @param reactor: An L{IReactorTCP} provider.
        @param port: The port number used for listening
        @param backlog: size of the listen queue
        @param interface: the hostname to bind to, defaults to '' (all)
        """
        self._reactor = reactor
        self._port = port
        # A previous revision also stored self._listenArgs with hard-coded
        # defaults (backlog=50, interface=''); it was never read and silently
        # ignored the constructor arguments, so it has been removed.
        self._backlog = backlog
        self._interface = interface
    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on a TCP socket
        @return: A L{Deferred} that fires with the listening port, or with
            a L{Failure} if listening fails synchronously.
        """
        return defer.execute(self._reactor.listenTCP,
                             self._port,
                             protocolFactory,
                             backlog=self._backlog,
                             interface=self._interface)
class TCP4ClientEndpoint(object):
    """
    TCP client endpoint with an IPv4 configuration.
    @ivar _reactor: An L{IReactorTCP} provider.
    @type _host: str
    @ivar _host: The hostname to connect to as a C{str}
    @type _port: int
    @ivar _port: The port to connect to as C{int}
    @type _timeout: int
    @ivar _timeout: number of seconds to wait before assuming the
        connection has failed.
    @type _bindAddress: tuple
    @ivar _bindAddress: a (host, port) tuple of local address to bind
        to, or None.
    """
    implements(interfaces.IStreamClientEndpoint)
    def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
        """
        @param reactor: An L{IReactorTCP} provider
        @param host: A hostname, used when connecting
        @param port: The port number, used when connecting
        @param timeout: number of seconds to wait before assuming the
            connection has failed.
        @param bindAddress: a (host, port) tuple of local address to bind to,
            or None.
        """
        self._reactor = reactor
        self._host = host
        self._port = port
        self._timeout = timeout
        self._bindAddress = bindAddress
    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via TCP.
        """
        def _canceller(deferred):
            # Cancelling the returned Deferred aborts the in-progress
            # connection attempt and errbacks with ConnectingCancelledError.
            connector.stopConnecting()
            deferred.errback(
                error.ConnectingCancelledError(connector.getDestination()))
        try:
            wf = _WrappingFactory(protocolFactory, _canceller)
            connector = self._reactor.connectTCP(
                self._host, self._port, wf,
                timeout=self._timeout, bindAddress=self._bindAddress)
            return wf._onConnection
        except:
            # Any synchronous failure becomes a failed Deferred.
            return defer.fail()
class SSL4ServerEndpoint(object):
"""
SSL secured TCP server endpoint with an IPv4 configuration.
@ivar _reactor: An L{IReactorSSL} provider.
@type _host: str
@ivar _host: The hostname to connect to as a C{str}
@type _port: int
@ivar _port: The port to connect to as C{int}
@type _sslContextFactory: L{OpenSSLCertificateOptions}
@var _sslContextFactory: SSL Configuration information as an
L{OpenSSLCertificateOptions}
@type _backlog: int
@ivar _backlog: size of the listen queue
@type _interface: str
@ivar _interface: the hostname to bind to, defaults to '' (all)
"""
implements(interfaces.IStreamServ |
hmdc/outagenotifier | scripts/outages.py | Python | gpl-2.0 | 2,381 | 0.00882 | #!/usr/bin/env python
"""
Script for printing outage information to the console.
"""
from bs4 import BeautifulSoup
import argparse
import ConfigParser
import hmdclogger
import os
__author__ = "Harvard-MIT Data Center DevOps"
__copyright__ = "Copyright 2015, HMDC"
__credits__ = ["Bradley Frank"]
__license__ = "GPLv2"
__maintainer__ = "HMDC"
__email__ = "linux@lists.hmdc.harvard.edu"
__status__ = "Production"
CONFIG_FILE = "/etc/outagenotifier.conf"
NOTIFICATIONS_FILE = "notifications.xml"
def get_settings(config_file):
    """Read the conf file and return the settings used by this script.

    Arguments:
        config_file: Path to the INI-style configuration file.

    Returns:
        Dict with the 'working_directory' value from the [WorkingFiles]
        section.
    """
    parser = ConfigParser.ConfigParser()
    parser.read(config_file)
    return {
        # WorkingFiles
        'working_directory': parser.get('WorkingFiles', 'working_directory'),
    }
def set_logger(debug_level):
    """Creates an instance of HMDCLogger with appropriate handlers.

    Arguments:
        debug_level: Logging level name (e.g. 'DEBUG' or 'NOTSET').

    Returns:
        HMDCLogger instance named "outages" that logs to the console.
    """
    hmdclog = hmdclogger.HMDCLogger("outages", debug_level)
    hmdclog.log_to_console()
    return hmdclog
def parse_xml(source, hmdclog):
    """Reads in messages from notifications XML file.

    Arguments:
        source: Path to the notifications XML file.
        hmdclog: HMDCLogger instance used for debug output.

    Returns:
        List of decoded outage message strings.

    Raises:
        Exception: If the notifications file does not exist.
    """
    hmdclog.log('debug', "Source file: " + source)
    if not os.path.isfile(source):
        raise Exception("Notifications file not found!")
    # Renamed the handle from "file" to avoid shadowing the builtin; the
    # "with" statement already closes it, so no explicit close() is needed.
    with open(source, "r") as xml_handle:
        xml_file = BeautifulSoup(xml_handle, "xml")
    hmdclog.log('debug', "Read in file: " + source)
    outages = []
    # enumerate replaces the hand-rolled counter from the original version.
    for counter, message in enumerate(xml_file.find_all("message"), 1):
        hmdclog.log('debug', "Parsing message #" + str(counter) + ".")
        raw_text = str(message.text)
        outages.append(raw_text.decode("unicode_escape"))
    return outages
#
# Setup argument parsing with the argparse module.
#
parser = argparse.ArgumentParser(description="Display RCE outages.")
parser.add_argument('-d', '--debug', action='store_true',
                    help="Enables verbose output.")
args = parser.parse_args()
#
# Import conf file settings.
#
settings = get_settings(CONFIG_FILE)
#
# Set logging level based on the debug argument.
#
debug_level = 'DEBUG' if args.debug else 'NOTSET'
hmdclog = set_logger(debug_level)
#
# Parse the notifications file for outage messages to display.
#
source = settings['working_directory'] + "/" + NOTIFICATIONS_FILE
messages = parse_xml(source, hmdclog)
#
# Print the messages to console.  (Python 2 print statement.)
#
for message in messages:
    print message
AbstractGeek/rusmalai-ncbs | packages/examples/test_data.py | Python | mit | 768 | 0.018229 | import ANN
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
# Load the sklearn digits dataset (8x8 images flattened to 64 features).
data = load_digits()
# NOTE(review): "input" shadows the builtin input(); kept as-is to avoid
# changing the script's behavior.
input = data.data
target = data.target
numLayers = 5
iterations = 20000
#input = [[0,0],[0,1],[1,0],[1,1]]
#target = [0,1,1,0]
# Train a feed-forward network (project-local ANN module) and plot the
# training error curve.
nn1 = ANN.FNN(numLayers, input, target, eta=0.05 )
e = nn1.train(iterations)
achieved = nn1.output_layer.output
print achieved
#e = []
#for i in range(20000):
#    e.append(nn1.iterate())
#print "Output is {}".format(nn1.output_layer.output)
#print nn1.output_layer.neurons[0].w, nn1.output_layer.prev_layer.neurons[0].output, nn1.output_layer.prev_layer.neurons[1].output, nn1.output_layer.prev_layer.neurons[2].output
plt.plot(e)
plt.show()
#plt.scatter(input.T[0], input.T[1], c= 0.5*(target-achieved)**2)
#plt.show()
googleads/google-ads-python | google/ads/googleads/v10/enums/types/user_list_combined_rule_operator.py | Python | apache-2.0 | 1,169 | 0.000855 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"UserListCombinedRuleOperatorEnum",},
)
class UserListCombinedRuleOperatorEnum(proto.Message):
    r"""Logical operator connecting two rules.
    """
    class UserListCombinedRuleOperator(proto.Enum):
        r"""Enum describing possible user list combined rule operators."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Value unknown in this API version.
        AND = 2  # A AND B.
        AND_NOT = 3  # A AND NOT B.
__all__ = | tuple(sorted(__protobuf__.manifest))
|
lorien/grab | grab/spider/__init__.py | Python | mit | 158 | 0 | from grab.spider.base import Spider # noqa
from grab.spid | er.task import Task # noqa
from grab.spider.error import * # noqa py | lint: disable=wildcard-import
|
tensorflow/tensorflow | tensorflow/python/kernel_tests/nn_ops/embedding_ops_test.py | Python | apache-2.0 | 47,859 | 0.009486 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ops used with embeddings."""
import itertools
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import compat
def _AsLong(array):
"""Casts arrays elements to long type. Used to convert from numpy tf."""
return [int(x) for x in array]
class ScatterAddSubTest(test.TestCase):
  """Tests scatter_add/scatter_sub against a numpy reference computation."""
  def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):
    """Run a random test case with the given shape and indices.
    Args:
      shape: Shape of the parameters array.
      indices: One-dimensional array of ints, the indices of the last dimension
        of the parameters to update.
      scatter_op: ScatterAdd or ScatterSub.
    """
    super(ScatterAddSubTest, self).setUp()
    with self.cached_session(use_gpu=False):
      # Create a random parameter array of given shape
      p_init = np.random.rand(*shape).astype("f")
      # Create the shape of the update array. All dimensions except the last
      # match the parameter array, the last dimension equals the # of indices.
      vals_shape = [len(indices)] + shape[1:]
      vals_init = np.random.rand(*vals_shape).astype("f")
      v_i = [float(x) for x in vals_init.ravel()]
      p = variables.Variable(p_init)
      vals = constant_op.constant(v_i, shape=vals_shape, name="vals")
      ind = constant_op.constant(indices, dtype=dtypes.int32)
      p2 = scatter_op(p, ind, vals, name="updated_p")
      # p = init
      self.evaluate(variables.global_variables_initializer())
      # p += vals
      result = self.evaluate(p2)
      # Compute the expected 'p' using numpy operations.
      for i, ind in enumerate(indices):
        if scatter_op == state_ops.scatter_add:
          p_init.reshape(shape[0], -1)[ind, :] += (vals_init.reshape(
              vals_shape[0], -1)[i, :])
        else:
          p_init.reshape(shape[0], -1)[ind, :] -= (vals_init.reshape(
              vals_shape[0], -1)[i, :])
      self.assertTrue(all((p_init == result).ravel()))
  @test_util.run_deprecated_v1
  def testNoRepetitions(self):
    # Each index appears at most once, so no accumulation across updates.
    self._TestCase([2, 2], [1])
    self._TestCase([4, 4, 4], [2, 0])
    self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])
  @test_util.run_deprecated_v1
  def testWithRepetitions(self):
    # Repeated indices must accumulate multiple updates into the same row.
    self._TestCase([2, 2], [1, 1])
    self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
    self._TestCase([32, 4, 4], [31] * 8)
  @test_util.run_deprecated_v1
  def testRandom(self):
    # Random shapes of rank 4, random indices
    for _ in range(5):
      shape = np.random.randint(1, 20, size=4)
      indices = np.random.randint(shape[0], size=2 * shape[0])
      self._TestCase(_AsLong(list(shape)), list(indices))
  @test_util.run_deprecated_v1
  def testSubRandom(self):
    # Random shapes of rank 4, random indices
    for _ in range(5):
      shape = np.random.randint(1, 20, size=4)
      indices = np.random.randint(shape[0], size=2 * shape[0])
      self._TestCase(_AsLong(list(shape)), list(indices), state_ops.scatter_sub)
  @test_util.run_deprecated_v1
  def testWrongShape(self):
    # Indices and values mismatch.
    var = variables.Variable(
        array_ops.zeros(shape=[1024, 64, 64], dtype=dtypes.float32))
    indices = array_ops.placeholder(dtypes.int32, shape=[32])
    values = array_ops.placeholder(dtypes.float32, shape=[33, 64, 64])
    with self.assertRaises(ValueError):
      state_ops.scatter_add(var, indices, values)
    # Var and values mismatch.
    values = array_ops.placeholder(dtypes.float32, shape=[32, 64, 63])
    with self.assertRaises(ValueError):
      state_ops.scatter_add(var, indices, values)
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
                     vocab_size,
                     dtype=dtypes.float32,
                     shape=None,
                     use_shapeless_placeholder=False):
  """Build sharded embedding parameters for the tests.

  Args:
    num_shards: Number of shards to split the vocabulary across; any excess
      rows go one-per-shard to the first shards.
    vocab_size: Total number of embedding rows.
    dtype: TensorFlow dtype of the parameters (float32 or float64).
    shape: Per-row embedding shape; defaults to [10].
    use_shapeless_placeholder: If True, create placeholders with unknown
      shape instead of constants.

  Returns:
    Tuple (params_list, params_dict, feed_dict): the per-shard tensors, a
    dict mapping tensor names ("pN:0") to random numpy values, and a feed
    dict mapping placeholder names to the same values.
  """
  p = []
  params = {}
  feed_dict = {}
  if not shape:
    shape = [10]
  for i in range(num_shards):
    shard_shape = [vocab_size // num_shards] + shape
    if i < vocab_size % num_shards:  # Excess goes evenly on the first shards
      shard_shape[0] += 1
    param_name = _PName(i)
    if use_shapeless_placeholder:
      param = array_ops.placeholder(dtype, shape=None, name=param_name)
    else:
      param = constant_op.constant(
          1.0, shape=shard_shape, dtype=dtype, name=param_name)
    p.append(param)
    np_type = "f" if dtype == dtypes.float32 else "d"
    # Shift values into [1, 2) so they are never zero.
    val = (np.random.rand(*shard_shape).astype(np_type)) + 1
    params[param_name + ":0"] = val
    feed_dict[param.name] = val
  return p, params, feed_dict
def _EmbeddingParamsAsPartitionedVariable(num_shards,
                                          vocab_size,
                                          dtype=dtypes.float32,
                                          shape=None,
                                          use_resource=False):
  """Like _EmbeddingParams, but also builds a single partitioned variable.

  Returns:
    Tuple (params_list, partitioned_variable, params_dict, feed_dict); the
    partitioned variable "p" is initialized with the concatenation of the
    per-shard values and split across at most num_shards partitions.
  """
  p, params, feed_dict = _EmbeddingParams(
      num_shards, vocab_size, dtype=dtype, shape=shape)
  shape = shape or [10]
  partitioned_variable = variable_scope.get_variable(
      "p",
      shape=[vocab_size] + shape,
      initializer=array_ops.concat([params[p_i.name] for p_i in p], 0),
      partitioner=partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_shards, min_slice_size=1),
      use_resource=use_resource)
  return p, partitioned_variable, params, feed_dict
def _EmbeddingResult(params,
id_vals,
| num_shards,
vocab_size,
partition_strategy="mod",
weight_vals=None):
if weight_vals is None:
weight_vals = np.copy(id_vals)
weight_vals.fill(1)
values = []
weights = []
weights_squared = []
for ids, wts in zip(id_vals, weight_vals):
value_aggregation = None
weight_aggregation = None
squared_weight_aggregation = None
if isinstance(ids, compat.integral_types):
ids = [ids]
wts = [wt | s]
for i, weight_value in zip(ids, wts):
if partition_strategy == "mod":
val = np.copy(params[_PName(i % num_shards) + ":0"][
i // num_shards, :]) * weight_value
elif partition_strategy == "div":
ids_per_partition, extras = divmod(vocab_size, num_shards)
threshold = extras * (ids_per_partition + 1)
if i < threshold:
partition = i // (ids_per_partition + 1)
offset = i % (ids_per_partition + 1)
else:
partition = extras + (i - threshold) // ids_per_partition
offset = (i - threshold) % ids_per_partition
val = np.co |
cloudera/hue | desktop/core/src/desktop/api_public_urls.py | Python | apache-2.0 | 7,696 | 0.007147 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from desktop import api_public
from desktop.lib.botserver import api as botserver_api
if sys.version_info[0] > 2:
  # Python 3 installs ship a Django that provides re_path in django.urls.
  from django.urls import re_path
else:
  # Legacy Python 2 fallback: alias the old url() helper to re_path.
  from django.conf.urls import url as re_path
# "New" query API (i.e. connector based, lean arguments).
# e.g. https://demo.gethue.com/api/query/execute/ | hive
urlpatterns = [
re_path(r'^query/create_notebook/?$', api_public.create_notebook, name='query_create_notebook'),
re_path(r'^query/autocomplete/?$', api_public.autocomplete, name='query_autocomplete_databases'),
]
# Compatibility with "old" private API.
# e.g. https://demo.gethue.com/notebook/api/execute/hive
urlpatterns += [
re_path(r'^get_config/?$', api_public.get_config),
re_path(r'^get_namespaces/(?P<interface>[\w\-]+)/?$', api_public.get_context_namespaces), # To remove
re_path(r'^editor/create_notebook/?$', api_public.create_notebook, name='editor_create_notebook'),
re_path(r'^editor/create_session/?$', api_public.create_session, name='editor_create_session'),
re_path(r'^editor/close_session/?$', api_public.close_session, name='editor_close_session'),
re_path(r'^editor/execute(?:/(?P<dialect>.+))?/?$', api_public.execute, name='editor_execute'),
re_path(r'^editor/check_status/?$', api_public.check_status, name='editor_check_status'),
re_path(r'^editor/fetch_result_data/?$', api_public.fetch_result_data, name='editor_fetch_result_data'),
re_path(r'^editor/fetch_result_metadata/?$', api_public.fetch_result_metadata, name='editor_fetch_result_metadata'),
re_path(r'^editor/fetch_result_size/?$', api_public.fetch_result_size, name='editor_fetch_result_size'),
re_path(r'^editor/cancel_statement/?$', api_public.cancel_statement, name='editor_cancel_statement'),
re_path(r'^editor/close_statement/?$', api_public.close_statement, name='editor_close_statement'),
re_path(r'^editor/get_logs/?$', api_public.get_logs, name='editor_get_logs'),
re_path(r'^editor/get_history/?', api_public.get_history, name='editor_get_history'),
re_path(r'^editor/describe/(?P<database>[^/]*)/?$', api_public.describe, name='editor_describe_database'),
re_path(r'^editor/describe/(?P<database>[^/]*)/(?P<table>[\w_\-]+)/?$', api_public.describe, name='editor_describe_table'),
re_path(
r'^editor/describe/(?P<database>[^/]*)/(?P<table>\w+)/stats(?:/(?P<column>\w+))?/?$',
api_public.describe,
name='editor_describe_column'
),
re_path(r'^editor/autocomplete/?$', api_public.autocomplete, name='editor_autocomplete_databases'),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/?$",
api_public.autocomplete,
name="editor_autocomplete_tables",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/?$",
api_public.autocomplete,
name="editor_autocomplete_columns",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/?$",
api_public.autocomplete,
name="editor_autocomplete_column",
),
re_path(
r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/(?P<nested>.+)/?$",
api_public.autocomplete,
name="editor_autocomplete_nested",
),
re_path(r'^editor/sample/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/?$', api_public.get_sample_data, name='editor_sample_data'),
re_path(
r'^editor/sample/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/?$',
api_public.get_sample_data,
name='editor_sample_data_column'
),
]
urlpatterns += [
re_path(r'^storage/get_filesystems/?$', api_public.storage_get_filesystems, name='storage_get_filesystems'),
re_path(r'^storage/view=(?P<path>.*)$', api_public.storage_view, name='storage_view'),
re_path(r'^storage/download=(?P<path>.*)$', api_public.storage_download, name='storage_download'),
re_path(r'^storage/upload/file/?$', api_public.storage_upload_file, name='storage_upload_file'),
]
urlpatterns += [
re_path(
r'^(?P<dialect>.+)/analyze/(?P<database>\w+)/(?P<table>\w+)(?:/(?P<columns>\w+))?/?$',
api_public.analyze_table,
name='dialect_analyze_table'
),
]
# Slack install API for using CORS by default
urlpatterns += [
re_path(r'^slack/install/?$', botserver_api.generate_slack_install_link, name='botserver.api.slack_install_link'),
]
urlpatterns += [
re_path(r'^indexer/guess_format/?$', api_public.guess_format, name='indexer_guess_format'),
re_path(r'^indexer/guess_field_types/?$', api_public.guess_field_types, name='indexer_guess_field_types'),
re_path(r'^indexer/importer/submit', api_public.importer_submit, name='indexer_importer_submit'),
]
urlpatterns += [
re_path(r'^connector/types/?$', api_public.get_connector_types, name='connector_get_types'),
re_path(r'^connector/instances/?$', api_public.get_connectors_instances, name='connector_get_instances'),
re_path(r'^connector/instance/new/(?P<dialect>[\w\-]+)/(?P<interface>[\w\-]+)$', api_public.new_connector, name='connector_new'),
re_path(r'^connector/instance/get/(?P<id>\d+)$', api_public.get_connector, name='connector_get'),
re_path(r'^connector/instance/delete/?$', api_public.delete_connector, name='connector_delete'),
re_path(r'^connector/instance/update/?$', api_public.update_connector, name='connector_update'),
re_path(r'^connector/instance/test/?$', api_public.test_connector, name='connector_test'),
re_path(r'^connector/examples/install/?$', api_public.install_connector_examples, name='connector_install_examples'),
]
urlpatterns += [
re_path(r'^optimizer/top_databases/?$', api_public.top_databases, name='optimizer_top_databases'),
re_path(r'^optimizer/top_tables/?$', api_public.top_tables, name='optimizer_top_tables'),
re_path(r'^optimizer/top_columns/?$', api_public.top_columns, name='optimizer_top_columns'),
re_path(r'^optimizer/top_joins/?$', api_public.top_joins, name='optimizer_top_joins'),
re_path(r'^optimizer/top_filters/?$', api_public.top_filters, name='optimizer_top_filters'),
re_path(r'^optimizer/top_aggs/?$', api_public.top_aggs, name='optimizer_top_aggs'),
re_path(r'^optimizer/query_risk/?$', api_public.query_risk, name='optimizer_query_risk'),
re_path(r'^optimizer/predict/?$', api_public.predict, name='optimizer_predict'),
re_path(r'^optimizer/query_compatibility/?$', api_public.query_compatibility, name='optimizer_query_compatibility'),
re_path(r'^optimizer/similar_queries/?$', api_public.similar_queries, name='optimizer_similar_queries'),
]
urlpatterns += [
re_path(r'^metadata/search/entities_interactive/?$', api_public.search_entities_interactive, name='metadata_entities_interactive'),
]
urlpatterns += [
re_path(r'^iam/users/autocomplete', api_public.list_for_autocomplete, name='iam_users_list_for_autocomplete'),
re_path(r'^iam/users/?$', api_public.get_users_by_id, name='iam_get_users_by_id'),
re_path(r'^iam/get_users/?', api_public.get_users, name='iam_get_users'),
] |
subramani95/neutron | neutron/tests/unit/vmware/nsxlib/test_router.py | Python | apache-2.0 | 45,472 | 0.000044 | # Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from oslo.config import cfg
from neutron.common import exceptions
from neutron.openstack.common import uuidutils
from neutron.plu | gins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from neutron.tests.unit. | vmware.nsxlib import base
_uuid = test_api_v2._uuid
class TestNatRules(base.NsxlibTestCase):
    def _test_create_lrouter_dnat_rule(self, version):
        # Pin the cluster's reported NSX version so the version-dependent
        # request-building path is exercised for both 2.x and 3.x.
        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               new=lambda: version_module.Version(version)):
            tenant_id = 'pippo'
            lrouter = routerlib.create_lrouter(self.fake_cluster,
                                               uuidutils.generate_uuid(),
                                               tenant_id,
                                               'fake_router',
                                               '192.168.0.1')
            nat_rule = routerlib.create_lrouter_dnat_rule(
                self.fake_cluster, lrouter['uuid'], '10.0.0.99',
                match_criteria={'destination_ip_addresses':
                                '192.168.0.5'})
            uri = nsxlib._build_uri_path(routerlib.LROUTERNAT_RESOURCE,
                                         nat_rule['uuid'],
                                         lrouter['uuid'])
            resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster)
            # The created DNAT rule must round-trip through the fake API.
            self.assertEqual('DestinationNatRule', resp_obj['type'])
            self.assertEqual('192.168.0.5',
                             resp_obj['match']['destination_ip_addresses'])
    def test_create_lrouter_dnat_rule_v2(self):
        self._test_create_lrouter_dnat_rule('2.9')
    def test_create_lrouter_dnat_rule_v31(self):
        self._test_create_lrouter_dnat_rule('3.1')
class TestExplicitLRouters(base.NsxlibTestCase):
    def setUp(self):
        # NOTE(review): presumably consumed by base.NsxlibTestCase.setUp
        # when it builds the fake cluster -- confirm against the base class.
        self.fake_version = '3.2'
        super(TestExplicitLRouters, self).setUp()
    def _get_lrouter(self, tenant_id, router_name, router_id, relations=None):
        """Build a fake logical-router dict as the NSX API would return it."""
        schema = '/ws.v1/schema/RoutingTableRoutingConfig'
        router = {'display_name': router_name,
                  'uuid': router_id,
                  'tags': utils.get_tags(os_tid=tenant_id),
                  'distributed': False,
                  'routing_config': {'type': 'RoutingTableRoutingConfig',
                                     '_schema': schema},
                  '_schema': schema,
                  'nat_synchronization_enabled': True,
                  'replication_mode': 'service',
                  'type': 'LogicalRouterConfig',
                  '_href': '/ws.v1/lrouter/%s' % router_id, }
        # Optional '_relations' payload (e.g. LogicalRouterStatus).
        if relations:
            router['_relations'] = relations
        return router
def _get_single_route(self, router_id, route_id='fake_route_id_0',
prefix='0.0.0.0/0', next_hop_ip='1.1.1.1'):
return {'protocol': 'static',
'_href': '/ws.v1/lrouter/%s/rib/%s' % (router_id, route_id),
'prefix': prefix,
'_schema': '/ws.v1/schema/RoutingTableEntry',
'next_hop_ip': next_hop_ip,
'action': 'accept',
'uuid': route_id}
    def test_prepare_body_with_implicit_routing_config(self):
        router_name = 'fake_router_name'
        tenant_id = 'fake_tenant_id'
        neutron_router_id = 'pipita_higuain'
        router_type = 'SingleDefaultRouteImplicitRoutingConfig'
        route_config = {
            'default_route_next_hop': {'gateway_ip_address': 'fake_address',
                                       'type': 'RouterNextHop'}, }
        body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
                                               tenant_id, router_type,
                                               **route_config)
        # Route kwargs must be nested under 'routing_config' together with
        # the requested router type, and both IDs encoded as tags.
        expected = {'display_name': 'fake_router_name',
                    'routing_config': {
                        'default_route_next_hop':
                        {'gateway_ip_address': 'fake_address',
                         'type': 'RouterNextHop'},
                        'type': 'SingleDefaultRouteImplicitRoutingConfig'},
                    'tags': utils.get_tags(os_tid='fake_tenant_id',
                                           q_router_id='pipita_higuain'),
                    'type': 'LogicalRouterConfig',
                    'replication_mode': cfg.CONF.NSX.replication_mode}
        self.assertEqual(expected, body)
    def test_prepare_body_without_routing_config(self):
        router_name = 'fake_router_name'
        tenant_id = 'fake_tenant_id'
        neutron_router_id = 'marekiaro_hamsik'
        router_type = 'RoutingTableRoutingConfig'
        body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
                                               tenant_id, router_type)
        # Without explicit route kwargs only the type appears in
        # 'routing_config'.
        expected = {'display_name': 'fake_router_name',
                    'routing_config': {'type': 'RoutingTableRoutingConfig'},
                    'tags': utils.get_tags(os_tid='fake_tenant_id',
                                           q_router_id='marekiaro_hamsik'),
                    'type': 'LogicalRouterConfig',
                    'replication_mode': cfg.CONF.NSX.replication_mode}
        self.assertEqual(expected, body)
    def test_get_lrouter(self):
        tenant_id = 'fake_tenant_id'
        router_name = 'fake_router_name'
        router_id = 'fake_router_id'
        # Fake '_relations' payload reporting a healthy fabric status.
        relations = {
            'LogicalRouterStatus':
            {'_href': '/ws.v1/lrouter/%s/status' % router_id,
             'lport_admin_up_count': 1,
             '_schema': '/ws.v1/schema/LogicalRouterStatus',
             'lport_count': 1,
             'fabric_status': True,
             'type': 'LogicalRouterStatus',
             'lport_link_up_count': 0, }, }
        with mock.patch.object(nsxlib, 'do_request',
                               return_value=self._get_lrouter(tenant_id,
                                                              router_name,
                                                              router_id,
                                                              relations)):
            lrouter = routerlib.get_lrouter(self.fake_cluster, router_id)
            self.assertTrue(
                lrouter['_relations']['LogicalRouterStatus']['fabric_status'])
def test_create_lrouter(self):
tenant_id = 'fake_tenant_id'
router_name = 'fake_router_name'
router_id = 'fake_router_id'
nexthop_ip = '10.0.0.1'
with mock.patch.object(
nsxlib, 'do_request',
return_value=self._get_lrouter(tenant_id,
router_name,
router_id)):
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,
router_name, nexthop_ip)
self.assertEqual(lrouter['routing_config']['type'],
'RoutingTableRoutingConfig')
self.assertNotI |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.