text stringlengths 4 1.02M | meta dict |
|---|---|
import platform
import re
# Platform identification constants.
UNKNOWN = 0
RASPBERRY_PI = 1
BEAGLEBONE_BLACK = 2
MINNOWBOARD = 3
def platform_detect():
    """Detect if running on the Raspberry Pi or Beaglebone Black and return the
    platform type.  Will return RASPBERRY_PI, BEAGLEBONE_BLACK, MINNOWBOARD,
    or UNKNOWN.
    """
    # Handle Raspberry Pi: pi_version() returns a version number or None.
    if pi_version() is not None:
        return RASPBERRY_PI
    # Handle Beaglebone Black
    # TODO: Check the Beaglebone Black /proc/cpuinfo value instead of reading
    # the platform.
    plat = platform.platform().lower()
    # Known Beaglebone Black platform-string markers (Debian, Ubuntu, Angstrom).
    if any(marker in plat for marker in ('armv7l-with-debian',
                                         'armv7l-with-ubuntu',
                                         'armv7l-with-glibc2.4')):
        return BEAGLEBONE_BLACK
    # Handle Minnowboard.
    # Assumption is that mraa is installed; if not, this platform is skipped.
    try:
        import mraa
        # Query the platform name once instead of calling the API twice.
        if mraa.getPlatformName() in ('MinnowBoard MAX',
                                      'MinnowBoard Compatible'):
            return MINNOWBOARD
    except ImportError:
        pass
    # Couldn't figure out the platform, just return unknown.
    return UNKNOWN
def pi_revision():
    """Detect the revision number of a Raspberry Pi, useful for changing
    functionality like default I2C bus based on revision.

    :return: 1 or 2 (board revision)
    :raises RuntimeError: if no Revision line is found in /proc/cpuinfo
    """
    # Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
    with open('/proc/cpuinfo', 'r') as infile:
        for line in infile:
            # Match a line of the form "Revision : 0002" while ignoring extra
            # info in front of the revision (like 1000 when the Pi was over-volted).
            # Raw string avoids invalid \s / \w escape sequences on Python 3.
            match = re.match(r'Revision\s+:\s+.*(\w{4})$', line, flags=re.IGNORECASE)
            if match and match.group(1) in ['0000', '0002', '0003']:
                # Return revision 1 if revision ends with 0000, 0002 or 0003.
                return 1
            elif match:
                # Assume revision 2 if revision ends with any other 4 chars.
                return 2
    # Couldn't find the revision, throw an exception.
    raise RuntimeError('Could not determine Raspberry Pi revision.')
def pi_version():
    """Detect the version of the Raspberry Pi.  Returns either 1, 2 or
    None depending on if it's a Raspberry Pi 1 (model A, B, A+, B+),
    Raspberry Pi 2 (model B+), or not a Raspberry Pi.
    """
    # Check /proc/cpuinfo for the Hardware field value.
    # 2708 is pi 1
    # 2709 is pi 2
    # Anything else is not a pi.
    with open('/proc/cpuinfo', 'r') as infile:
        cpuinfo = infile.read()
    # Match a line like 'Hardware   : BCM2709'.  Raw string avoids invalid
    # \s / \w escape sequences on Python 3.
    match = re.search(r'^Hardware\s+:\s+(\w+)$', cpuinfo,
                      flags=re.MULTILINE | re.IGNORECASE)
    if not match:
        # Couldn't find the hardware, assume it isn't a pi.
        return None
    if match.group(1) == 'BCM2708':
        # Pi 1
        return 1
    elif match.group(1) == 'BCM2709':
        # Pi 2
        return 2
    else:
        # Something else, not a pi.
        return None
| {
"content_hash": "7b3c52079db3264f852ce39893ab771c",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 105,
"avg_line_length": 36.174418604651166,
"alnum_prop": 0.6174863387978142,
"repo_name": "steelee/minnow_max_maker",
"id": "a97030938b35ad092b2493a9c39f794cfa45357f",
"size": "4228",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Adafruit_Python_GPIO/Adafruit_GPIO/Platform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2006"
},
{
"name": "Makefile",
"bytes": "336"
},
{
"name": "Python",
"bytes": "203960"
},
{
"name": "Shell",
"bytes": "3417"
}
],
"symlink_target": ""
} |
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from collections import Counter
import subprocess
import logging
def grouper(iterable, n, padvalue=None):
    """
    Group iterable into chunks of size n.
    Drops the trailing group when it is shorter than n.
    grouper('abcdefg', 3) --> (('a','b','c'), ('d','e','f'))
    """
    # One shared iterator repeated n times: zip pulls n items per output
    # tuple and stops at the first incomplete group.
    shared = iter(iterable)
    return zip(*(n * [shared]))
def most_common(values):
    """Return the most frequent value plus a full (value, percentage) ranking.

    The ranking is ordered most-frequent first; percentages are rounded
    to whole numbers.
    """
    n_total = len(values)
    ranking = [
        (value, round(100 * count / float(n_total)))
        for value, count in Counter(values).most_common()
    ]
    return ranking[0][0], ranking
def remove_silence(input_path, output_path, block=True):
    """Strip leading/trailing silence from an audio file with the external
    ``sox`` tool, downmixing to mono.

    :param input_path: path of the source audio file
    :param output_path: path to write the trimmed mono audio to
    :param block: when True, wait for sox to finish before returning
    :return: output_path
    """
    # Lazy %-style args: the message is only built if INFO logging is enabled.
    logging.info('Removing silence from: %s to: %s\n', input_path, output_path)
    # silence 1 0.1 0.1% -1 0.1 0.1%: trim silence (0.1% threshold, minimum
    # 0.1s) from both the start and the end of the stream.
    sox_args = ['sox', input_path, '-c', '1', output_path, 'silence',
                '1', '0.1', '0.1%', '-1', '0.1', '0.1%']
    process_handle = subprocess.Popen(sox_args, stderr=subprocess.PIPE)
    if block:
        process_handle.communicate()
    return output_path
# This is no longer needed
# def load_audio(object_or_path):
# try:
# audio, sr = librosa.load(object_or_path)
# except:
# audio, sr = parse_audio(object_or_path)
# return audio, sr
#
#
# def parse_audio(file_object, sr=22050, mono=True, offset=0.0, duration=None,
# dtype=np.float32):
# """Load an audio file object as a floating point time series.
# Imitates librosa's load function, but for already opened file objects
#
# Parameters
# ----------
# file_object : opened file object
# Any format supported by `audioread` will work.
# sr : number > 0 [scalar]
# target sampling rate
# 'None' uses the native sampling rate
# mono : bool
# convert signal to mono
# offset : float
# start reading after this time (in seconds)
# duration : float
# only load up to this much audio (in seconds)
# dtype : numeric type
# data type of `y`
# Returns
# -------
# y : np.ndarray [shape=(n,) or (2, n)]
# audio time series
# sr : number > 0 [scalar]
# sampling rate of `y`
# """
#
# import audioread_object
# y = []
# with audioread_object.audio_open(file_object) as input_file:
# sr_native = input_file.samplerate
#
# s_start = int(np.round(sr_native * offset)) * input_file.channels
#
# if duration is None:
# s_end = np.inf
# else:
# s_end = s_start + (int(np.round(sr_native * duration))
# * input_file.channels)
#
# n = 0
#
# for frame in input_file:
# frame = librosa.util.buf_to_float(frame, dtype=dtype)
# n_prev = n
# n = n + len(frame)
#
# if n < s_start:
# # offset is after the current frame
# # keep reading
# continue
#
# if s_end < n_prev:
# # we're off the end. stop reading
# break
#
# if s_end < n:
# # the end is in this frame. crop.
# frame = frame[:s_end - n_prev]
#
# if n_prev <= s_start <= n:
# # beginning is in this frame
# frame = frame[(s_start - n_prev):]
#
# # tack on the current frame
# y.append(frame)
#
# if y:
# y = np.concatenate(y)
#
# if input_file.channels > 1:
# y = y.reshape((-1, 2)).T
# if mono:
# y = librosa.to_mono(y)
#
# if sr is not None:
# if y.ndim > 1:
# y = np.vstack([librosa.resample(yi, sr_native, sr) for yi in y])
# else:
# y = librosa.resample(y, sr_native, sr)
#
# else:
# sr = sr_native
#
# # Final cleanup for dtype and contiguity
# y = np.ascontiguousarray(y, dtype=dtype)
#
# return (y, sr)
| {
"content_hash": "31b53a3376b9a475b99cc679018e11f4",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 110,
"avg_line_length": 30.03676470588235,
"alnum_prop": 0.5263157894736842,
"repo_name": "Curly-Mo/mir-tools",
"id": "2795a50c3315faa0bdef2f842b0a837c387f49fd",
"size": "4085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33334"
}
],
"symlink_target": ""
} |
"""A set of common utilities used within the environments. These are
not intended as API functions, and will not remain stable over time.
"""
# These submodules should not have any import-time dependencies.
# We want this since we use `utils` during our import-time sanity checks
# that verify that our dependencies are actually present.
from .colorize import colorize
from .ezpickle import EzPickle
from .reraise import reraise
| {
"content_hash": "238879b630095af788e73c76adaa0820",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 72,
"avg_line_length": 43,
"alnum_prop": 0.7930232558139535,
"repo_name": "hparik11/Deep-Learning-Nanodegree-Foundation-Repository",
"id": "6d6aa82efb71d0d84bc742ffa6747697d8490d5b",
"size": "430",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "reinforcement/gym/gym/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3994519"
},
{
"name": "Jupyter Notebook",
"bytes": "26097389"
},
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "651374"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
} |
import asyncio
from pytest import fixture
from analyzer.storages import BaseStorage
LOOP = asyncio.get_event_loop()
class SimpleObj:
    """Minimal value object with an id field, a unique and a non-unique field,
    used to exercise Storage round-trips in the integration tests."""

    def __init__(self, key, unique, not_unique):
        self.key = key
        self.unique = unique
        self.not_unique = not_unique

    def __eq__(self, other):
        # Returning NotImplemented for foreign types (instead of raising
        # AttributeError on the missing attributes) lets Python fall back
        # to its default comparison.
        if not isinstance(other, SimpleObj):
            return NotImplemented
        return (self.key == other.key
                and self.unique == other.unique
                and self.not_unique == other.not_unique)

    def __repr__(self):
        # Debug-friendly representation for test failure output.
        return 'SimpleObj(key=%r, unique=%r, not_unique=%r)' % (
            self.key, self.unique, self.not_unique)
class Storage(BaseStorage):
    """BaseStorage subclass under test: persists SimpleObj documents."""
    # Dedicated collection so test data never touches real collections.
    collection_name = "testcollection"
    # One index per SimpleObj field: two unique, one non-unique, so the
    # tests can exercise both constraint behaviours.
    indexes = (
        {
            "keys": "key",
            "name": "by key",
            "unique": True,
        },
        {
            "keys": "unique",
            "name": "by unique",
            "unique": True,
        },
        {
            "keys": "not_unique",
            "name": "by not_unique",
            "unique": False,
        },
    )
    # Documents are identified by the SimpleObj.key attribute.
    id_field = "key"
    domain_class = SimpleObj
@fixture
def test_storage(config, event_loop):
    """Yield a connected Storage instance, dropping its collection on teardown.

    `config` and `event_loop` are sibling fixtures; `config` supplies the
    database connection parameters.
    """
    storage = Storage(db_name=config["db_name"],
                      host=config["host"],
                      port=config["port"],
                      loop=event_loop)
    event_loop.run_until_complete(storage.connect())
    yield storage
    # Teardown: remove everything the test wrote.
    event_loop.run_until_complete(storage.drop_collection())
| {
"content_hash": "3768d7ab52d11388d2db785f91a41d47",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 108,
"avg_line_length": 24.50980392156863,
"alnum_prop": 0.5336,
"repo_name": "peterVorman/apartments-analyzer",
"id": "01bd792f79a17bb9052afa1c8134ebdc3fc256df",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integrations/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24911"
},
{
"name": "Shell",
"bytes": "289"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_time_period import TapiCommonTimePeriod # noqa: F401,E501
from tapi_server import util
class TapiCommonTimeInterval(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, period=None):  # noqa: E501
        """TapiCommonTimeInterval - a model defined in OpenAPI

        :param period: The period of this TapiCommonTimeInterval.  # noqa: E501
        :type period: List[TapiCommonTimePeriod]
        """
        # Attribute name -> declared OpenAPI type.
        self.openapi_types = {'period': List[TapiCommonTimePeriod]}
        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {'period': 'period'}
        self._period = period

    @classmethod
    def from_dict(cls, dikt) -> 'TapiCommonTimeInterval':
        """Deserialize a plain dict into a TapiCommonTimeInterval.

        :param dikt: A dict.
        :type: dict
        :return: The tapi.common.TimeInterval of this TapiCommonTimeInterval.  # noqa: E501
        :rtype: TapiCommonTimeInterval
        """
        return util.deserialize_model(dikt, cls)

    @property
    def period(self):
        """Gets the period of this TapiCommonTimeInterval.

        :return: The period of this TapiCommonTimeInterval.
        :rtype: List[TapiCommonTimePeriod]
        """
        return self._period

    @period.setter
    def period(self, period):
        """Sets the period of this TapiCommonTimeInterval.

        :param period: The period of this TapiCommonTimeInterval.
        :type period: List[TapiCommonTimePeriod]
        """
        self._period = period
| {
"content_hash": "72842bf52a5fe0f3c6113729c1b54173",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 96,
"avg_line_length": 28.630769230769232,
"alnum_prop": 0.6389038151531434,
"repo_name": "OpenNetworkingFoundation/ONFOpenTransport",
"id": "3a62ea24a4bcb659a4c6a93b161cf1237a354e87",
"size": "1878",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_common_time_interval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2562"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from django.utils import simplejson
__all__ = ['hungarian_to_python', 'lookupattr']
def dumpjs(obj, *args, **kwargs):
    """Dump a Python object as Javascript, with support for a __json__ method."""
    # Python 2 module (print statement, simplejson).
    class Encoder(simplejson.JSONEncoder):
        # Objects exposing a __json__ attribute serialise themselves.
        def iterencode(self, o, _one_shot=False):
            #print o
            if hasattr(o, '__json__'):
                if callable(o.__json__):
                    # NOTE(review): leftover debug print -- consider removing.
                    print 'foo', o
                    return o.__json__()
                else:
                    return o.__json__
            else:
                return super(Encoder, self).iterencode(o)#, _one_shot=_one_shot)
    kwargs['cls'] = Encoder
    kwargs['sort_keys'] = True
    #return simplejson.dumps(obj, *args, **kwargs)
    output = simplejson.dumps(obj, *args, **kwargs)
    # Post-process: values of top-level 'fn*' keys are emitted verbatim
    # (unquoted Javascript function source) by replacing their encoded form.
    for key, val in obj.items():
        if 'fn' == key[0:2]:
            output = output.replace(simplejson.dumps(val), val)
    return output
class fn(object):
    """Wrapper for a Javascript function that should be encoded without escaping."""
    def __init__(self, fndef):
        # Keep the raw Javascript source as unicode text (Python 2).
        self.fndef = unicode(fndef)
    def __getattr__(self, name):
        # Delegate unknown attribute access to the wrapped string.
        return getattr(self.fndef, name)
    def __repr__(self):
        return 'fn(%r)' % self.fndef
    def __unicode__(self):
        return self.fndef
    def __json__(self):
        # Hook consumed by dumpjs's encoder: emit the source verbatim.
        return unicode(self.fndef)
    def __deepcopy__(self, memo):
        # Deep-copying yields the underlying text, not another fn wrapper.
        return deepcopy(self.fndef, memo)
        #return self.__class__(deepcopy(self.fndef, memo))
def hungarian_to_python(name, value):
    """Validate DataTable options specified in Hungarian notation.

    The leading letter(s) of *name* encode the expected type and *value*
    is coerced accordingly: fn=Javascript function, n/m=pass-through,
    o=dict (recursed), a=list, s=unicode string, b=bool, f=float, i=int.
    Raises NameError for an unrecognised prefix.  (Python 2 only:
    uses `unicode` and `iteritems`.)
    """
    if value is None:
        return value
    elif name.startswith('fn') and name[2].isupper():
        # Javascript function: wrap so dumpjs emits it unescaped.
        return fn(value)
    elif name.startswith('n') and name[1].isupper():
        return value
    elif name.startswith('m') and name[1].isupper():
        return value
    elif name.startswith('o') and name[1].isupper():
        # Object: validate each member against its own Hungarian name.
        d = {}
        for k, v in dict(value).iteritems():
            d[k] = hungarian_to_python(k, v)
        return d
    elif name.startswith('a') and name[1].isupper():
        return list(value)
    elif name.startswith('a') and name[1] in 'abfimnos' and name[2].isupper():
        # Typed array: validate each element against the inner prefix.
        a = []
        for i in list(value):
            a.append(hungarian_to_python(name[1:], i))
        return a
    elif name.startswith('s') and name[1].isupper():
        return unicode(value)
    elif name.startswith('b') and name[1].isupper():
        return bool(str(value).lower() in ('t', 'true', 'yes', 'y', '1'))
    elif name.startswith('f') and name[1].isupper():
        return float(value)
    elif name.startswith('i') and name[1].isupper():
        return int(value)
    else:
        raise NameError, 'name "%s" is not in hungarian notation' % name
def lookupattr(obj, name, default=None):
    """Recursively lookup an attribute or key on an object.

    *name* may be dotted or use Django-style '__' separators.  Each path
    element is tried as an attribute, then an instance-dict entry, then a
    mapping/index key; callable intermediate results are invoked with no
    arguments.  Returns *default* when any step fails.
    """
    name = name.replace('__', '.')
    for element in name.split('.'):
        try:
            attr = obj.__getattribute__(element)
        except AttributeError:
            try:
                attr = obj.__dict__[element]
            except (KeyError, AttributeError):
                try:
                    attr = obj[element]
                except (KeyError, TypeError):
                    attr = default
                    break
        except:
            # NOTE(review): bare except maps *any* error (even KeyboardInterrupt)
            # to the default -- consider narrowing to Exception.
            attr = default
            break
        # Walk down: callables are expanded before descending further.
        if callable(attr):
            obj = attr()
        else:
            obj = attr
    if callable(attr):
        return attr()
    else:
        return attr
| {
"content_hash": "1f34777ab8a85ca3c3d1a26157dbe0e5",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 84,
"avg_line_length": 34.990196078431374,
"alnum_prop": 0.5508545811151583,
"repo_name": "stavrik/test",
"id": "192bc136739a529f1ba5d91161a07603715e8c40",
"size": "3578",
"binary": false,
"copies": "1",
"ref": "refs/heads/migrate",
"path": "core/datatables/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10020"
},
{
"name": "JavaScript",
"bytes": "514966"
},
{
"name": "Python",
"bytes": "831936"
}
],
"symlink_target": ""
} |
from os.path import abspath
import urlparse
from urlparse import urlsplit
from coherence.extern.et import parse_xml as et_parse_xml
from coherence import SERVER_ID
from twisted.web import server, http, static
from twisted.web import client, error
from twisted.web import proxy, resource, server
from twisted.internet import reactor, protocol, defer, abstract
from twisted.python import failure
from twisted.python.util import InsensitiveDict
try:
from twisted.protocols._c_urlarg import unquote
except ImportError:
from urllib import unquote
try:
import netifaces
have_netifaces = True
except ImportError:
have_netifaces = False
def means_true(value):
    """Return True if *value* looks like an affirmative flag.

    Accepts booleans, ints and (case-insensitive) strings such as
    '1', 'true', 'yes', 'ok'.  Python 2 only (uses `basestring`).
    """
    if isinstance(value, basestring):
        value = value.lower()
    return value in [True, 1, '1', 'true', 'yes', 'ok']
def generalise_boolean(value):
    """ standardize the different boolean incarnations

        transform anything that looks like a "True" into a '1',
        and everything else into a '0'
    """
    # Delegates the affirmative check to means_true above.
    if means_true(value):
        return '1'
    return '0'
generalize_boolean = generalise_boolean
def parse_xml(data, encoding="utf-8"):
    """Parse an XML string; thin wrapper around coherence.extern.et.parse_xml."""
    return et_parse_xml(data, encoding)
def parse_http_response(data):
    """Split a raw HTTP response into (status-line parts, header dict).

    Header names are lower-cased; the body is deliberately ignored since
    some responses have none.
    """
    header_block = data.split('\r\n\r\n')[0]
    raw_lines = header_block.split('\r\n')
    cmd = raw_lines[0].split(' ')
    headers = {}
    for raw in raw_lines[1:]:
        # Normalise "Name: value" to "Name:value" (first occurrence only),
        # skip blank lines, then split into key and value.
        line = raw.replace(': ', ':', 1)
        if not line:
            continue
        parts = line.split(':', 1)
        headers[parts[0].lower()] = parts[1]
    return cmd, headers
def get_ip_address(ifname):
    """
    determine the IP address by interface name

    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/439094
    (c) Paul Cannon
    Uses the Linux SIOCGIFADDR ioctl to find the IP address associated
    with a network interface, given the name of that interface, e.g. "eth0".
    The address is returned as a string containing a dotted quad.

    Updated to work on BSD. OpenBSD and OSX share the same value for
    SIOCGIFADDR, and its likely that other BSDs do too.

    Updated to work on Windows,
    using the optional Python module netifaces
    http://alastairs-place.net/netifaces/

    Thx Lawrence for that patch!
    """
    # Preferred path: netifaces works cross-platform when installed.
    if have_netifaces:
        if ifname in netifaces.interfaces():
            iface = netifaces.ifaddresses(ifname)
            ifaceadr = iface[netifaces.AF_INET]
            # we now have a list of address dictionaries, there may be multiple addresses bound
            return ifaceadr[0]['addr']
    import sys
    # No ioctl fallback available on these platforms: return loopback.
    if sys.platform in ('win32', 'sunos5'):
        return '127.0.0.1'
    from os import uname
    import socket
    import fcntl
    import struct
    system_type = uname()[0]
    # SIOCGIFADDR differs between Linux and the BSD family (incl. OSX).
    if system_type == "Linux":
        SIOCGIFADDR = 0x8915
    else:
        SIOCGIFADDR = 0xc0206921
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # ifreq struct: interface name padded to 256 bytes; the IPv4
        # address lives at byte offset 20-24 of the ioctl result.
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            SIOCGIFADDR,
            struct.pack('256s', ifname[:15])
        )[20:24])
    except:
        # NOTE(review): bare except -- any ioctl/socket failure silently
        # falls back to loopback.
        return '127.0.0.1'
def get_host_address():
    """ try to get determine the interface used for
        the default route, as this is most likely
        the interface we should bind to (on a single homed host!)

        Returns a dotted-quad string, or (in the netstat fallback path)
        a Deferred firing with one.  Python 2 only (py2 except syntax).
    """
    import sys
    if sys.platform == 'win32':
        if have_netifaces:
            interfaces = netifaces.interfaces()
            if len(interfaces):
                return get_ip_address(interfaces[0]) # on windows assume first interface is primary
    else:
        try:
            # Linux: scan the kernel routing table for the default route.
            route_file = '/proc/net/route'
            route = open(route_file)
            if(route):
                tmp = route.readline() #skip first line
                while (tmp != ''):
                    tmp = route.readline()
                    l = tmp.split('\t')
                    if (len(l) > 2):
                        if l[1] == '00000000': #default route...
                            route.close()
                            return get_ip_address(l[0])
        except IOError, msg:
            """ fallback to parsing the output of netstat """
            # No /proc/net/route (e.g. BSD/OSX): parse `netstat -rn` async.
            from twisted.internet import utils

            def result(r):
                from os import uname
                (osname, _, _, _, _) = uname()
                osname = osname.lower()
                lines = r.split('\n')
                for l in lines:
                    l = l.strip(' \r\n')
                    parts = [x.strip() for x in l.split(' ') if len(x) > 0]
                    if parts[0] in ('0.0.0.0', 'default'):
                        # Darwin's netstat puts the interface in column 6,
                        # other systems in the last column.
                        if osname[:6] == 'darwin':
                            return get_ip_address(parts[5])
                        else:
                            return get_ip_address(parts[-1])
                return '127.0.0.1'

            def fail(f):
                return '127.0.0.1'
            d = utils.getProcessOutput('netstat', ['-rn'])
            d.addCallback(result)
            d.addErrback(fail)
            # NOTE(review): this branch returns a Deferred while the others
            # return a plain string -- callers must handle both.
            return d
        except Exception, msg:
            import traceback
            traceback.print_exc()

    """ return localhost if we haven't found anything """
    return '127.0.0.1'
def de_chunk_payload(response):
    """Take a chunked HTTP body string and return it un-chunked.

    Chunked encoding is a sequence of "<hex length>\\r\\n<data>\\r\\n"
    records terminated by a zero-length chunk.

    :param response: the chunk-encoded payload string
    :return: the concatenated chunk data
    """
    try:
        import cStringIO as StringIO
    except ImportError:
        try:
            import StringIO
        except ImportError:
            # Python 3: the StringIO module moved into io.
            import io as StringIO
    newresponse = StringIO.StringIO()
    response = StringIO.StringIO(response)

    def read_chunk_length():
        # A chunk header is a hex byte count on its own line; treat a
        # malformed or missing header as the end of the payload.
        line = response.readline()
        try:
            return int(line.strip(), 16)
        except ValueError:
            return 0

    chunk_len = read_chunk_length()
    while chunk_len > 0:
        newresponse.write(response.read(chunk_len))
        response.readline()  # consume the CRLF between chunk data and next header
        chunk_len = read_chunk_length()
    return newresponse.getvalue()
class Request(server.Request):
    """server.Request variant that sets Coherence default headers and
    supports Deferred resource lookup."""

    def process(self):
        "Process a request."
        # get site from channel
        self.site = self.channel.site
        # set various default headers
        self.setHeader('server', SERVER_ID)
        self.setHeader('date', http.datetimeToString())
        self.setHeader('content-type', "text/html")
        # Resource Identification
        url = self.path
        #remove trailing "/", if ever
        url = url.rstrip('/')
        scheme, netloc, path, query, fragment = urlsplit(url)
        self.prepath = []
        if path == "":
            self.postpath = []
        else:
            self.postpath = map(unquote, path[1:].split('/'))
        try:
            def deferred_rendering(r):
                self.render(r)
            resrc = self.site.getResourceFor(self)
            if resrc is None:
                self.setResponseCode(http.NOT_FOUND, "Error: No resource for path %s" % path)
                self.finish()
            elif isinstance(resrc, defer.Deferred):
                # Resource lookup may be asynchronous: render when it fires.
                resrc.addCallback(deferred_rendering)
                resrc.addErrback(self.processingFailed)
            else:
                self.render(resrc)
        except:
            # NOTE(review): bare except -- routes every failure (including
            # system exceptions) through processingFailed.
            self.processingFailed(failure.Failure())
class Site(server.Site):
    """server.Site using our Request class, with connection logging silenced."""
    noisy = False
    requestFactory = Request

    def startFactory(self):
        # Intentionally a no-op; the default log-date bookkeeping is disabled.
        pass
        #http._logDateTimeStart()
class ProxyClient(proxy.ProxyClient):
    """ProxyClient that tracks forwarded bytes and normalises ICY (Shoutcast)
    responses to HTTP/1.1.

    NOTE(review): this class references `log` and `web`, neither of which is
    imported in this module -- instantiating it would raise NameError.
    Presumably `web` should be `proxy` (twisted.web.proxy); confirm before
    relying on it.
    """
    def __init__(self, command, rest, version, headers, data, father):
        log.Loggable.__init__(self)
        #headers["connection"] = "close"
        # Running total of response bytes forwarded to the client.
        self.send_data = 0
        web.ProxyClient.__init__(self, command, rest, version,
                                 headers, data, father)

    def handleStatus(self, version, code, message):
        if message:
            # Add a whitespace to message, this allows empty messages
            # transparently
            message = " %s" % (message, )
        if version == 'ICY':
            # Shoutcast servers answer with an "ICY" status line; present
            # it downstream as regular HTTP/1.1.
            version = 'HTTP/1.1'
        web.ProxyClient.handleStatus(self, version, code, message)

    def handleHeader(self, key, value):
        # Drop Shoutcast metadata headers (icy-*) from the relayed response.
        if not key.startswith('icy-'):
            web.ProxyClient.handleHeader(self, key, value)

    def handleResponsePart(self, buffer):
        self.send_data += len(buffer)
        web.ProxyClient.handleResponsePart(self, buffer)
class ProxyClientFactory(proxy.ProxyClientFactory):
    # :fixme: Why is proxy.ProxyClient used here instead of our own
    # ProxyClient subclass above?  Is our ProxyClient used at all?
    protocol = proxy.ProxyClient
class ReverseProxyResource(proxy.ReverseProxyResource):
    """
    Resource that renders the results gotten from another server

    Put this resource in the tree to cause everything below it to be relayed
    to a different server.

    @ivar proxyClientFactoryClass: a proxy client factory class, used to create
        new connections.
    @type proxyClientFactoryClass: L{ClientFactory}

    @ivar reactor: the reactor used to create connections.
    @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
    """

    proxyClientFactoryClass = ProxyClientFactory

    def __init__(self, host, port, path, reactor=reactor):
        """
        @param host: the host of the web server to proxy.
        @type host: C{str}

        @param port: the port of the web server to proxy.
        @type port: C{port}

        @param path: the base path to fetch data from. Note that you shouldn't
            put any trailing slashes in it, it will be added automatically in
            request. For example, if you put B{/foo}, a request on B{/bar} will
            be proxied to B{/foo/bar}.
        @type path: C{str}
        """
        resource.Resource.__init__(self)
        self.host = host
        self.port = port
        self.path = path
        # Optional default query string, set via resetTarget().
        self.qs = ''
        self.reactor = reactor

    def getChild(self, path, request):
        # Each child segment extends the upstream path with '/<segment>'.
        return ReverseProxyResource(
            self.host, self.port, self.path + '/' + path)

    def render(self, request):
        """
        Render a request by forwarding it to the proxied server.
        """
        # RFC 2616 tells us that we can omit the port if it's the default port,
        # but we have to provide it otherwise
        if self.port == 80:
            request.received_headers['host'] = self.host
        else:
            request.received_headers['host'] = "%s:%d" % (self.host, self.port)
        request.content.seek(0, 0)
        # Prefer the request's own query string, falling back to self.qs.
        qs = urlparse.urlparse(request.uri)[4]
        if qs == '':
            qs = self.qs
        if qs:
            rest = self.path + '?' + qs
        else:
            rest = self.path
        clientFactory = self.proxyClientFactoryClass(
            request.method, rest, request.clientproto,
            request.getAllHeaders(), request.content.read(), request)
        self.reactor.connectTCP(self.host, self.port, clientFactory)
        # Response is written asynchronously by the proxy client.
        return server.NOT_DONE_YET

    def resetTarget(self, host, port, path, qs=''):
        # Repoint the proxy at a different upstream host/port/path.
        self.host = host
        self.port = port
        self.path = path
        self.qs = qs
class ReverseProxyUriResource(ReverseProxyResource):
    """ReverseProxyResource configured from a single URI string."""

    uri = None

    @staticmethod
    def _split_host_port(host_port):
        """Split a 'host[:port]' netloc into (host, port); port defaults to 80."""
        if ':' in host_port:
            host, port = host_port.split(':', 1)
            return host, int(port)
        return host_port, 80

    def __init__(self, uri, reactor=reactor):
        """
        @param uri: full URI of the upstream resource to proxy.
        """
        self.uri = uri
        _, host_port, path, params, _ = urlsplit(uri)
        host, port = self._split_host_port(host_port)
        if path == '':
            path = '/'
        # Fold the query string into the proxied path.
        if params == '':
            rest = path
        else:
            rest = '?'.join((path, params))
        ReverseProxyResource.__init__(self, host, port, rest, reactor)

    def resetUri(self, uri):
        """Repoint the proxy at *uri*.

        NOTE(review): unlike __init__, this passes the query string to
        resetTarget separately and does not map an empty path to '/' --
        confirm the asymmetry is intentional before unifying further.
        """
        self.uri = uri
        _, host_port, path, params, _ = urlsplit(uri)
        host, port = self._split_host_port(host_port)
        self.resetTarget(host, port, path, params)
# already on twisted.web since at least 8.0.0
class myHTTPPageGetter(client.HTTPPageGetter):
    # Follow HTTP redirects by default.
    followRedirect = True
class HeaderAwareHTTPClientFactory(client.HTTPClientFactory):
    """HTTPClientFactory whose page callback delivers (body, headers)
    instead of just the body."""

    protocol = myHTTPPageGetter
    noisy = False

    def buildProtocol(self, addr):
        # Propagate method/redirect settings onto each new protocol instance.
        p = client.HTTPClientFactory.buildProtocol(self, addr)
        p.method = self.method
        p.followRedirect = self.followRedirect
        return p

    def page(self, page):
        # Include the response headers alongside the page body.
        client.HTTPClientFactory.page(self, (page, self.response_headers))
# deprecated, do not use
# already in twisted.web since at least 1.3.0
HeaderAwareHTTPDownloader = client.HTTPDownloader
def getPage(url, contextFactory=None, *args, **kwargs):
    """
    Download a web page as a string.

    Download a page. Return a deferred, which will callback with a
    page (as a string) or errback with a description of the error.

    See HTTPClientFactory to see what extra args can be passed.
    """
    # This function is like twisted.web.client.getPage, except it uses
    # our HeaderAwareHTTPClientFactory instead of HTTPClientFactory
    # and sets the user agent.
    if 'headers' in kwargs and 'user-agent' in kwargs['headers']:
        kwargs['agent'] = kwargs['headers']['user-agent']
    elif 'agent' not in kwargs:
        kwargs['agent'] = "Coherence PageGetter"
    return client._makeGetterFactory(
        url,
        HeaderAwareHTTPClientFactory,
        contextFactory=contextFactory,
        *args, **kwargs).deferred
def downloadPage(url, file, contextFactory=None, *args, **kwargs):
    """Download a web page to a file.

    @param file: path to file on filesystem, or file-like object.

    See twisted.web.client.HTTPDownloader to see what extra args can
    be passed.
    """
    # Mirror getPage's user-agent handling so both helpers behave alike.
    if 'headers' in kwargs and 'user-agent' in kwargs['headers']:
        kwargs['agent'] = kwargs['headers']['user-agent']
    elif 'agent' not in kwargs:
        kwargs['agent'] = "Coherence PageGetter"
    return client.downloadPage(url, file, contextFactory=contextFactory,
                               *args, **kwargs)
# StaticFile used to be a patched version of static.File. The later
# was fixed in TwistedWeb 8.2.0 and 9.0.0, while the patched variant
# contained deprecated and removed code.
StaticFile = static.File
class BufferFile(static.File):
    """ taken from twisted.web.static and modified
        accordingly to the patch by John-Mark Gurney
        http://resnet.uoregon.edu/~gurney_j/jmpc/dist/twisted.web.static.patch

        Serves a file that may still be growing: target_size is presumably
        the expected final size (TODO confirm), and Range requests beyond
        the current size are postponed and retried.  Python 2 only.
    """

    def __init__(self, path, target_size=0, *args):
        static.File.__init__(self, path, *args)
        # Advertised total size; 0 means "use the file's current size".
        self.target_size = target_size
        # Handle of a pending delayed re-render (see render below).
        self.upnp_retry = None

    def render(self, request):
        #print ""
        #print "BufferFile", request

        # FIXME detect when request is REALLY finished
        if request is None or request.finished:
            print "No request to render!"
            return ''

        """You know what you doing."""
        self.restat()

        if self.type is None:
            self.type, self.encoding = static.getTypeAndEncoding(self.basename(),
                                                                 self.contentTypes,
                                                                 self.contentEncodings,
                                                                 self.defaultType)

        if not self.exists():
            return self.childNotFound.render(request)

        if self.isdir():
            return self.redirect(request)

        #for content-length
        if (self.target_size > 0):
            fsize = size = int(self.target_size)
        else:
            fsize = size = int(self.getFileSize())

        #print fsize

        # Only advertise range support when the file has reached full size.
        if size == int(self.getFileSize()):
            request.setHeader('accept-ranges', 'bytes')

        if self.type:
            request.setHeader('content-type', self.type)
        if self.encoding:
            request.setHeader('content-encoding', self.encoding)

        try:
            f = self.openForReading()
        except IOError, e:
            import errno

            if e[0] == errno.EACCES:
                return error.ForbiddenResource().render(request)
            else:
                raise

        if request.setLastModified(self.getmtime()) is http.CACHED:
            return ''

        trans = True

        range = request.getHeader('range')
        #print "StaticFile", range

        tsize = size
        if range is not None:
            # This is a request for partial data...
            bytesrange = range.split('=')
            assert bytesrange[0] == 'bytes', \
                   "Syntactically invalid http range header!"
            start, end = bytesrange[1].split('-', 1)
            if start:
                start = int(start)
                # Are we requesting something beyond the current size of the file?
                if (start >= self.getFileSize()):
                    # Retry later!
                    print bytesrange
                    print "Requesting data beyond current scope -> postpone rendering!"
                    self.upnp_retry = reactor.callLater(1.0, self.render, request)
                    return server.NOT_DONE_YET

                f.seek(start)
                if end:
                    #print ":%s" % end
                    end = int(end)
                else:
                    end = size - 1
            else:
                # Suffix range ("bytes=-N"): serve the last N bytes.
                lastbytes = int(end)
                if size < lastbytes:
                    lastbytes = size
                start = size - lastbytes
                f.seek(start)
                fsize = lastbytes
                end = size - 1
            size = end + 1
            fsize = end - int(start) + 1
            # start is the byte offset to begin, and end is the byte offset
            # to end..  fsize is size to send, tsize is the real size of
            # the file, and size is the byte position to stop sending.
            if fsize <= 0:
                request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
                fsize = tsize
                trans = False
            else:
                request.setResponseCode(http.PARTIAL_CONTENT)
                request.setHeader('content-range', "bytes %s-%s/%s " % (
                    str(start), str(end), str(tsize)))

        #print "StaticFile", start, end, tsize
        request.setHeader('content-length', str(fsize))
        if request.method == 'HEAD' or trans == False:
            # pretend we're a HEAD request, so content-length
            # won't be overwritten.
            request.method = 'HEAD'
            return ''

        #print "StaticFile out", request.headers, request.code

        # return data
        # size is the byte position to stop sending, not how many bytes to send
        BufferFileTransfer(f, size - f.tell(), request)
        # and make sure the connection doesn't get closed
        return server.NOT_DONE_YET
class BufferFileTransfer(object):
    """
    A class to represent the transfer of a file over the network.
    """
    request = None

    def __init__(self, file, size, request):
        self.file = file
        self.size = size
        self.request = request
        # Bytes already written == the file position at transfer start.
        self.written = self.file.tell()
        # Register as a pull producer (streaming=0).
        request.registerProducer(self, 0)

    def resumeProducing(self):
        #print "resumeProducing", self.request,self.size,self.written
        if not self.request:
            return
        # NOTE(review): self.size is passed as a remaining byte count by
        # BufferFile.render but compared against file.tell() below --
        # verify behaviour for non-zero range starts.
        data = self.file.read(min(abstract.FileDescriptor.bufferSize, self.size - self.written))
        if data:
            self.written += len(data)
            # this .write will spin the reactor, calling .doWrite and then
            # .resumeProducing again, so be prepared for a re-entrant call
            self.request.write(data)
        if self.request and self.file.tell() == self.size:
            self.request.unregisterProducer()
            self.request.finish()
            self.request = None

    def pauseProducing(self):
        # Pull producer: the reactor simply stops calling resumeProducing.
        pass

    def stopProducing(self):
        #print "stopProducing",self.request
        self.request.unregisterProducer()
        self.file.close()
        self.request.finish()
        self.request = None
from datetime import datetime, tzinfo, timedelta
import random
class _tz(tzinfo):
    """Minimal fixed-offset tzinfo base; subclasses set _offset and _name."""
    def utcoffset(self, dt):
        return self._offset
    def tzname(self, dt):
        return self._name
    def dst(self, dt):
        # No daylight-saving adjustment on top of the fixed offset.
        return timedelta(0)
class _CET(_tz):
    # Central European Time: UTC+1.
    _offset = timedelta(minutes=60)
    _name = 'CET'
class _CEST(_tz):
    # Central European Summer Time: UTC+2.
    _offset = timedelta(minutes=120)
    _name = 'CEST'
_bdates = [datetime(1997,2,28,17,20,tzinfo=_CET()), # Sebastian Oliver
datetime(1999,9,19,4,12,tzinfo=_CEST()), # Patrick Niklas
datetime(2000,9,23,4,8,tzinfo=_CEST()), # Saskia Alexa
datetime(2003,7,23,1,18,tzinfo=_CEST()), # Mara Sophie
# you are the best!
]
def datefaker():
    """Return one of the hard-coded ``_bdates`` datetimes, chosen at random."""
    picked = random.choice(_bdates)
    return picked
| {
"content_hash": "9a1bc5e0d26e55cdae3485ce342e4c3d",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 102,
"avg_line_length": 31.384615384615383,
"alnum_prop": 0.5764046003016591,
"repo_name": "ismaelgaudioso/Coherence",
"id": "c3db2a84f788ab7ecd84ece5180ebd26b3fa53b4",
"size": "21409",
"binary": false,
"copies": "1",
"ref": "refs/heads/maintain/0.6.x",
"path": "coherence/upnp/core/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "712"
},
{
"name": "Python",
"bytes": "1321655"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
} |
"""
Zabbix Server external inventory script.
========================================
Returns hosts and hostgroups from Zabbix Server.
Configuration is read from `zabbix.ini`.
Tested with Zabbix Server 2.0.6.
"""
from __future__ import print_function
import os, sys
import argparse
import ConfigParser
try:
    from zabbix_api import ZabbixAPI
# Catch only ImportError: the original bare `except:` also swallowed
# SystemExit/KeyboardInterrupt and masked unrelated errors raised while
# importing the module.
except ImportError:
    print("Error: Zabbix API library must be installed: pip install zabbix-api.",
          file=sys.stderr)
    sys.exit(1)

try:
    import json
except ImportError:
    # Python < 2.6 fallback: simplejson provides the same interface.
    import simplejson as json
class ZabbixInventory(object):
    """Ansible dynamic-inventory source backed by a Zabbix server.

    The constructor does all the work: reads `zabbix.ini`, parses the
    standard inventory CLI (``--list`` / ``--host``), queries the Zabbix
    API and prints the resulting JSON to stdout (exits non-zero on
    configuration or login failure).
    """

    def read_settings(self):
        """Load server/credentials from zabbix.ini (cwd first, then script dir)."""
        config = ConfigParser.SafeConfigParser()
        conf_path = './zabbix.ini'
        if not os.path.exists(conf_path):
            conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini'
        if os.path.exists(conf_path):
            config.read(conf_path)
        # server
        if config.has_option('zabbix', 'server'):
            self.zabbix_server = config.get('zabbix', 'server')
        # login
        if config.has_option('zabbix', 'username'):
            self.zabbix_username = config.get('zabbix', 'username')
        if config.has_option('zabbix', 'password'):
            self.zabbix_password = config.get('zabbix', 'password')

    def read_cli(self):
        """Parse the standard dynamic-inventory options (--list / --host NAME)."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--host')
        parser.add_argument('--list', action='store_true')
        self.options = parser.parse_args()

    def hoststub(self):
        """Return an empty inventory group structure."""
        return {
            'hosts': []
        }

    def get_host(self, api, name):
        """Return hostvars for a single host (this source provides none)."""
        data = {}
        return data

    def get_list(self, api):
        """Return all hosts, grouped by their Zabbix hostgroups.

        Every host is also added to the catch-all ``defaultgroup``.
        """
        hosts_data = api.host.get({'output': 'extend', 'selectGroups': 'extend'})
        data = {}
        data[self.defaultgroup] = self.hoststub()
        for host in hosts_data:
            hostname = host['name']
            data[self.defaultgroup]['hosts'].append(hostname)
            for group in host['groups']:
                groupname = group['name']
                if groupname not in data:
                    data[groupname] = self.hoststub()
                data[groupname]['hosts'].append(hostname)
        return data

    def __init__(self):
        self.defaultgroup = 'group_all'
        self.zabbix_server = None
        self.zabbix_username = None
        self.zabbix_password = None

        self.read_settings()
        self.read_cli()

        if self.zabbix_server and self.zabbix_username:
            try:
                api = ZabbixAPI(server=self.zabbix_server)
                api.login(user=self.zabbix_username, password=self.zabbix_password)
            # Catch Exception, not BaseException: the latter also swallows
            # KeyboardInterrupt/SystemExit. The bound exception was unused.
            except Exception:
                print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr)
                sys.exit(1)

            if self.options.host:
                data = self.get_host(api, self.options.host)
                print(json.dumps(data, indent=2))
            elif self.options.list:
                data = self.get_list(api)
                print(json.dumps(data, indent=2))
            else:
                print("usage: --list ..OR.. --host <hostname>", file=sys.stderr)
                sys.exit(1)
        else:
            print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr)
            sys.exit(1)
ZabbixInventory()
| {
"content_hash": "b44151a59fb3673b3144194e2168c71f",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 114,
"avg_line_length": 28.74576271186441,
"alnum_prop": 0.5701650943396226,
"repo_name": "abtreece/ansible",
"id": "28b643bca630270ca5197f36024529dce8871476",
"size": "4168",
"binary": false,
"copies": "85",
"ref": "refs/heads/stable-2.2",
"path": "contrib/inventory/zabbix.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """
    Initial, auto-generated schema for the ``apiv2`` app.

    Creates the question-bank models (subject/topic/concept hierarchy,
    papers, questions, answers, formulas and their inverted-index
    tables), then adds the cross-model foreign keys and many-to-many
    links that could not be declared inline.

    NOTE(review): the ``b'...'`` choice values indicate this file was
    generated under Python 2. Auto-generated migrations must match the
    recorded migration state, so only comments are added here.
    """

    initial = True

    # Root of the apiv2 migration graph: no dependencies.
    dependencies = [
    ]

    operations = [
        # One lettered part of a question's model answer; up to four
        # sub-parts are denormalised into numbered column groups.
        migrations.CreateModel(
            name='AnswerPart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('part_name', models.CharField(max_length=1)),
                ('part_content', ckeditor.fields.RichTextField()),
                ('part_respone_type', models.CharField(choices=[(b'Numberic', b'Numbers'), (b'Sketch', b'Sketch'), (b'EXPRESSION', b'Expression'), (b'Text', b'Text'), (b'Prove', b'Prove')], default=b'Text', max_length=10)),
                ('subpart_name_1', models.CharField(blank=True, max_length=10, null=True)),
                ('subpart_content_1', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('respone_type_1', models.CharField(blank=True, choices=[(b'Numberic', b'Numbers'), (b'Sketch', b'Sketch'), (b'EXPRESSION', b'Expression'), (b'Text', b'Text'), (b'Prove', b'Prove')], default=b'Text', max_length=10, null=True)),
                ('subpart_name_2', models.CharField(blank=True, max_length=10, null=True)),
                ('subpart_content_2', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('respone_type_2', models.CharField(blank=True, choices=[(b'Numberic', b'Numbers'), (b'Sketch', b'Sketch'), (b'EXPRESSION', b'Expression'), (b'Text', b'Text'), (b'Prove', b'Prove')], default=b'Text', max_length=10, null=True)),
                ('subpart_name_3', models.CharField(blank=True, max_length=10, null=True)),
                ('subpart_content_3', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('respone_type_3', models.CharField(blank=True, choices=[(b'Numberic', b'Numbers'), (b'Sketch', b'Sketch'), (b'EXPRESSION', b'Expression'), (b'Text', b'Text'), (b'Prove', b'Prove')], default=b'Text', max_length=10, null=True)),
                ('subpart_name_4', models.CharField(blank=True, max_length=10, null=True)),
                ('subpart_content_4', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('respone_type_4', models.CharField(blank=True, choices=[(b'Numberic', b'Numbers'), (b'Sketch', b'Sketch'), (b'EXPRESSION', b'Expression'), (b'Text', b'Text'), (b'Prove', b'Prove')], default=b'Text', max_length=10, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Concept',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='EducationLevel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('description', models.TextField(max_length=1000)),
            ],
        ),
        # Formula stores the raw content plus several pre-computed term
        # representations used by the formula search index.
        migrations.CreateModel(
            name='Formula',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('status', models.BooleanField(default=False)),
                ('inorder_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('sorted_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('structure_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('constant_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('variable_term', models.TextField(blank=True, max_length=1024, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='FormulaCategory',
            fields=[
                ('name', models.CharField(max_length=200, primary_key=True, serialize=False)),
            ],
        ),
        # Inverted-index row: a term mapped to the documents containing it.
        migrations.CreateModel(
            name='FormulaIndex',
            fields=[
                ('term_index', models.CharField(max_length=255, primary_key=True, serialize=False)),
                ('docsids', models.TextField(blank=True, null=True)),
                ('df', models.PositiveIntegerField(blank=True, default=1, verbose_name='frequency')),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('qa', models.TextField(null=True)),
                ('imagepath', models.FileField(upload_to='/static/image/')),
            ],
        ),
        migrations.CreateModel(
            name='KeyPoint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('type', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('concept', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='keypoints', to='apiv2.Concept')),
            ],
        ),
        migrations.CreateModel(
            name='Keyword',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('content', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Paper',
            fields=[
                ('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
                ('year', models.IntegerField()),
                ('month', models.CharField(max_length=64)),
                ('number', models.IntegerField()),
                ('no_of_question', models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Paperset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(null=True)),
            ],
        ),
        # Core question record; links to most other models below.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
                ('question_type', models.CharField(blank=True, choices=[(b'EX', b'Exam'), (b'PR', b'Practice')], default='EX', max_length=2)),
                ('used_for', models.CharField(blank=True, choices=[(b'NO', b'No'), (b'ON', b'Online'), (b'PA', b'Papers'), (b'BO', b'Both online and papers')], default='ON', max_length=2)),
                ('marks', models.IntegerField(default=1)),
                ('difficulty_level', models.CharField(choices=[(b'1', b'Very Easy'), (b'2', b'Easy'), (b'3', b'Easy'), (b'4', b'Average'), (b'5', b'Average'), (b'6', b'Average'), (b'7', b'Difficult'), (b'8', b'Difficult'), (b'9', b'Very Difficult'), (b'10', b'Very Difficult')], default='1', max_length=2)),
                ('response_type', models.CharField(choices=[(b'Numberic', b'Numbers'), (b'Sketch', b'Sketch'), (b'EXPRESSION', b'Expression'), (b'Text', b'Text'), (b'Prove', b'Prove')], default=b'Text', max_length=10)),
                ('source', models.TextField()),
                ('content', models.TextField()),
                ('content_cleaned_text', models.TextField(blank=True)),
                ('is_sample', models.BooleanField(default=False)),
                ('concept', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='apiv2.Concept')),
                ('formula_categories', models.ManyToManyField(to='apiv2.FormulaCategory')),
                ('keypoints', models.ManyToManyField(to='apiv2.KeyPoint')),
                ('keywords', models.ManyToManyField(to='apiv2.Keyword')),
                ('paper', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='apiv2.Paper')),
            ],
        ),
        migrations.CreateModel(
            name='QuestionText',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='question_text', to='apiv2.Question')),
            ],
        ),
        migrations.CreateModel(
            name='Solution',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apiv2.Question')),
            ],
        ),
        migrations.CreateModel(
            name='Subconcept',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(null=True)),
                ('concept', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subconcepts', to='apiv2.Concept')),
            ],
        ),
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('description', models.TextField(max_length=1000)),
                ('education_level', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apiv2.EducationLevel')),
            ],
        ),
        # Test* models mirror Formula/FormulaIndex/Question, presumably
        # for an experimental or staging dataset -- TODO confirm intent.
        migrations.CreateModel(
            name='TestFormula',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=1024)),
                ('status', models.BooleanField(default=False)),
                ('inorder_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('sorted_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('structure_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('constant_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('variable_term', models.TextField(blank=True, max_length=1024, null=True)),
                ('questions', models.TextField(blank=True, max_length=1024, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='TestFormulaCategory',
            fields=[
                ('name', models.CharField(max_length=200, primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='TestFormulaIndex',
            fields=[
                ('term_index', models.CharField(max_length=255, primary_key=True, serialize=False)),
                ('docsids', models.TextField(blank=True, null=True)),
                ('df', models.PositiveIntegerField(blank=True, default=1, verbose_name='frequency')),
            ],
        ),
        migrations.CreateModel(
            name='TestQuestion',
            fields=[
                ('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
                ('category', models.CharField(max_length=250, null=True)),
                ('question_type', models.CharField(blank=True, choices=[(b'EX', b'Exam'), (b'PR', b'Practice')], default='EX', max_length=2)),
                ('used_for', models.CharField(blank=True, choices=[(b'NO', b'No'), (b'ON', b'Online'), (b'PA', b'Papers'), (b'BO', b'Both online and papers')], default='ON', max_length=2)),
                ('marks', models.IntegerField(default=1)),
                ('difficulty_level', models.CharField(choices=[(b'1', b'Very Easy'), (b'2', b'Easy'), (b'3', b'Easy'), (b'4', b'Average'), (b'5', b'Average'), (b'6', b'Average'), (b'7', b'Difficult'), (b'8', b'Difficult'), (b'9', b'Very Difficult'), (b'10', b'Very Difficult')], default='1', max_length=2)),
                ('response_type', models.CharField(choices=[(b'Numberic', b'Numbers'), (b'Sketch', b'Sketch'), (b'EXPRESSION', b'Expression'), (b'Text', b'Text'), (b'Prove', b'Prove')], default=b'Text', max_length=10)),
                ('source', models.TextField()),
                ('content', models.TextField()),
                ('is_sample', models.BooleanField(default=False)),
                ('concept', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_questions', to='apiv2.Concept')),
                ('keypoints', models.ManyToManyField(to='apiv2.KeyPoint')),
                ('keywords', models.ManyToManyField(to='apiv2.Keyword')),
                ('paper', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='test_questions', to='apiv2.Paper')),
                ('subconcept', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='test_questions', to='apiv2.Subconcept')),
            ],
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apiv2.Subject')),
            ],
        ),
        # Relations added after model creation to break circular
        # declaration order between the models above.
        migrations.AddField(
            model_name='testformula',
            name='categories',
            field=models.ManyToManyField(to='apiv2.TestFormulaCategory'),
        ),
        migrations.AddField(
            model_name='question',
            name='subconcept',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='apiv2.Subconcept'),
        ),
        migrations.AddField(
            model_name='paperset',
            name='subject',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='papersets', to='apiv2.Subject'),
        ),
        migrations.AddField(
            model_name='paper',
            name='paperset',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apiv2.Paperset'),
        ),
        migrations.AddField(
            model_name='paper',
            name='subject',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='apiv2.Subject'),
        ),
        migrations.AddField(
            model_name='image',
            name='qa_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apiv2.Question'),
        ),
        migrations.AddField(
            model_name='formula',
            name='categories',
            field=models.ManyToManyField(to='apiv2.FormulaCategory'),
        ),
        migrations.AddField(
            model_name='formula',
            name='concept',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='apiv2.Concept'),
        ),
        migrations.AddField(
            model_name='formula',
            name='questions',
            field=models.ManyToManyField(to='apiv2.Question'),
        ),
        migrations.AddField(
            model_name='concept',
            name='topic',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='concepts', to='apiv2.Topic'),
        ),
        migrations.AddField(
            model_name='answerpart',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apiv2.Question'),
        ),
    ]
| {
"content_hash": "9adf3c9a9bc445b8c6788c43837f6ec0",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 307,
"avg_line_length": 57.11888111888112,
"alnum_prop": 0.5661728697355534,
"repo_name": "deka108/meas_deka",
"id": "985e070c0509253a4d466b288e4510e3608a41f0",
"size": "16409",
"binary": false,
"copies": "2",
"ref": "refs/heads/release-deka",
"path": "apiv2/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "120893"
},
{
"name": "HTML",
"bytes": "500260"
},
{
"name": "JavaScript",
"bytes": "1112443"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "350763"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class Trip(models.Model):
    """GTFS ``trips.txt`` record: one scheduled run of a vehicle along a route."""
    route_id = models.ForeignKey('routes.Route', verbose_name=_(u'Route ID'))
    service_id = models.ForeignKey('schedule.Calendar', verbose_name=_(u'Service ID'))
    trip_id = models.CharField(_(u'Trip ID'), max_length=25, unique=True)
    trip_headsign = models.CharField(_(u'Headsign'), max_length=50, blank=True, null=True)
    trip_short_name = models.CharField(_(u'Short Name'), max_length=25, blank=True, null=True)
    # GTFS direction_id values (binary direction indicator).
    DIRECTIONS = (
        (0, _(u'Outbound Travel/One Direction')),
        (1, _(u'Inbound Travel/Opposite Direction')),
    )
    direction_id = models.IntegerField(_(u'Direction'), choices=DIRECTIONS, blank=True, null=True)
    block_id = models.CharField(_(u'Block ID'), max_length=15, blank=True, null=True)
    shape_id = models.ForeignKey('shapes.Shape', verbose_name=_(u'Shape ID'), max_length=5, blank=True, null=True)
    # GTFS wheelchair_accessible values; 0 means "no information".
    WHEELCHAIR_ACCESSIBLES = (
        (0, _(u'No Accessibility Information')),
        (1, _(u'Can Be Accommodated')),
        (2, _(u'Cannot Be Accommodated')),
    )
    # NOTE(review): blank=True together with null=False looks inconsistent
    # (an empty form value cannot be stored) -- confirm intended.
    wheelchair_accessible = models.IntegerField(_(u'Wheelchair Accessible'), choices=WHEELCHAIR_ACCESSIBLES, blank=True, null=False)
    # NOTE(review): 'Accomodated' below is a typo, but it is a translatable
    # runtime string (possibly already in .po catalogs) -- fix with care.
    BIKES = (
        (0, _(u'No Bike Information')),
        (1, _(u'Can Be Accommodated')),
        (2, _(u'Cannot Be Accomodated')),
    )
    bikes_allowed = models.IntegerField(_(u'Bike Allowed'), choices=BIKES, blank=True, null=True)

    class Meta:
        verbose_name = _(u'Trip')
        verbose_name_plural = _(u'Trips')

    def __unicode__(self):
        # Python 2 string representation (admin display, etc.).
        return self.trip_id
class StopTime(models.Model):
    """GTFS ``stop_times.txt`` record: one stop visit within a trip."""
    trip_id = models.ForeignKey(Trip, verbose_name=_(u'Trip ID'))
    arrival_time = models.TimeField(_(u'Arrival Time'))
    departure_time = models.TimeField(_(u'Departure Time'))
    stop_id = models.ForeignKey('stops.Stop', verbose_name=_(u'Stop ID'))
    # Order of this stop within the trip (GTFS stop_sequence).
    stop_sequence = models.PositiveIntegerField(_(u'Stop Sequence'))
    stop_headsign = models.CharField(_(u'Headsign'), max_length=100, blank=True, null= True)
    # GTFS pickup_type values; 0 = regularly scheduled pickup.
    PICK_UPS = (
        (0, _(u'Regularly')),
        (1, _(u'No pickup')),
        (2, _(u'Must phone agency')),
        (3, _(u'Must coordinate with driver')),
    )
    pickup_type = models.IntegerField(_(u'Pick-Up Type'), choices=PICK_UPS, default=0, blank=True, null=True)
    # GTFS drop_off_type values; 0 = regularly scheduled drop off.
    DROP_OFFS = (
        (0, _(u'Regularly')),
        (1, _(u'No drop off')),
        (2, _(u'Must phone agency')),
        (3, _(u'Must coordinate with driver')),
    )
    drop_off_type = models.IntegerField(_(u'Drop-Off Type'), choices=DROP_OFFS, default=0, blank=True, null=True)
    shape_dist_traveled = models.DecimalField(_(u'Distance Travelled'), max_digits=8, decimal_places=4, blank=True, null=True)
    # GTFS timepoint: whether the times above are exact or approximate.
    TIMEPOINTS = (
        (0, _(u'Approximate')),
        (1, _(u'Exact')),
    )
    timepoint = models.IntegerField(_(u'Timepoint'), choices=TIMEPOINTS, default=1, blank=True, null=True)

    class Meta:
        verbose_name = _(u'Stop Time')
        verbose_name_plural = _(u'Stop Times')

    def __unicode__(self):
        # Python 2 string representation; falls back to empty string.
        return unicode(self.trip_id) or u''
"content_hash": "d5e05a5752ffe26bc2bb809ab0b7dcfa",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 132,
"avg_line_length": 44.30555555555556,
"alnum_prop": 0.6294670846394984,
"repo_name": "renanalencar/hermes",
"id": "1bb276a10b2fc8e8bcc36b737dc40ca1b4e5cc6e",
"size": "3214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trips/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45248"
},
{
"name": "HTML",
"bytes": "48827"
},
{
"name": "JavaScript",
"bytes": "87491"
},
{
"name": "Python",
"bytes": "158986"
}
],
"symlink_target": ""
} |
"""Test the Google Maps Travel Time config flow."""
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.google_travel_time.const import (
ARRIVAL_TIME,
CONF_ARRIVAL_TIME,
CONF_AVOID,
CONF_DEPARTURE_TIME,
CONF_DESTINATION,
CONF_LANGUAGE,
CONF_OPTIONS,
CONF_ORIGIN,
CONF_TIME,
CONF_TIME_TYPE,
CONF_TRAFFIC_MODEL,
CONF_TRANSIT_MODE,
CONF_TRANSIT_ROUTING_PREFERENCE,
CONF_UNITS,
DEFAULT_NAME,
DEPARTURE_TIME,
DOMAIN,
)
from homeassistant.const import (
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
CONF_UNIT_SYSTEM_IMPERIAL,
)
from tests.common import MockConfigEntry
async def test_minimum_fields(hass, validate_config_entry, bypass_setup):
    """Test we get the form."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert flow["errors"] == {}

    completed = await hass.config_entries.flow.async_configure(
        flow["flow_id"],
        {
            CONF_API_KEY: "api_key",
            CONF_ORIGIN: "location1",
            CONF_DESTINATION: "location2",
        },
    )
    # Minimal input is enough to create the entry; title is derived from
    # the default name plus origin/destination.
    assert completed["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert completed["title"] == f"{DEFAULT_NAME}: location1 -> location2"
    assert completed["data"] == {
        CONF_API_KEY: "api_key",
        CONF_ORIGIN: "location1",
        CONF_DESTINATION: "location2",
    }
async def test_invalid_config_entry(hass, invalidate_config_entry):
    """Test we get the form."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert flow["errors"] == {}

    completed = await hass.config_entries.flow.async_configure(
        flow["flow_id"],
        {
            CONF_API_KEY: "api_key",
            CONF_ORIGIN: "location1",
            CONF_DESTINATION: "location2",
        },
    )
    # Validation fails (fixture invalidates the entry), so the form is
    # shown again with a connection error.
    assert completed["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert completed["errors"] == {"base": "cannot_connect"}
async def test_options_flow(hass, validate_config_entry, bypass_update):
    """Test options flow."""
    # Start from an existing entry that already has options, including a
    # legacy CONF_ARRIVAL_TIME value.
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_API_KEY: "api_key",
            CONF_ORIGIN: "location1",
            CONF_DESTINATION: "location2",
        },
        options={
            CONF_MODE: "driving",
            CONF_ARRIVAL_TIME: "test",
            CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
        },
    )
    entry.add_to_hass(hass)

    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"

    # Submit the options form using the time-type selector set to
    # "arrival time".
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            CONF_MODE: "driving",
            CONF_LANGUAGE: "en",
            CONF_AVOID: "tolls",
            CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
            CONF_TIME_TYPE: ARRIVAL_TIME,
            CONF_TIME: "test",
            CONF_TRAFFIC_MODEL: "best_guess",
            CONF_TRANSIT_MODE: "train",
            CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
        },
    )

    # The transient CONF_TIME_TYPE/CONF_TIME pair must be folded into
    # CONF_ARRIVAL_TIME in the stored options.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"] == {
        CONF_MODE: "driving",
        CONF_LANGUAGE: "en",
        CONF_AVOID: "tolls",
        CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
        CONF_ARRIVAL_TIME: "test",
        CONF_TRAFFIC_MODEL: "best_guess",
        CONF_TRANSIT_MODE: "train",
        CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
    }

    assert entry.options == {
        CONF_MODE: "driving",
        CONF_LANGUAGE: "en",
        CONF_AVOID: "tolls",
        CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
        CONF_ARRIVAL_TIME: "test",
        CONF_TRAFFIC_MODEL: "best_guess",
        CONF_TRANSIT_MODE: "train",
        CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
    }
async def test_options_flow_departure_time(hass, validate_config_entry, bypass_update):
    """Test options flow with departure time."""
    # Entry starts with no options at all.
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_API_KEY: "api_key",
            CONF_ORIGIN: "location1",
            CONF_DESTINATION: "location2",
        },
    )
    entry.add_to_hass(hass)

    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    result = await hass.config_entries.options.async_init(entry.entry_id, data=None)

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"

    # Same submission as the arrival-time test, but with the time-type
    # selector set to "departure time".
    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            CONF_MODE: "driving",
            CONF_LANGUAGE: "en",
            CONF_AVOID: "tolls",
            CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
            CONF_TIME_TYPE: DEPARTURE_TIME,
            CONF_TIME: "test",
            CONF_TRAFFIC_MODEL: "best_guess",
            CONF_TRANSIT_MODE: "train",
            CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
        },
    )

    # CONF_TIME_TYPE/CONF_TIME must be folded into CONF_DEPARTURE_TIME.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == ""
    assert result["data"] == {
        CONF_MODE: "driving",
        CONF_LANGUAGE: "en",
        CONF_AVOID: "tolls",
        CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
        CONF_DEPARTURE_TIME: "test",
        CONF_TRAFFIC_MODEL: "best_guess",
        CONF_TRANSIT_MODE: "train",
        CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
    }

    assert entry.options == {
        CONF_MODE: "driving",
        CONF_LANGUAGE: "en",
        CONF_AVOID: "tolls",
        CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
        CONF_DEPARTURE_TIME: "test",
        CONF_TRAFFIC_MODEL: "best_guess",
        CONF_TRANSIT_MODE: "train",
        CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
    }
async def test_dupe_id(hass, validate_config_entry, bypass_setup):
    """Test setting up the same entry twice fails."""
    # First setup with a given api key + origin/destination succeeds.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {
            CONF_API_KEY: "test",
            CONF_ORIGIN: "location1",
            CONF_DESTINATION: "location2",
        },
    )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY

    # Second setup with identical input must abort as a duplicate.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {
            CONF_API_KEY: "test",
            CONF_ORIGIN: "location1",
            CONF_DESTINATION: "location2",
        },
    )
    await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "already_configured"
async def test_import_flow(hass, validate_config_entry, bypass_update):
    """Test import_flow."""
    # YAML import supplies a name plus a nested CONF_OPTIONS dict.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_IMPORT},
        data={
            CONF_API_KEY: "api_key",
            CONF_ORIGIN: "location1",
            CONF_DESTINATION: "location2",
            CONF_NAME: "test_name",
            CONF_OPTIONS: {
                CONF_MODE: "driving",
                CONF_LANGUAGE: "en",
                CONF_AVOID: "tolls",
                CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
                CONF_ARRIVAL_TIME: "test",
                CONF_TRAFFIC_MODEL: "best_guess",
                CONF_TRANSIT_MODE: "train",
                CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
            },
        },
    )
    await hass.async_block_till_done()

    # The flow result still carries the raw imported payload...
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "test_name"
    assert result["data"] == {
        CONF_API_KEY: "api_key",
        CONF_ORIGIN: "location1",
        CONF_DESTINATION: "location2",
        CONF_NAME: "test_name",
        CONF_OPTIONS: {
            CONF_MODE: "driving",
            CONF_LANGUAGE: "en",
            CONF_AVOID: "tolls",
            CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
            CONF_ARRIVAL_TIME: "test",
            CONF_TRAFFIC_MODEL: "best_guess",
            CONF_TRANSIT_MODE: "train",
            CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
        },
    }

    # ...but the stored entry must split it into data vs. options.
    entry = hass.config_entries.async_entries(DOMAIN)[0]
    assert entry.data == {
        CONF_API_KEY: "api_key",
        CONF_ORIGIN: "location1",
        CONF_DESTINATION: "location2",
    }
    assert entry.options == {
        CONF_MODE: "driving",
        CONF_LANGUAGE: "en",
        CONF_AVOID: "tolls",
        CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
        CONF_ARRIVAL_TIME: "test",
        CONF_TRAFFIC_MODEL: "best_guess",
        CONF_TRANSIT_MODE: "train",
        CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
    }
| {
"content_hash": "c62115e019f0f243b68869da081c6900",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 87,
"avg_line_length": 32.138047138047135,
"alnum_prop": 0.5858564693556836,
"repo_name": "adrienbrault/home-assistant",
"id": "64dc77903ff514c66c6ce5f686ba61abdddb9af5",
"size": "9545",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/google_travel_time/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
"""
This module implements a model checkpoint loader and writer.
"""
import os
import tensorflow as tf
from niftynet.engine.application_variables import global_vars_init_or_restore
from niftynet.engine.signal import \
ITER_FINISHED, SESS_FINISHED, SESS_STARTED
from niftynet.io.misc_io import touch_folder
FILE_PREFIX = 'model.ckpt'
def make_model_name(model_dir):
    """
    Make the model checkpoint folder.

    Checkpoint files live under `model_dir/models/` and their names
    start with FILE_PREFIX.

    :param model_dir: niftynet model folder
    :return: a partial checkpoint file name `model_dir/models/FILE_PREFIX`
    """
    models_folder = touch_folder(os.path.join(model_dir, 'models'))
    return os.path.join(models_folder, FILE_PREFIX)
class ModelRestorer(object):
    """
    This class handles restoring the model at the beginning of a session.
    """

    def __init__(self,
                 model_dir,
                 initial_iter=0,
                 is_training_action=True,
                 vars_to_restore=None,
                 **_unused):
        # :param model_dir: niftynet model folder (checkpoints in model_dir/models)
        # :param initial_iter: checkpoint iteration to restore from
        # :param is_training_action: True when running a training action
        # :param vars_to_restore: regex selecting variables to restore;
        #     non-matching variables are randomly initialised instead
        self.initial_iter = initial_iter
        self.vars_to_restore = vars_to_restore
        self.file_name_prefix = make_model_name(model_dir)
        # randomly initialise or restoring model
        # (fresh training starts from random weights; everything else --
        # inference, or training resumed at iter > 0 -- loads a checkpoint)
        if is_training_action and initial_iter == 0:
            SESS_STARTED.connect(self.rand_init_model)
        else:
            SESS_STARTED.connect(self.restore_model)

    def rand_init_model(self, _sender, **_unused):
        """
        Randomly initialising all trainable variables defined in
        the default session.

        :param _sender:
        :param _unused:
        :return:
        """
        with tf.name_scope('Initialisation'):
            init_op = global_vars_init_or_restore()
        tf.get_default_session().run(init_op)
        tf.logging.info('Parameters from random initialisations ...')

    def restore_model(self, _sender, **_unused):
        """
        Loading checkpoint files as variable initialisations.

        Variables whose names match ``self.vars_to_restore`` (when set)
        are loaded from the checkpoint; the rest are randomly
        initialised. With no regex, all variables are restored.

        :param _sender:
        :param _unused:
        :return:
        """
        # Checkpoint path is "<prefix>-<iter>", the tf.train.Saver naming.
        checkpoint = '{}-{}'.format(self.file_name_prefix, self.initial_iter)
        to_restore = None  # tf.train.Saver's default value, restoring all
        if self.vars_to_restore:
            # partially restore (updating `to_restore` list)
            tf.logging.info("Finding variables to restore...")
            import re
            # Determine which vars to
            # restore using regex matching
            var_regex = re.compile(self.vars_to_restore)
            to_restore, to_randomise = [], []
            for restorable in tf.global_variables():
                if var_regex.search(restorable.name):
                    to_restore.append(restorable)
                else:
                    to_randomise.append(restorable)
            if not to_restore:
                tf.logging.fatal(
                    'vars_to_restore specified: %s, but nothing matched.',
                    self.vars_to_restore)
            assert to_restore, 'Nothing to restore (--vars_to_restore)'
            var_names = [  # getting first three item to print
                var_restore.name for var_restore in to_restore[:3]]
            tf.logging.info(
                'Restoring %s out of %s variables from %s: \n%s, ...',
                len(to_restore),
                len(tf.global_variables()),
                checkpoint, ',\n'.join(var_names))
            # Initialize vars to randomize
            init_op = tf.variables_initializer(to_randomise)
            tf.get_default_session().run(init_op)
        try:
            saver = tf.train.Saver(
                var_list=to_restore, save_relative_paths=True)
            saver.restore(tf.get_default_session(), checkpoint)
        except tf.errors.NotFoundError:
            # Distinguish a missing model folder from a graph mismatch to
            # give a more actionable error message before re-raising.
            tf.logging.fatal(
                'checkpoint %s not found or variables to restore do not '
                'match the current application graph', checkpoint)
            dir_name = os.path.dirname(checkpoint)
            if dir_name and not os.path.exists(dir_name):
                tf.logging.fatal(
                    "Model folder not found %s, please check"
                    "config parameter: model_dir", dir_name)
            raise
class ModelSaver(object):
    """
    Event handler that writes the current model out as checkpoint files.

    Checkpoints are written on a fixed iteration interval and/or when the
    session finishes, depending on the constructor arguments.
    """

    def __init__(self,
                 model_dir,
                 save_every_n=0,
                 max_checkpoints=1,
                 is_training_action=True,
                 **_unused):
        self.save_every_n = save_every_n
        self.max_checkpoints = max_checkpoints
        self.file_name_prefix = make_model_name(model_dir)
        self.saver = None

        # The saver can only be built once the graph has been finalised.
        SESS_STARTED.connect(self.init_saver)
        # Periodic saving is enabled only for a positive frequency.
        if self.save_every_n > 0:
            ITER_FINISHED.connect(self.save_model_interval)
        # Training runs always keep the final model before exiting.
        if is_training_action:
            SESS_FINISHED.connect(self.save_model)

    def init_saver(self, _sender, **_unused):
        """
        Create the ``tf.train.Saver`` used by the save handlers.

        :param _sender: signal sender (unused)
        :param _unused: extra signal payload (unused)
        :return:
        """
        self.saver = tf.train.Saver(
            max_to_keep=self.max_checkpoints, save_relative_paths=True)

    def save_model(self, _sender, **msg):
        """
        Save the model at the iteration carried by ``msg``.

        :param _sender: signal sender (unused)
        :param msg: an iteration message instance
        :return:
        """
        current = msg['iter_msg'].current_iter
        if current >= 0:
            self._save_at(current)

    def save_model_interval(self, _sender, **msg):
        """
        Save the model every ``save_every_n`` training iterations.

        :param _sender: signal sender (unused)
        :param msg: an iteration message instance
        :return:
        """
        iter_msg = msg['iter_msg']
        if not iter_msg.is_training:
            return
        current = iter_msg.current_iter
        if current > 0 and current % self.save_every_n == 0:
            self._save_at(current)

    def _save_at(self, iter_i):
        """
        Write a checkpoint for iteration ``iter_i`` and log a console message.

        :param iter_i: integer of the current iteration
        :return:
        """
        if not self.saver:
            return
        self.saver.save(sess=tf.get_default_session(),
                        save_path=self.file_name_prefix,
                        global_step=iter_i)
        tf.logging.info('iter %d saved: %s', iter_i, self.file_name_prefix)
| {
"content_hash": "7feab854c245cf1e7618ba4b1c1d034d",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 78,
"avg_line_length": 34.076142131979694,
"alnum_prop": 0.5694920303887978,
"repo_name": "NifTK/NiftyNet",
"id": "5f7af91cc6f2864934ff92fbd0a4086731dd61c9",
"size": "6737",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "niftynet/engine/handler_model.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "381956"
},
{
"name": "C++",
"bytes": "182582"
},
{
"name": "CMake",
"bytes": "3500"
},
{
"name": "Cuda",
"bytes": "69664"
},
{
"name": "Python",
"bytes": "2340002"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
} |
from ..broker import Broker
class UploadedCertificateBroker(Broker):
    """Broker for the NetMRI ``uploaded_certificates`` API controller."""

    controller = "uploaded_certificates"

    def index(self, **kwargs):
        """Lists the available uploaded certificates. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.

        **Inputs**

        |  ``api version min:`` 2.2
        |  ``api version max:`` 2.4
        |  ``required:`` False
        |  ``default:`` None

        :param id: The internal NetMRI identifier for the certificate
        :type id: Integer

        |  ``api version min:`` 2.5
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param id: The internal NetMRI identifier for the certificate
        :type id: Array of Integer

        |  ``api version min:`` 2.2
        |  ``api version max:`` 2.4
        |  ``required:`` False
        |  ``default:`` None

        :param name: User-defined unique name for the certificate
        :type name: String

        |  ``api version min:`` 2.5
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param name: User-defined unique name for the certificate
        :type name: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` 0

        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
        :type start: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` 1000

        :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
        :type limit: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` id

        :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, path, created_at, updated_at.
        :type sort: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` asc

        :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
        :type dir: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param select: The list of attributes to return for each UploadedCertificate. Valid values are id, name, path, created_at, updated_at. If empty or omitted, all attributes will be returned.
        :type select: Array

        |  ``api version min:`` 2.8
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
        :type goto_field: String

        |  ``api version min:`` 2.8
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
        :type goto_value: String

        **Outputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :return uploaded_certificates: An array of the UploadedCertificate objects that match the specified input criteria.
        :rtype uploaded_certificates: Array of UploadedCertificate
        """
        return self.api_list_request(self._get_method_fullname("index"), kwargs)

    def create(self, **kwargs):
        """This method uploads a certificate in the repository

        **Inputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param name: Unique user-defined name for the certificate
        :type name: String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param certificate_file: PEM-formatted certificate file content
        :type certificate_file: String

        **Outputs**
        """
        return self.api_request(self._get_method_fullname("create"), kwargs)

    def export(self, **kwargs):
        """This method exports a certificate from the repository

        **Inputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param id: The internal NetMRI identifier of the certificate from the repository
        :type id: Integer

        **Outputs**
        """
        return self.api_request(self._get_method_fullname("export"), kwargs)

    def destroy(self, **kwargs):
        """This method deletes a certificate from the repository

        **Inputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` True
        |  ``default:`` None

        :param id: The internal NetMRI identifier of the certificate from the repository
        :type id: Integer

        **Outputs**
        """
        return self.api_request(self._get_method_fullname("destroy"), kwargs)

    def index_with_detail(self, **kwargs):
        """Lists the available uploaded certificates. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.

        **Inputs**

        |  ``api version min:`` 2.2
        |  ``api version max:`` 2.4
        |  ``required:`` False
        |  ``default:`` None

        :param id: The internal NetMRI identifier for the certificate
        :type id: Integer

        |  ``api version min:`` 2.5
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param id: The internal NetMRI identifier for the certificate
        :type id: Array of Integer

        |  ``api version min:`` 2.2
        |  ``api version max:`` 2.4
        |  ``required:`` False
        |  ``default:`` None

        :param name: User-defined unique name for the certificate
        :type name: String

        |  ``api version min:`` 2.5
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param name: User-defined unique name for the certificate
        :type name: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` 0

        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
        :type start: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` 1000

        :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
        :type limit: Integer

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` id

        :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, path, created_at, updated_at.
        :type sort: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` asc

        :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
        :type dir: Array of String

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param select: The list of attributes to return for each UploadedCertificate. Valid values are id, name, path, created_at, updated_at. If empty or omitted, all attributes will be returned.
        :type select: Array

        |  ``api version min:`` 2.8
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
        :type goto_field: String

        |  ``api version min:`` 2.8
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
        :type goto_value: String

        **Outputs**

        |  ``api version min:`` None
        |  ``api version max:`` None
        |  ``required:`` False
        |  ``default:`` None

        :return uploaded_certificates: An array of the UploadedCertificate objects that match the specified input criteria.
        :rtype uploaded_certificates: Array of UploadedCertificate
        """
        return self.api_list_request(self._get_method_fullname("index_with_detail"), kwargs)
| {
"content_hash": "7a7466784025cd39bd230c23435fc307",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 350,
"avg_line_length": 37.443661971830984,
"alnum_prop": 0.53836750047019,
"repo_name": "infobloxopen/infoblox-netmri",
"id": "e3a5360831ecba8151b93505353bfcc837d2a7a7",
"size": "10634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infoblox_netmri/api/broker/v2_2_0/uploaded_certificate_broker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2110"
},
{
"name": "Python",
"bytes": "20560"
}
],
"symlink_target": ""
} |
"""Iteration over tf.data.Datasets when eager execution is enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
class Iterator(iterator_ops.EagerIterator):
  """An iterator yielding `tf.Tensor` objects from a `tf.data.Dataset`.

  NOTE: Unlike the iterator created by the
  `tf.data.Dataset.make_one_shot_iterator` method, this class enables
  additional experimental functionality, such as prefetching to the GPU.
  """

  def __init__(self, dataset):
    """Creates a new iterator over the given dataset.

    For example:
    ```python
    dataset = tf.data.Dataset.range(4)
    for x in Iterator(dataset):
      print(x)
    ```

    Tensors produced will be placed on the device on which this iterator
    object was created.

    Args:
      dataset: A `tf.data.Dataset` object.

    Raises:
      TypeError: If `dataset` is an unsupported type.
      RuntimeError: When invoked without eager execution enabled.
    """
    if isinstance(dataset, prefetching_ops._PrefetchToDeviceDataset):  # pylint: disable=protected-access
      raise TypeError(
          "`tf.data.experimental.prefetch_to_device()` is not compatible with "
          "`tf.contrib.eager.Iterator`. Use `for ... in dataset:` to iterate "
          "over the dataset instead.")

    # Copy the dataset to the current device only when that device is not a
    # CPU (an empty device type counts as local/CPU).
    device_type = context.context().device_spec.device_type
    if device_type and device_type != "CPU":
      with ops.device(None):
        # Let the placer figure out where to place the various functions etc.
        # created by the CopyToDeviceDataset.
        dataset = dataset.apply(prefetching_ops.copy_to_device(
            context.context().device_name))
        dataset = dataset.prefetch(1)
    super(Iterator, self).__init__(dataset)

  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    # Iterators use an error status to signal that there is no more data, so
    # this must run in sync mode.
    # TODO(b/77291417): Fix
    with context.execution_mode(context.SYNC):
      return super(Iterator, self)._next_internal()
| {
"content_hash": "7d9e6532d4dd7f0b2b0e18ba03d8d3e4",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 105,
"avg_line_length": 36.417910447761194,
"alnum_prop": 0.6967213114754098,
"repo_name": "girving/tensorflow",
"id": "3aed121233be1268531495a2fa83fd323412e1fd",
"size": "3129",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/eager/python/datasets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import cv2
from plantcv.plantcv import analyze_thermal_values, outputs
def test_analyze_thermal_values(test_data):
    """Test for PlantCV."""
    # Start from a clean observation store.
    outputs.clear()
    # Load the mask image and the thermal array fixture.
    thermal_mask = cv2.imread(test_data.thermal_mask, -1)
    thermal_img = test_data.load_npz(test_data.thermal_obj_file)
    _ = analyze_thermal_values(thermal_array=thermal_img, mask=thermal_mask,
                               histplot=True)
    observed = outputs.observations['default']['median_temp']['value']
    assert observed == 33.20922
| {
"content_hash": "e8f0a757c42ff97605b26c37720e80b4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 36.92307692307692,
"alnum_prop": 0.7020833333333333,
"repo_name": "danforthcenter/plantcv",
"id": "30b5efd1f908564169985d04bf730ffb5e5951b0",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/plantcv/test_analyze_thermal_values.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1112"
},
{
"name": "Python",
"bytes": "898011"
},
{
"name": "R",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "3348"
}
],
"symlink_target": ""
} |
from page_sets.system_health import system_health_story
from telemetry import decorators
@decorators.Disabled('win')  # crbug.com/656040
class BlankAboutBlankStory(system_health_story.SystemHealthStory):
    """Story that loads the about:blank page."""
    NAME = 'blank:about:blank'
    URL = 'about:blank'

    def _DidLoadDocument(self, action_runner):
        # Request a RAF and wait for it to be processed to ensure that the metric
        # Startup.FirstWebContents.NonEmptyPaint2 is recorded.
        # The flag is polled below rather than returned directly because
        # ExecuteJavaScript does not wait for the async RAF callback.
        action_runner.ExecuteJavaScript(
            """
            window.__hasRunRAF = false;
            requestAnimationFrame(function() {
              window.__hasRunRAF = true;
            });
            """
        )
        action_runner.WaitForJavaScriptCondition("window.__hasRunRAF")
| {
"content_hash": "f4701f76e436de6a0de8752ab48b6c4a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 32.56521739130435,
"alnum_prop": 0.6955941255006676,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "dbfd385d95b4fe79f3f980b86d4a86994cc96e0e",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/page_sets/system_health/blank_stories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Inicialització.
"""
from .test import TestService # noqa
| {
"content_hash": "430617c8a38ea43b4d5d4676f2918cf3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 37,
"avg_line_length": 12.6,
"alnum_prop": 0.6825396825396826,
"repo_name": "sisap-ics/sidiap",
"id": "86d3a76010ca50a9fdf2c975a102c3bf4236c899",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106483"
},
{
"name": "Shell",
"bytes": "2113"
}
],
"symlink_target": ""
} |
'''
Constant definitions shared by the IoTivity robot-framework test helpers.
'''
import os

# --- Paths and build commands --------------------------------------------
ROOT_DIR = os.getcwd()
HOMEPATH = '../../..'
ResultFile = '../../result.txt'
IOTIVITY_DIR = HOMEPATH + '/IotivityOrgSource/target/iotivity'
TC_BUILD_DIR = HOMEPATH + '/IotivityOrgSource/target/iotivity/test'
IOTIVITY_BUILD_COMMAND_LINUX_RC = 'scons TARGET_OS=linux'
ORGSOURCEPATH = HOMEPATH + '/IotivityOrgSource'
TARGET = 'target'
x86 = 'x86'

# --- Android logcat filter tags per sample application --------------------
ANDROID_FILTER_LOG_SSM = 'SSMSampleApp'
ANDROID_FILTER_LOG_PPM = 'PPMSampleApp'
ANDROID_FILTER_LOG_NM_HOSTING = 'NMResourceHosting'
ANDROID_FILTER_LOG_NM_CONSUMER = 'NMConsumer'
ANDROID_FILTER_LOG_NM_PROVIDER = 'NMProvider'
ANDROID_FILTER_LOG_TM_CONSERVER = 'CON-SERVER'
ANDROID_FILTER_LOG_TM_SAMPLE = 'TMSample'
ANDROID_FILTER_LOG_CA = 'Sample_Service'

# --- Tizen dlog filter tags per sample application -------------------------
TIZEN_FILTER_LOG_SSM = 'ssmtesterapp'
TIZEN_FILTER_LOG_PPM = 'ppmtesterapp'
TIZEN_FILTER_LOG_NM = 'nmsampleapp'
TIZEN_FILTER_LOG_TM_CONSERVER = 'conserverapp'
TIZEN_FILTER_LOG_TM_SAMPLE = 'tmsampleapp'

# --- Keys read from the CI configuration -----------------------------------
GITHUB_USER_KEY = 'github_userid'
GITHUB_PASSWORD_KEY = 'github_password'
CI_SERVER_WIFI_IP_KEY = 'ci_server_wifi_ip'
CI_SERVER_WIFI_IPv6_KEY = 'ci_server_wifi_ipv6'
CI_SERVER_ETHERNET_IP_KEY = 'ci_server_ethernet_ip'

# --- Miscellaneous tokens ---------------------------------------------------
DLM = ';'
INFINITE = 'infinite'

# --- Comparison operator symbols -------------------------------------------
EQ = '='
NE = '!='
LT = '<'
LE = '<='
GT = '>'
GE = '>='

# --- Target platforms -------------------------------------------------------
android = 'android'
linux = 'linux'
tizen = 'tizen'

# --- Test module categories --------------------------------------------------
API = 'api'
BAT = 'bat'
SAMPLE_APP = 'sample_app'
INTEROP_APP = 'interop_app'

# --- Build/test phase identifiers -------------------------------------------
BUILD_IOTIVITY = 'build_iotivity'
BUILD_TEST = 'build_test'
TEST_PRE_CONDITION = 'test_pre_condition'

# --- Phase status codes ------------------------------------------------------
BUILD_IOTIVITY_FAIL = 2
BUILD_TEST_FAIL = 4
TEST_PRE_CONDITION_PASS = 5
TEST_PRE_CONDITION_FAIL = 6
| {
"content_hash": "bda856b91762cde287bf7f9a33052737",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 65,
"avg_line_length": 23.553846153846155,
"alnum_prop": 0.7250163291966035,
"repo_name": "rzr/iotivity",
"id": "b0e4415e147117ab5e0ca5549be53261db38d7f9",
"size": "2300",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/src/automation/robot/helper/variable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12408"
},
{
"name": "C",
"bytes": "8157549"
},
{
"name": "C++",
"bytes": "13386094"
},
{
"name": "CSS",
"bytes": "1069"
},
{
"name": "Dockerfile",
"bytes": "8372"
},
{
"name": "HTML",
"bytes": "1714"
},
{
"name": "Java",
"bytes": "6702288"
},
{
"name": "JavaScript",
"bytes": "167465"
},
{
"name": "Less",
"bytes": "39"
},
{
"name": "M4",
"bytes": "2816"
},
{
"name": "Makefile",
"bytes": "52136"
},
{
"name": "Python",
"bytes": "1469227"
},
{
"name": "RAML",
"bytes": "1535"
},
{
"name": "Roff",
"bytes": "2808"
},
{
"name": "Shell",
"bytes": "200101"
}
],
"symlink_target": ""
} |
"""tornado.options enhancement"""
import io
import traceback
import os
from tornado.options import parse_command_line, options, define
def parse_config_file(filename):
    """Rewrite tornado default parse_config_file.

    Parses and loads the Python config file at the given filename.

    This version allows customized new options which are not defined before
    from a configuration file.

    :param filename: path to a Python-syntax configuration file
    """
    config = {}
    # NOTE(review): the config file is executed as Python code; only trusted
    # files should ever be loaded here.
    # BUGFIX: use a context manager so the file handle is closed
    # deterministically instead of relying on garbage collection.
    with io.open(filename, encoding="UTF-8") as config_file:
        source = config_file.read().encode("UTF-8")
    exec(compile(source, filename, "exec"), {}, config)
    for name in config:
        if name in options:
            # Known option: assign through tornado's options object.
            options[name] = config[name]
        else:
            # Unknown option: define it on the fly so it becomes available.
            options.define(name, config[name])
def parse_options(root_dir, settings_file="settings", parse_cmd=True):
"""Parse options file and command line"""
try:
parse_config_file(os.path.join(root_dir, "%s.py" % settings_file))
# print "Using settings.py as default settings."
except Exception as exc:
print("No any default settings, are you sure? Exception: %s" % exc)
try:
parse_config_file(
os.path.join(root_dir, "%s_local.py" % settings_file))
# print "Override some settings with local settings."
except Exception as exc:
print("No local settings. Exception: %s" % exc)
# print traceback.format_exc()
if parse_cmd:
parse_command_line()
| {
"content_hash": "a90bb566cc7e154eab657cb26a69b417",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 107,
"avg_line_length": 31.477272727272727,
"alnum_prop": 0.6483754512635379,
"repo_name": "palmhold/djinn",
"id": "36da52f815f6e91b2c5d5830c4fb745d4f3144c6",
"size": "1991",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djinn/options.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51780"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from unittest import skipIf
from django.db import connection, models
from django.test import TestCase
from .models import Post
class TextFieldTests(TestCase):

    def test_max_length_passed_to_formfield(self):
        """
        TextField passes its max_length attribute to form fields created using
        their formfield() method.
        """
        unlimited_field = models.TextField()
        limited_field = models.TextField(max_length=2345)
        self.assertIsNone(unlimited_field.formfield().max_length)
        self.assertEqual(2345, limited_field.formfield().max_length)

    def test_to_python(self):
        """TextField.to_python() should return a string."""
        field = models.TextField()
        self.assertEqual(field.to_python(1), '1')

    def test_lookup_integer_in_textfield(self):
        self.assertEqual(Post.objects.filter(body=24).count(), 0)

    @skipIf(connection.vendor == 'mysql', 'Running on MySQL requires utf8mb4 encoding (#18392)')
    def test_emoji(self):
        post = Post.objects.create(title='Whatever', body='Smile 😀.')
        post.refresh_from_db()
        self.assertEqual(post.body, 'Smile 😀.')
| {
"content_hash": "7942d801cfbc737e4fea4c04ca4a45ad",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 96,
"avg_line_length": 32.4,
"alnum_prop": 0.6587301587301587,
"repo_name": "erikr/django",
"id": "a3cd516cb4d1b1f41ef43205e3799a1d20735c0b",
"size": "1164",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/model_fields/test_textfield.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53169"
},
{
"name": "HTML",
"bytes": "173592"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12192494"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# numpydoc parses NumPy-style docstrings; imgmath renders math as images.
extensions = ['sphinx.ext.autosummary',
              'sphinx.ext.autodoc',
              'sphinx.ext.imgmath',
              'numpydoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Hyperion'
copyright = u'2010, Thomas Robitaille'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The version string is read from the installed hyperion package so the docs
# always match the code being documented.
import hyperion
version = hyperion.__version__
# The full version, including alpha/beta/rc tags.
release = hyperion.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_templates']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
html_style = 'scipy.css'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'HyperionDocumentation'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'hyperion_manual.tex', u'Hyperion Manual',
   u'Thomas Robitaille', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Generate autosummary stub pages automatically.
autosummary_generate = True
# Do not list class members on the class page (numpydoc handles them).
numpydoc_show_class_members = False
# Use only the class docstring (not __init__) for class documentation.
autoclass_content = 'class'
if os.environ.get('READTHEDOCS', None) == 'True':
    # On Read the Docs the compiled dependencies listed in MOCK_MODULES cannot
    # be installed, so replace them with a stub that absorbs any attribute
    # access or call made during autodoc imports.
    class Mock(object):
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, *args, **kwargs):
            return Mock()

        @classmethod
        def __getattr__(self, name):
            if name in ('__file__', '__path__'):
                # Module metadata attributes must look like file paths.
                return '/dev/null'
            elif name[0] == name[0].upper():
                # Capitalised names are assumed to be classes.
                return type(name, (), {})
            else:
                return Mock()

    MOCK_MODULES = ['h5py', 'atpy']
    for mod_name in MOCK_MODULES:
        # Pre-seed sys.modules so `import h5py` etc. succeed with the stub.
        sys.modules[mod_name] = Mock()
| {
"content_hash": "b4bf0391a1ca3be37c001f37c60649e5",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 80,
"avg_line_length": 32.014150943396224,
"alnum_prop": 0.6863120671872698,
"repo_name": "hyperion-rt/hyperion",
"id": "53be2228ed901362c8a931ba1a1a7420a7c2f4be",
"size": "7219",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "52997"
},
{
"name": "C++",
"bytes": "426981"
},
{
"name": "Fortran",
"bytes": "566390"
},
{
"name": "M4",
"bytes": "6308"
},
{
"name": "Makefile",
"bytes": "8813"
},
{
"name": "Python",
"bytes": "923253"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import codecs
import inspect
import json
import logging
import os
import re
import progressbar
from indic_transliteration import sanscript
from sanskrit_parser.base.sanskrit_base import SanskritObject
from sanskrit_parser.lexical_analyzer.sanskrit_lexical_analyzer import SanskritLexicalAnalyzer
# Module-level logger; all progress is written to a fresh log file per run.
logger = logging.getLogger(__name__)
logging.basicConfig(filename='gen_bg_lexan_passfail.log', filemode='w',
                    level=logging.INFO)
def process_bg_file(fn, m):
    ''' Process a single Bg file, up to a max of m lines

    A small state machine scans the ITRANS text:
      S0 - waiting for a half-line ending in " ." (prose or verse start)
      S1 - saw one half-line; the next line decides prose (" .") vs verse ("..")
      S2 - inside a verse; waiting for the first split half-line (" .")
      S3 - waiting for the second split half-line (".."), then emit records

    Args:
        fn: path of the .itx file to scan
        m:  maximum number of references to collect; 0 stops immediately,
            a negative value effectively means unlimited
    Returns:
        list of records [full, split_words, orig_full, orig_split, False,
        basename, line_number]
    '''
    fs = []
    logger.info("Processing tests from file %s", fn)
    basename = os.path.basename(fn)  # stored in every emitted record
    with codecs.open(fn, "rb", 'utf-8') as f:
        # "S0" "S1" "S2" "S3" "S4"
        state = "S0"  # Wait state
        for lnum, l in enumerate(f):
            if m != 0:
                line = l.strip()
                # Samasa splitters
                line = line.replace(r'\-', ' ')
                # ITRANS Halanta symbol, not handled right?
                line = line.replace('.h', '')
                logger.info("State {}".format(state))
                if line:
                    # '%' begins a comment, '\' a TeX-style command; skip both.
                    if line[0] == '%':
                        logger.info("Skipping comment: {}".format(line))
                        continue
                    if line[0] == '\\':
                        logger.info("Skipping command: {}".format(line))
                        continue
                if state == "S0":  # Default State
                    if not line:
                        logger.info("Skipping blank: {}".format(line))
                        continue
                    elif line[-2:] == " .":  # Found |
                        # Remember the half-line and where it was seen.
                        prev = line
                        pnum = lnum
                        state = "S1"
                        logger.info("Moving to S1: {}".format(line))
                    else:
                        logger.info("Skipping unknown: {}".format(line))
                        continue
                elif state == "S1":
                    if not line:
                        logger.info("Found blank, moving to S0: {}".format(line))
                        state = "S0"
                        continue
                    if line[-2:] == " .":  # Found | , non verse
                        prev = line
                        pnum = lnum
                        state = "S0"
                        # Emit a prose record: strip the trailing " ." from the
                        # full text, drop the final "." token from the split.
                        r = [prev[:-2], line.split(" ")[:-1], prev, line, False,
                             basename, pnum]
                        logger.info("Appending {}".format(r))
                        fs.append(r)
                        if m > 0:
                            m = m - 1
                        logger.info("Moving to S0: {}".format(line))
                    elif line[-2:] == "..":  # Found ||, verse
                        prev2 = line
                        pnum2 = lnum
                        state = "S2"
                        logger.info("Moving to S2: {}".format(line))
                    else:
                        logger.info("Going to S0: unknown: {}".format(line))
                        state = "S0"
                        continue
                elif state == "S2":
                    if not line:
                        logger.info("Found blank: {}".format(line))
                        continue
                    if line[-2:] == " .":  # Found | verse split 1
                        split1 = line
                        # snum1 = lnum
                        state = "S3"
                    else:
                        logger.info("Going to S0: unknown: {}".format(line))
                        state = "S0"
                        continue
                elif state == "S3":
                    if not line:
                        logger.info("Found blank, going to S0: {}".format(line))
                        state = "S0"
                        continue
                    if line[-2:] == "..":  # Found | verse split 2
                        split2 = line
                        # snum2 = lnum
                        state = "S0"
                        # First half of the verse paired with its split.
                        r = [prev[:-2], split1.split(" ")[:-1], prev, split1, False,
                             basename, pnum]
                        logger.info("Appending {}".format(r))
                        fs.append(r)
                        # Second half: trim everything from ".." onwards.
                        rprev2 = prev2[:prev2.find("..")].strip()
                        rsplit2 = split2[:split2.find("..")].strip().split(" ")
                        r = [rprev2, rsplit2, prev2, split2, False,
                             basename, pnum2]
                        logger.info("Appending {}".format(r))
                        fs.append(r)
                        # NOTE(review): two records are appended here but the
                        # budget m is decremented only once — confirm intended.
                        if m > 0:
                            m = m - 1
                        logger.info("Going to S0: {}".format(line))
                    else:
                        logger.info("Going to S0: unknown: {}".format(line))
                        state = "S0"
                        continue
            else:
                break
    return fs
def get_bg_refs(lexan, maxrefs=200):
    """Collect up to maxrefs test references from the Bg data files.

    `lexan` is unused here; it is kept for interface symmetry with callers.
    Returns a flat list of reference records from process_bg_file().
    """
    refs = []
    remaining = maxrefs  # budget of references still to collect
    file_list = ["sandhi_test_data/gitAanvayasandhivigraha.itx"]
    for path in file_list:
        if remaining == 0:
            # Budget exhausted; stop scanning further files.
            break
        found = process_bg_file(path, remaining)
        if found is not None:
            refs.extend(found)
            remaining = remaining - len(found)
    return refs
# FIXME - need to store the modified f,s instead of input references
def test_splits(lexan, bg_refs):
    """Check whether the expected split of a reference appears among the
    lexical analyzer's sandhi splits.

    Args:
        lexan: SanskritLexicalAnalyzer used to produce the split graph.
        bg_refs: pair (full, split) — the ITRANS string to split and the
            expected list of word strings.

    Returns:
        True if the expected split was found, False if not,
        "Error" if splitting raised an exception.
    """
    # Check if s is in splits (compare as plain string lists).
    def _in_splits(s, splits):
        return s in [list(map(str, ss)) for ss in splits]

    f = bg_refs[0]
    # Normalize a trailing visarga 'H' to 's' in each expected word.
    s = [re.sub('H$', 's', SanskritObject(sx, encoding=sanscript.ITRANS).canonical()) for sx in bg_refs[1]]
    i = SanskritObject(f, encoding=sanscript.ITRANS)
    try:
        graph = lexan.getSandhiSplits(i)
        if graph is None:
            logger.error("FAIL: Empty split for {}".format(i.canonical().encode('utf-8')))
            return False
        # Reducing max_paths to 100 keeps the path enumeration bounded.
        splits = graph.findAllPaths(max_paths=100, sort=False)
        r = _in_splits(s, splits)
        if splits is None or (not r):
            logger.error("FAIL: {} not in {}".format(s, splits))
        return r
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
        logger.warning("Split Exception: {}".format(i.canonical().encode('utf-8')))
        return "Error"
if __name__ == "__main__":
    # Directory of this script; result files are written beneath it.
    base_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    directory = os.path.join(base_dir, "test_data_SanskritLexicalAnalyzer")
    passing = codecs.open(os.path.join(directory, "bg_passing.txt"), "w", encoding='utf-8')
    split_passing = codecs.open(os.path.join(directory, "bg_split_passing.txt"), "w", encoding='utf-8')
    failing = codecs.open(os.path.join(directory, "bg_failing.txt"), "w", encoding='utf-8')
    skip = codecs.open(os.path.join(directory, "bg_skip.txt"), "w", encoding='utf-8')
    error = codecs.open(os.path.join(directory, "bg_error.txt"), "w", encoding='utf-8')
    lexan = SanskritLexicalAnalyzer()
    maxrefs = 20000
    bar = progressbar.ProgressBar(maxval=maxrefs)
    fail_count = skip_count = error_count = pass_count = split_count = 0
    for full, split, ofull, osplit, splitp, filename, linenum in \
            bar(get_bg_refs(lexan=lexan, maxrefs=maxrefs)):
        # One JSON record per reference, routed to the matching result file.
        test = json.dumps({"full": full,
                           "split": split,
                           "orig_full": ofull,
                           "orig_split": osplit,
                           "filename": filename,
                           "linenum": linenum}) + "\n"
        sr = test_splits(lexan, (full, split))
        if sr == "Skip":
            skip.write(test)
            skip_count += 1
        elif sr == "Error":
            error.write(test)
            error_count += 1
        elif sr:
            if splitp:
                split_passing.write(test)
                split_count += 1
            else:
                passing.write(test)
                pass_count += 1
        else:
            failing.write(test)
            fail_count += 1
    passing.close()
    split_passing.close()  # BUG FIX: handle was never closed; buffered output could be lost
    failing.close()
    error.close()
    skip.close()
    print("Pass = %d, Split Pass = %d, Fail = %d, Skip = %d, Error = %d" % (pass_count, split_count, fail_count, skip_count, error_count))
| {
"content_hash": "7aa9c5b68548dc4b36496e1d2441f995",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 138,
"avg_line_length": 39.985849056603776,
"alnum_prop": 0.45287247847115725,
"repo_name": "kmadathil/sanskrit_parser",
"id": "1a77a80286e4f6b83bf024b735482b3f1784627b",
"size": "8500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/obsolete/generate_bg_Lexical_Analyzer_pass_fail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "391756"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
} |
from conary.repository import changeset
| {
"content_hash": "ae191b1e05e14d11cfcf9b38e676fec8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.875,
"repo_name": "sassoftware/conary",
"id": "b68389422fe67fa45cac70739f04ea66cd5c3b17",
"size": "652",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "conary/repository/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "229804"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Cython",
"bytes": "8856"
},
{
"name": "Genshi",
"bytes": "47582"
},
{
"name": "HTML",
"bytes": "10509"
},
{
"name": "Makefile",
"bytes": "42947"
},
{
"name": "Perl",
"bytes": "34287"
},
{
"name": "Python",
"bytes": "10583906"
},
{
"name": "Shell",
"bytes": "12630"
}
],
"symlink_target": ""
} |
import FWCore.ParameterSet.Config as cms

# CMSSW job configuration: run the DemoAnalyzer followed by an event dump.
process = cms.Process("Demo")

# initialize MessageLogger and output report
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.threshold = 'INFO'
process.MessageLogger.categories.append('Demo')
# limit = -1 means: do not cap the number of INFO messages.
process.MessageLogger.cerr.INFO = cms.untracked.PSet(
    limit = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )

# process all events
#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# process 10 events
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )

process.source = cms.Source("PoolSource",
    # replace 'myfile.root' with the source file you want to use
    fileNames = cms.untracked.vstring(
        'file:/afs/cern.ch/cms/Tutorials/TWIKI_DATA/TTJets_8TeV_53X.root'
    )
)

# EventContentAnalyzer logs the full content of each processed event.
process.dump=cms.EDAnalyzer('EventContentAnalyzer')
process.demo = cms.EDAnalyzer('DemoAnalyzer'
)

# process.p = cms.Path(process.demo)
process.p = cms.Path(process.demo*process.dump)
| {
"content_hash": "e12bad63c8ff8df2a3e947482041f89e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 80,
"avg_line_length": 31.757575757575758,
"alnum_prop": 0.7509541984732825,
"repo_name": "diana-hep/c2numpy",
"id": "e9afb86f3cde818297ed6bc725cd9147719e6778",
"size": "1048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/CMSSW-with-C-interface/Demo/DemoAnalyzer/python/ConfFile_cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2038"
},
{
"name": "C++",
"bytes": "30789"
},
{
"name": "Jupyter Notebook",
"bytes": "130476"
},
{
"name": "Python",
"bytes": "6508"
}
],
"symlink_target": ""
} |
"""
Write a program that randomizes a number
and prints the sum total of its digits.
For example if the number was: 2345
The result should be: 14
"""
from random import randint
num = randint(1,9999)
print "Starting with: %d" % num
# Numeric calculation
num_i = num
sum_d = 0
while num_i != 0:
digit = num_i % 10
sum_d += digit
num_i /= 10
print "Total sum of digits =", sum_d
# String calculation
num_s = str(num)
total = 0
for digit in num_s:
total += int(digit)
print "Total = ",total
| {
"content_hash": "8a31971aaff90d5ea4713427f8989f89",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 40,
"avg_line_length": 17,
"alnum_prop": 0.6607843137254902,
"repo_name": "nonZero/python-examples",
"id": "cb194f9331d7b94c63ba5e35b78c62bdded8029d",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "05_syntax_lab/03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39479"
}
],
"symlink_target": ""
} |
import logging
import openerp
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class pos_config(osv.osv):
    # Extend the point-of-sale configuration with discount-button settings.
    _inherit = 'pos.config'
    _columns = {
        # Percentage applied when the discount button is used.
        'discount_pc': fields.float('Discount Percentage', help='The discount percentage'),
        # Product line used to represent the discount amount on orders.
        'discount_product_id': fields.many2one('product.product','Discount Product', help='The product used to model the discount'),
    }
    _defaults = {
        # Default to a 10% discount.
        'discount_pc': 10,
    }
| {
"content_hash": "eef792f015faefff70b6228e560a8b00",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 132,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.6768916155419223,
"repo_name": "diogocs1/comps",
"id": "f8d24eb638bae28752342b5ef967e7863dc37f8f",
"size": "1468",
"binary": false,
"copies": "309",
"ref": "refs/heads/master",
"path": "web/addons/pos_discount/discount.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
import functools
import json
import optparse
import tempfile
import unittest
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.output_capture import OutputCapture
from webkitpy.common.system.platform_info_mock import MockPlatformInfo
from webkitpy.common.system.system_host import SystemHost
from webkitpy.common.system.system_host_mock import MockSystemHost
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.port.base import Port, VirtualTestSuite
from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem, LAYOUT_TEST_DIR, TestPort
class PortTest(unittest.TestCase):
def make_port(self, executive=None, with_tests=False, port_name=None, **kwargs):
host = MockSystemHost()
if executive:
host.executive = executive
if with_tests:
add_unit_tests_to_mock_filesystem(host.filesystem)
return TestPort(host, **kwargs)
return Port(host, port_name or 'baseport', **kwargs)
def test_pretty_patch_os_error(self):
port = self.make_port(executive=MockExecutive(exception=OSError))
self.assertEqual(port.pretty_patch_text('patch.txt'),
port._pretty_patch_error_html)
# This tests repeated calls to make sure we cache the result.
self.assertEqual(port.pretty_patch_text('patch.txt'),
port._pretty_patch_error_html)
def test_pretty_patch_script_error(self):
# FIXME: This is some ugly white-box test hacking ...
port = self.make_port(executive=MockExecutive(exception=ScriptError))
port._pretty_patch_available = True
self.assertEqual(port.pretty_patch_text('patch.txt'),
port._pretty_patch_error_html)
# This tests repeated calls to make sure we cache the result.
self.assertEqual(port.pretty_patch_text('patch.txt'),
port._pretty_patch_error_html)
def test_setup_test_run(self):
port = self.make_port()
# This routine is a no-op. We just test it for coverage.
port.setup_test_run()
def test_test_dirs(self):
port = self.make_port()
port.host.filesystem.write_text_file(port.layout_tests_dir() + '/canvas/test', '')
port.host.filesystem.write_text_file(port.layout_tests_dir() + '/css2.1/test', '')
dirs = port.test_dirs()
self.assertIn('canvas', dirs)
self.assertIn('css2.1', dirs)
    def test_skipped_perf_tests(self):
        """skipped_perf_tests() yields the non-blank lines of the Skipped file."""
        port = self.make_port()
        def add_text_file(dirname, filename, content='some content'):
            # Create dirname under perf_tests_dir() and write a file into it.
            dirname = port.host.filesystem.join(port.perf_tests_dir(), dirname)
            port.host.filesystem.maybe_make_directory(dirname)
            port.host.filesystem.write_text_file(port.host.filesystem.join(dirname, filename), content)
        add_text_file('inspector', 'test1.html')
        add_text_file('inspector', 'unsupported_test1.html')
        add_text_file('inspector', 'test2.html')
        add_text_file('inspector/resources', 'resource_file.html')
        add_text_file('unsupported', 'unsupported_test2.html')
        # The Skipped file deliberately contains a blank line, which must be
        # filtered out of the returned list.
        add_text_file('', 'Skipped', '\n'.join(['Layout', '', 'SunSpider', 'Supported/some-test.html']))
        self.assertEqual(port.skipped_perf_tests(), ['Layout', 'SunSpider', 'Supported/some-test.html'])
def test_get_option__set(self):
options, _ = optparse.OptionParser().parse_args([])
options.foo = 'bar'
port = self.make_port(options=options)
self.assertEqual(port.get_option('foo'), 'bar')
def test_get_option__unset(self):
port = self.make_port()
self.assertIsNone(port.get_option('foo'))
def test_get_option__default(self):
port = self.make_port()
self.assertEqual(port.get_option('foo', 'bar'), 'bar')
def test_additional_platform_directory(self):
port = self.make_port(port_name='foo')
port.FALLBACK_PATHS = {'': ['foo']}
test_file = 'fast/test.html'
# No additional platform directory
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[(None, 'fast/test-expected.txt')])
self.assertEqual(port.baseline_version_dir(), '/mock-checkout/third_party/WebKit/LayoutTests/platform/foo')
# Simple additional platform directory
port._options.additional_platform_directory = ['/tmp/local-baselines']
port.host.filesystem.write_text_file('/tmp/local-baselines/fast/test-expected.txt', 'foo')
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[('/tmp/local-baselines', 'fast/test-expected.txt')])
self.assertEqual(port.baseline_version_dir(), '/tmp/local-baselines')
# Multiple additional platform directories
port._options.additional_platform_directory = ['/foo', '/tmp/local-baselines']
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[('/tmp/local-baselines', 'fast/test-expected.txt')])
self.assertEqual(port.baseline_version_dir(), '/foo')
# Flag-specific baseline directory
port._options.additional_platform_directory = []
port._options.additional_driver_flag = ['--special-flag']
self.assertEqual(port.baseline_search_path(), [
'/mock-checkout/third_party/WebKit/LayoutTests/flag-specific/special-flag/platform/foo',
'/mock-checkout/third_party/WebKit/LayoutTests/flag-specific/special-flag',
'/mock-checkout/third_party/WebKit/LayoutTests/platform/foo'])
def test_nonexistant_expectations(self):
port = self.make_port(port_name='foo')
port.expectations_files = lambda: ['/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations',
'/mock-checkout/third_party/WebKit/LayoutTests/platform/nonexistant/TestExpectations']
port.host.filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations', '')
self.assertEqual('\n'.join(port.expectations_dict().keys()),
'/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations')
def test_additional_expectations(self):
port = self.make_port(port_name='foo')
port.port_name = 'foo'
port.host.filesystem.write_text_file(
'/mock-checkout/third_party/WebKit/LayoutTests/platform/foo/TestExpectations', '')
port.host.filesystem.write_text_file(
'/tmp/additional-expectations-1.txt', 'content1\n')
port.host.filesystem.write_text_file(
'/tmp/additional-expectations-2.txt', 'content2\n')
port.host.filesystem.write_text_file(
'/mock-checkout/third_party/WebKit/LayoutTests/FlagExpectations/special-flag', 'content3')
self.assertEqual('\n'.join(port.expectations_dict().values()), '')
port._options.additional_expectations = [
'/tmp/additional-expectations-1.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n')
port._options.additional_expectations = [
'/tmp/nonexistent-file', '/tmp/additional-expectations-1.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n')
port._options.additional_expectations = [
'/tmp/additional-expectations-1.txt', '/tmp/additional-expectations-2.txt']
self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n\ncontent2\n')
port._options.additional_driver_flag = ['--special-flag']
self.assertEqual('\n'.join(port.expectations_dict().values()), 'content3\ncontent1\n\ncontent2\n')
def test_flag_specific_expectations(self):
port = self.make_port(port_name='foo')
port.port_name = 'foo'
port.host.filesystem.write_text_file(
'/mock-checkout/third_party/WebKit/LayoutTests/FlagExpectations/special-flag-a', 'aa')
port.host.filesystem.write_text_file(
'/mock-checkout/third_party/WebKit/LayoutTests/FlagExpectations/special-flag-b', 'bb')
port.host.filesystem.write_text_file(
'/mock-checkout/third_party/WebKit/LayoutTests/FlagExpectations/README.txt', 'cc')
self.assertEqual('\n'.join(port.expectations_dict().values()), '')
self.assertEqual('\n'.join(port.all_expectations_dict().values()), 'bb\naa')
def test_additional_env_var(self):
port = self.make_port(options=optparse.Values({'additional_env_var': ['FOO=BAR', 'BAR=FOO']}))
self.assertEqual(port.get_option('additional_env_var'), ['FOO=BAR', 'BAR=FOO'])
environment = port.setup_environ_for_server()
self.assertTrue(('FOO' in environment) & ('BAR' in environment))
self.assertEqual(environment['FOO'], 'BAR')
self.assertEqual(environment['BAR'], 'FOO')
def test_find_no_paths_specified(self):
port = self.make_port(with_tests=True)
tests = port.tests([])
self.assertNotEqual(len(tests), 0)
def test_find_one_test(self):
port = self.make_port(with_tests=True)
tests = port.tests(['failures/expected/image.html'])
self.assertEqual(len(tests), 1)
def test_find_glob(self):
port = self.make_port(with_tests=True)
tests = port.tests(['failures/expected/im*'])
self.assertEqual(len(tests), 2)
def test_find_with_skipped_directories(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts'])
self.assertNotIn('userscripts/resources/iframe.html', tests)
def test_find_with_skipped_directories_2(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts/resources'])
self.assertEqual(tests, [])
@staticmethod
def _add_manifest_to_mock_file_system(filesystem):
filesystem.write_text_file(LAYOUT_TEST_DIR + '/external/wpt/MANIFEST.json', json.dumps({
'items': {
'testharness': {
'dom/ranges/Range-attributes.html': [
['/dom/ranges/Range-attributes.html', {}]
],
'dom/ranges/Range-attributes-slow.html': [
['/dom/ranges/Range-attributes.html', {'timeout': 'long'}]
],
'console/console-is-a-namespace.any.js': [
['/console/console-is-a-namespace.any.html', {}],
['/console/console-is-a-namespace.any.worker.html', {}],
],
},
'manual': {},
'reftest': {
'html/dom/elements/global-attributes/dir_auto-EN-L.html': [
[
'/html/dom/elements/global-attributes/dir_auto-EN-L.html',
[
[
'/html/dom/elements/global-attributes/dir_auto-EN-L-ref.html',
'=='
]
],
{'timeout': 'long'}
]
],
},
}}))
filesystem.write_text_file(LAYOUT_TEST_DIR + '/external/wpt/dom/ranges/Range-attributes.html', '')
filesystem.write_text_file(LAYOUT_TEST_DIR + '/external/wpt/console/console-is-a-namespace.any.js', '')
filesystem.write_text_file(LAYOUT_TEST_DIR + '/external/wpt/common/blank.html', 'foo')
def test_find_none_if_not_in_manifest(self):
port = self.make_port(with_tests=True)
PortTest._add_manifest_to_mock_file_system(port.host.filesystem)
self.assertNotIn('external/wpt/common/blank.html', port.tests([]))
def test_find_one_if_in_manifest(self):
port = self.make_port(with_tests=True)
PortTest._add_manifest_to_mock_file_system(port.host.filesystem)
self.assertIn('external/wpt/dom/ranges/Range-attributes.html', port.tests([]))
self.assertNotIn('external/wpt/console/console-is-a-namespace.any.js', port.tests([]))
self.assertEqual(port.tests(['external']), ['external/wpt/dom/ranges/Range-attributes.html'])
self.assertEqual(port.tests(['external/']), ['external/wpt/dom/ranges/Range-attributes.html'])
self.assertEqual(port.tests(['external/csswg-test']), [])
self.assertEqual(port.tests(['external/wpt']), ['external/wpt/dom/ranges/Range-attributes.html'])
self.assertEqual(port.tests(['external/wpt/']), ['external/wpt/dom/ranges/Range-attributes.html'])
self.assertEqual(port.tests(['external/wpt/dom/ranges/Range-attributes.html']),
['external/wpt/dom/ranges/Range-attributes.html'])
def test_is_test_file(self):
port = self.make_port(with_tests=True)
is_test_file = functools.partial(Port.is_test_file, port, port.host.filesystem)
self.assertTrue(is_test_file('', 'foo.html'))
self.assertTrue(is_test_file('', 'foo.svg'))
self.assertTrue(is_test_file('', 'test-ref-test.html'))
self.assertTrue(is_test_file('inspector-unit', 'trie.js'))
self.assertFalse(is_test_file('inspector-unit', 'foo.html'))
self.assertFalse(is_test_file('inspector', 'devtools.js'))
self.assertFalse(is_test_file('', 'foo.png'))
self.assertFalse(is_test_file('', 'foo-expected.html'))
self.assertFalse(is_test_file('', 'foo-expected.svg'))
self.assertFalse(is_test_file('', 'foo-expected.xht'))
self.assertFalse(is_test_file('', 'foo-expected-mismatch.html'))
self.assertFalse(is_test_file('', 'foo-expected-mismatch.svg'))
self.assertFalse(is_test_file('', 'foo-expected-mismatch.xhtml'))
self.assertFalse(is_test_file('', 'foo-ref.html'))
self.assertFalse(is_test_file('', 'foo-notref.html'))
self.assertFalse(is_test_file('', 'foo-notref.xht'))
self.assertFalse(is_test_file('', 'foo-ref.xhtml'))
self.assertFalse(is_test_file('', 'ref-foo.html'))
self.assertFalse(is_test_file('', 'notref-foo.xhr'))
def test_is_test_file_in_wpt(self):
port = self.make_port(with_tests=True)
filesystem = port.host.filesystem
PortTest._add_manifest_to_mock_file_system(filesystem)
# A file not in MANIFEST.json is not a test even if it has .html suffix.
self.assertFalse(port.is_test_file(filesystem, LAYOUT_TEST_DIR + '/external/wpt/common', 'blank.html'))
# .js is not a test in general, but it is if MANIFEST.json contains an
# entry for it.
self.assertTrue(port.is_test_file(filesystem, LAYOUT_TEST_DIR + '/external/wpt/console', 'console-is-a-namespace.any.js'))
# A file in external/wpt, not a sub directory.
self.assertFalse(port.is_test_file(filesystem, LAYOUT_TEST_DIR + '/external/wpt', 'testharness_runner.html'))
# A file in external/wpt_automation.
self.assertTrue(port.is_test_file(filesystem, LAYOUT_TEST_DIR + '/external/wpt_automation', 'foo.html'))
def test_is_slow_wpt_test(self):
port = self.make_port(with_tests=True)
filesystem = port.host.filesystem
PortTest._add_manifest_to_mock_file_system(filesystem)
self.assertFalse(port.is_slow_wpt_test('external/wpt/dom/ranges/Range-attributes.html'))
self.assertFalse(port.is_slow_wpt_test('dom/ranges/Range-attributes.html'))
self.assertTrue(port.is_slow_wpt_test('external/wpt/dom/ranges/Range-attributes-slow.html'))
self.assertTrue(port.is_slow_wpt_test('external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html'))
def test_parse_reftest_list(self):
port = self.make_port(with_tests=True)
port.host.filesystem.files['bar/reftest.list'] = '\n'.join(['== test.html test-ref.html',
'',
'# some comment',
'!= test-2.html test-notref.html # more comments',
'== test-3.html test-ref.html',
'== test-3.html test-ref2.html',
'!= test-3.html test-notref.html',
'fuzzy(80,500) == test-3 test-ref.html'])
# Note that we don't support the syntax in the last line; the code should ignore it, rather than crashing.
reftest_list = Port._parse_reftest_list(port.host.filesystem, 'bar')
self.assertEqual(reftest_list, {
'bar/test.html': [('==', 'bar/test-ref.html')],
'bar/test-2.html': [('!=', 'bar/test-notref.html')],
'bar/test-3.html': [('==', 'bar/test-ref.html'), ('==', 'bar/test-ref2.html'), ('!=', 'bar/test-notref.html')]})
def test_reference_files(self):
port = self.make_port(with_tests=True)
self.assertEqual(port.reference_files('passes/svgreftest.svg'),
[('==', port.layout_tests_dir() + '/passes/svgreftest-expected.svg')])
self.assertEqual(port.reference_files('passes/xhtreftest.svg'),
[('==', port.layout_tests_dir() + '/passes/xhtreftest-expected.html')])
self.assertEqual(port.reference_files('passes/phpreftest.php'),
[('!=', port.layout_tests_dir() + '/passes/phpreftest-expected-mismatch.svg')])
def test_reference_files_from_manifest(self):
port = self.make_port(with_tests=True)
PortTest._add_manifest_to_mock_file_system(port.host.filesystem)
self.assertEqual(port.reference_files('external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html'),
[('==', port.layout_tests_dir() +
'/external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L-ref.html')])
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_http_server_supports_ipv6(self):
port = self.make_port()
self.assertTrue(port.http_server_supports_ipv6())
port.host.platform.os_name = 'cygwin'
self.assertFalse(port.http_server_supports_ipv6())
port.host.platform.os_name = 'win'
self.assertFalse(port.http_server_supports_ipv6())
def test_check_httpd_success(self):
port = self.make_port(executive=MockExecutive())
port.path_to_apache = lambda: '/usr/sbin/httpd'
capture = OutputCapture()
capture.capture_output()
self.assertTrue(port.check_httpd())
_, _, logs = capture.restore_output()
self.assertEqual('', logs)
def test_httpd_returns_error_code(self):
port = self.make_port(executive=MockExecutive(exit_code=1))
port.path_to_apache = lambda: '/usr/sbin/httpd'
capture = OutputCapture()
capture.capture_output()
self.assertFalse(port.check_httpd())
_, _, logs = capture.restore_output()
self.assertEqual('httpd seems broken. Cannot run http tests.\n', logs)
def test_test_exists(self):
port = self.make_port(with_tests=True)
self.assertTrue(port.test_exists('passes'))
self.assertTrue(port.test_exists('passes/text.html'))
self.assertFalse(port.test_exists('passes/does_not_exist.html'))
self.assertTrue(port.test_exists('virtual'))
self.assertFalse(port.test_exists('virtual/does_not_exist.html'))
self.assertTrue(port.test_exists('virtual/virtual_passes/passes/text.html'))
def test_test_isfile(self):
port = self.make_port(with_tests=True)
self.assertFalse(port.test_isfile('passes'))
self.assertTrue(port.test_isfile('passes/text.html'))
self.assertFalse(port.test_isfile('passes/does_not_exist.html'))
self.assertFalse(port.test_isfile('virtual'))
self.assertTrue(port.test_isfile('virtual/virtual_passes/passes/text.html'))
self.assertFalse(port.test_isfile('virtual/does_not_exist.html'))
def test_test_isdir(self):
port = self.make_port(with_tests=True)
self.assertTrue(port.test_isdir('passes'))
self.assertFalse(port.test_isdir('passes/text.html'))
self.assertFalse(port.test_isdir('passes/does_not_exist.html'))
self.assertFalse(port.test_isdir('passes/does_not_exist/'))
self.assertTrue(port.test_isdir('virtual'))
self.assertFalse(port.test_isdir('virtual/does_not_exist.html'))
self.assertFalse(port.test_isdir('virtual/does_not_exist/'))
self.assertFalse(port.test_isdir('virtual/virtual_passes/passes/text.html'))
def test_tests(self):
port = self.make_port(with_tests=True)
tests = port.tests([])
self.assertIn('passes/text.html', tests)
self.assertIn('virtual/virtual_passes/passes/text.html', tests)
tests = port.tests(['passes'])
self.assertIn('passes/text.html', tests)
self.assertIn('passes/virtual_passes/test-virtual-passes.html', tests)
self.assertNotIn('virtual/virtual_passes/passes/text.html', tests)
tests = port.tests(['virtual/virtual_passes/passes'])
self.assertNotIn('passes/text.html', tests)
self.assertIn('virtual/virtual_passes/passes/test-virtual-passes.html', tests)
self.assertNotIn('passes/test-virtual-passes.html', tests)
self.assertNotIn('virtual/virtual_passes/passes/test-virtual-virtual/passes.html', tests)
self.assertNotIn('virtual/virtual_passes/passes/virtual_passes/passes/test-virtual-passes.html', tests)
def test_build_path(self):
port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'}))
self.assertEqual(port._build_path(), '/my-build-directory/Release')
def test_dont_require_http_server(self):
port = self.make_port()
self.assertEqual(port.requires_http_server(), False)
def test_can_load_actual_virtual_test_suite_file(self):
port = Port(SystemHost(), 'baseport')
# If this call returns successfully, we found and loaded the LayoutTests/VirtualTestSuites.
_ = port.virtual_test_suites()
def test_good_virtual_test_suite_file(self):
port = self.make_port()
port.host.filesystem.write_text_file(
port.host.filesystem.join(port.layout_tests_dir(), 'VirtualTestSuites'),
'[{"prefix": "bar", "base": "fast/bar", "args": ["--bar"]}]')
# If this call returns successfully, we found and loaded the LayoutTests/VirtualTestSuites.
_ = port.virtual_test_suites()
def test_virtual_test_suite_file_is_not_json(self):
port = self.make_port()
port.host.filesystem.write_text_file(
port.host.filesystem.join(port.layout_tests_dir(), 'VirtualTestSuites'),
'{[{[')
self.assertRaises(ValueError, port.virtual_test_suites)
def test_missing_virtual_test_suite_file(self):
port = self.make_port()
self.assertRaises(AssertionError, port.virtual_test_suites)
def test_is_wptserve_test(self):
port = self.make_port()
self.assertTrue(port.is_wptserve_test('external/wpt/foo/bar.html'))
self.assertFalse(port.is_wptserve_test('http/wpt/foo.html'))
def test_default_results_directory(self):
port = self.make_port(options=optparse.Values({'target': 'Default', 'configuration': 'Release'}))
# By default the results directory is in the build directory: out/<target>.
self.assertEqual(port.default_results_directory(), '/mock-checkout/out/Default/layout-test-results')
def test_results_directory(self):
port = self.make_port(options=optparse.Values({'results_directory': 'some-directory/results'}))
# A results directory can be given as an option, and it is relative to current working directory.
self.assertEqual(port.host.filesystem.cwd, '/')
self.assertEqual(port.results_directory(), '/some-directory/results')
def _assert_config_file_for_platform(self, port, platform, config_file):
port.host.platform = MockPlatformInfo(os_name=platform)
self.assertEqual(port._apache_config_file_name_for_platform(), config_file) # pylint: disable=protected-access
def _assert_config_file_for_linux_distribution(self, port, distribution, config_file):
port.host.platform = MockPlatformInfo(os_name='linux', linux_distribution=distribution)
self.assertEqual(port._apache_config_file_name_for_platform(), config_file) # pylint: disable=protected-access
def test_apache_config_file_name_for_platform(self):
port = self.make_port()
# pylint: disable=protected-access
port._apache_version = lambda: '2.2'
self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
self._assert_config_file_for_platform(port, 'linux', 'apache2-httpd-2.2.conf')
self._assert_config_file_for_linux_distribution(port, 'arch', 'arch-httpd-2.2.conf')
self._assert_config_file_for_linux_distribution(port, 'debian', 'debian-httpd-2.2.conf')
self._assert_config_file_for_linux_distribution(port, 'slackware', 'apache2-httpd-2.2.conf')
self._assert_config_file_for_linux_distribution(port, 'redhat', 'redhat-httpd-2.2.conf')
self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd-2.2.conf')
# win32 isn't a supported sys.platform. AppleWin/WinCairo/WinCE ports all use cygwin.
self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd-2.2.conf')
self._assert_config_file_for_platform(port, 'barf', 'apache2-httpd-2.2.conf')
def test_skips_test_in_smoke_tests(self):
    """In smoke-test-only mode, a test not listed in the smoke tests file
    is skipped."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: True
    port.host.filesystem.write_text_file(port.path_to_smoke_tests_file(), 'passes/text.html\n')
    self.assertTrue(port.skips_test(
        'failures/expected/image.html',
        generic_expectations=TestExpectations(port, include_overrides=False),
        full_expectations=TestExpectations(port, include_overrides=True)))
def test_skips_test_no_skip_smoke_tests_file(self):
    """Smoke-test-only mode without a smoke tests file skips nothing."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: True
    self.assertFalse(port.skips_test(
        'failures/expected/image.html',
        generic_expectations=TestExpectations(port, include_overrides=False),
        full_expectations=TestExpectations(port, include_overrides=True)))
def test_skips_test_port_doesnt_skip_smoke_tests(self):
    """When the port does not restrict itself to smoke tests, no test is
    skipped on that basis."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: False
    self.assertFalse(port.skips_test(
        'failures/expected/image.html',
        generic_expectations=TestExpectations(port, include_overrides=False),
        full_expectations=TestExpectations(port, include_overrides=True)))
def test_skips_test_skip_in_generic_expectations(self):
    """A [ Skip ] line in the generic expectations alone does not make
    skips_test() return True."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: False
    port.host.filesystem.write_text_file(
        port.path_to_generic_test_expectations_file(),
        'Bug(test) failures/expected/image.html [ Skip ]\n')
    self.assertFalse(port.skips_test(
        'failures/expected/image.html',
        generic_expectations=TestExpectations(port, include_overrides=False),
        full_expectations=TestExpectations(port, include_overrides=True)))
def test_skips_test_skip_in_full_expectations(self):
    """A WontFix entry in the full expectations (here via NeverFixTests)
    makes skips_test() return True."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: False
    port.host.filesystem.write_text_file(
        port.host.filesystem.join(port.layout_tests_dir(), 'NeverFixTests'),
        'Bug(test) failures/expected/image.html [ WontFix ]\n')
    self.assertTrue(port.skips_test(
        'failures/expected/image.html',
        generic_expectations=TestExpectations(port, include_overrides=False),
        full_expectations=TestExpectations(port, include_overrides=True)))
class NaturalCompareTest(unittest.TestCase):
    """Tests for Port._natural_sort_key: numeric runs embedded in strings
    compare by numeric value ('2' < '10') rather than lexicographically."""

    def setUp(self):
        self._port = TestPort(MockSystemHost())

    def assert_cmp(self, x, y, result):
        # cmp() is Python 2 only: returns -1/0/1 for less/equal/greater.
        self.assertEqual(cmp(self._port._natural_sort_key(x), self._port._natural_sort_key(y)), result)

    def test_natural_compare(self):
        self.assert_cmp('a', 'a', 0)
        self.assert_cmp('ab', 'a', 1)
        self.assert_cmp('a', 'ab', -1)
        self.assert_cmp('', '', 0)
        self.assert_cmp('', 'ab', -1)
        self.assert_cmp('1', '2', -1)
        self.assert_cmp('2', '1', 1)
        # Numeric runs compare by value, not character by character.
        self.assert_cmp('1', '10', -1)
        self.assert_cmp('2', '10', -1)
        self.assert_cmp('foo_1.html', 'foo_2.html', -1)
        self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
        self.assert_cmp('foo_1.html', 'foo_10.html', -1)
        self.assert_cmp('foo_2.html', 'foo_10.html', -1)
        self.assert_cmp('foo_23.html', 'foo_10.html', 1)
        self.assert_cmp('foo_23.html', 'foo_100.html', -1)
class KeyCompareTest(unittest.TestCase):
    """Tests for Port.test_key: path-component-aware, natural-order sort
    keys for test names."""

    def setUp(self):
        self._port = TestPort(MockSystemHost())

    def assert_cmp(self, x, y, result):
        # cmp() is Python 2 only: returns -1/0/1 for less/equal/greater.
        self.assertEqual(cmp(self._port.test_key(x), self._port.test_key(y)), result)

    def test_test_key(self):
        self.assert_cmp('/a', '/a', 0)
        self.assert_cmp('/a', '/b', -1)
        # Natural ordering applies within each path component.
        self.assert_cmp('/a2', '/a10', -1)
        self.assert_cmp('/a2/foo', '/a10/foo', -1)
        self.assert_cmp('/a/foo11', '/a/foo2', 1)
        # Shallower paths sort before deeper ones sharing a prefix.
        self.assert_cmp('/ab', '/a/a/b', -1)
        self.assert_cmp('/a/a/b', '/ab', 1)
        self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
class VirtualTestSuiteTest(unittest.TestCase):
    """Tests for VirtualTestSuite naming and reference-args defaults."""

    def test_basic(self):
        suite = VirtualTestSuite(prefix='suite', base='base/foo', args=['--args'])
        self.assertEqual(suite.name, 'virtual/suite/base/foo')
        self.assertEqual(suite.base, 'base/foo')
        self.assertEqual(suite.args, ['--args'])
        # By default, reference tests run with the same extra args.
        self.assertEqual(suite.reference_args, suite.args)

    def test_default_reference_args(self):
        # references_use_default_args=True: reference tests get no extra args.
        suite = VirtualTestSuite(prefix='suite', base='base/foo', args=['--args'], references_use_default_args=True)
        self.assertEqual(suite.args, ['--args'])
        self.assertEqual(suite.reference_args, [])

    def test_non_default_reference_args(self):
        suite = VirtualTestSuite(prefix='suite', base='base/foo', args=['--args'], references_use_default_args=False)
        self.assertEqual(suite.args, ['--args'])
        self.assertEqual(suite.reference_args, suite.args)

    def test_no_slash(self):
        # A prefix containing '/' is rejected: it must be a single path
        # component under "virtual/".
        self.assertRaises(AssertionError, VirtualTestSuite, prefix='suite/bar', base='base/foo', args=['--args'])
| {
"content_hash": "f5f8d088f91d587243c03b08ea7b8371",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 130,
"avg_line_length": 50.895934959349596,
"alnum_prop": 0.6290533848758826,
"repo_name": "youtube/cobalt",
"id": "44b772a621ab7a9c88b77dc340f3a97fa28190e8",
"size": "32828",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from gi.repository import GObject, Gtk, GdkPixbuf
from gettext import gettext as _
from sugar3.graphics.palette import Palette
from sugar3.graphics.palettemenu import PaletteMenuBox
from sugar3.graphics.palettemenu import PaletteMenuItem
from sugar3.graphics.tray import TrayButton
import constants
import utils
class RecdButton(TrayButton):
    """Tray button representing a single recording (photo, video or audio).

    Displays the recording's thumbnail composited over a colored media-type
    icon and provides a palette menu with "Erase" and (when available)
    "Copy to clipboard" actions. The actual erase/copy work is performed by
    listeners of the signals below.
    """

    __gsignals__ = {
        # Emitted when the user activates the "Erase" menu item.
        'remove-requested': (GObject.SignalFlags.RUN_LAST, None, ()),
        # Emitted when the user activates the "Copy to clipboard" menu item.
        'copy-clipboard-requested': (GObject.SignalFlags.RUN_LAST, None, ()),
    }

    def __init__(self, recd):
        """
        recd -- the recording object this button represents; must provide
        title, type, colorStroke/colorFill and getThumbPixbuf().
        """
        TrayButton.__init__(self)
        self._recd = recd
        self.set_icon_widget(self.get_image())
        # Handler id kept so cleanup() can disconnect; stays None when the
        # copy menu item is never created (see _add_copy_menu_item).
        self._copy_menu_item_handler = None
        palette = Palette(recd.title)
        self.set_palette(palette)
        self._box = PaletteMenuBox()
        palette.set_content(self._box)
        self._box.show()
        self._rem_menu_item = PaletteMenuItem(
            _('Erase'), icon_name='edit-delete')
        self._rem_menu_item_handler = self._rem_menu_item.connect(
            'activate', self._remove_clicked)
        self._box.append_item(self._rem_menu_item)
        self._rem_menu_item.show()
        self._add_copy_menu_item()

    def _add_copy_menu_item(self):
        # Skip the copy item for a buddy's recording that has not been
        # downloaded yet -- presumably there is no local data to copy
        # (TODO confirm against Recd semantics).
        if self._recd.buddy and not self._recd.downloadedFromBuddy:
            return
        self._copy_menu_item = PaletteMenuItem(
            _('Copy to clipboard'), icon_name='edit-copy')
        self._copy_menu_item_handler = self._copy_menu_item.connect(
            'activate', self._copy_clipboard_clicked)
        self._box.append_item(self._copy_menu_item)
        self._copy_menu_item.show()

    def get_recd(self):
        """Return the recording object this button represents."""
        return self._recd

    def get_image(self):
        """Build and return a Gtk.Image: the recording's thumbnail (if any)
        composited over a colored placeholder SVG for its media type."""
        ipb = self._recd.getThumbPixbuf()
        if ipb:
            w = ipb.get_width()
            h = ipb.get_height()
            a = float(w) / float(h)
        else:
            # No thumbnail available: assume a widescreen aspect ratio.
            a = 16. / 9
        if a < 1.4:
            # Roughly 4:3 (or narrower): use the standard icons.
            paths = {constants.TYPE_PHOTO: 'object-photo.svg',
                     constants.TYPE_VIDEO: 'object-video.svg',
                     constants.TYPE_AUDIO: 'object-audio.svg'}
            # Offsets of the thumbnail within the placeholder image.
            x = 8
            y = 8
        else:
            # Widescreen: use the 16:9 icon variants with adjusted offsets.
            paths = {constants.TYPE_PHOTO: 'object-photo-16to9.svg',
                     constants.TYPE_VIDEO: 'object-video-16to9.svg',
                     constants.TYPE_AUDIO: 'object-audio-16to9.svg'}
            x = 9
            y = 18
        path = paths[self._recd.type]
        pixbuf = utils.load_colored_svg(path, self._recd.colorStroke,
                                        self._recd.colorFill)
        if ipb:
            # Paste the thumbnail over the placeholder at (x, y).
            ipb.composite(pixbuf, x, y, w, h, x, y, 1, 1,
                          GdkPixbuf.InterpType.BILINEAR, 255)
        img = Gtk.Image()
        img.set_from_pixbuf(pixbuf)
        img.show()
        return img

    def cleanup(self):
        """Disconnect signal handlers; call before discarding the button."""
        self._rem_menu_item.disconnect(self._rem_menu_item_handler)
        if self._copy_menu_item_handler is not None:
            self._copy_menu_item.disconnect(self._copy_menu_item_handler)

    def _remove_clicked(self, widget):
        # Forward the menu activation as a public signal.
        self.emit('remove-requested')

    def _copy_clipboard_clicked(self, widget):
        # Forward the menu activation as a public signal.
        self.emit('copy-clipboard-requested')
| {
"content_hash": "c36f7ae5321aea43c968abb1432e5b4e",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 77,
"avg_line_length": 32.62626262626262,
"alnum_prop": 0.5770897832817338,
"repo_name": "godiard/record-activity",
"id": "1ef42d3ccb1f490d79f942e4bb7466599ccf9b13",
"size": "4385",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "button.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "154572"
}
],
"symlink_target": ""
} |
"""
Support for typing ctypes function pointers.
"""
from __future__ import absolute_import
import ctypes
import sys
from numba import types
from . import templates
from .typeof import typeof_impl
# Mapping from ctypes scalar types to the corresponding Numba types.
# `None` (an unset ctypes restype) maps to `types.none`.
CTYPES_MAP = {
    None: types.none,
    ctypes.c_bool: types.boolean,
    ctypes.c_int8: types.int8,
    ctypes.c_int16: types.int16,
    ctypes.c_int32: types.int32,
    ctypes.c_int64: types.int64,
    ctypes.c_uint8: types.uint8,
    ctypes.c_uint16: types.uint16,
    ctypes.c_uint32: types.uint32,
    ctypes.c_uint64: types.uint64,
    ctypes.c_float: types.float32,
    ctypes.c_double: types.float64,
    ctypes.c_void_p: types.voidptr,
    ctypes.py_object: types.ffi_forced_object,
}
def convert_ctypes(ctypeobj):
    """Translate a ctypes scalar type into the matching Numba type.

    Raises TypeError for ctypes types with no Numba equivalent.
    """
    if ctypeobj not in CTYPES_MAP:
        raise TypeError("unhandled ctypes type: %s" % ctypeobj)
    return CTYPES_MAP[ctypeobj]
def is_ctypes_funcptr(obj):
    """Return True if *obj* looks like a ctypes function pointer.

    An object qualifies when its address can be taken via ctypes.cast()
    and it carries both an ``argtypes`` and a ``restype`` attribute.
    """
    try:
        # Can we obtain the underlying address?
        ctypes.cast(obj, ctypes.c_void_p)
    except ctypes.ArgumentError:
        return False
    # Does the object declare its C signature?
    return hasattr(obj, 'argtypes') and hasattr(obj, 'restype')
@typeof_impl.register(ctypes._CFuncPtr)
def typeof_ctypes_function(val, c):
    """Typing hook for ctypes function pointers; returns None for
    objects that do not expose argtypes/restype."""
    if not is_ctypes_funcptr(val):
        return None
    return make_function_type(val)
def get_pointer(ctypes_func):
    """Return the address of the native function wrapped by *ctypes_func*,
    as a plain integer."""
    voidptr = ctypes.cast(ctypes_func, ctypes.c_void_p)
    return voidptr.value
def make_function_type(cfnptr):
    """
    Return a Numba type for the given ctypes function pointer.

    Raises TypeError when the pointer does not declare its argtypes.
    """
    if cfnptr.argtypes is None:
        raise TypeError("ctypes function %r doesn't define its argument types; "
                        "consider setting the `argtypes` attribute"
                        % (cfnptr.__name__,))
    # Map the declared C signature onto Numba types.
    cargs = [convert_ctypes(argtype) for argtype in cfnptr.argtypes]
    cret = convert_ctypes(cfnptr.restype)
    # Default C calling convention, except stdcall on Windows when the
    # pointer was not created with the CDECL flag.
    cconv = None
    if sys.platform == 'win32' and not cfnptr._flags_ & ctypes._FUNCFLAG_CDECL:
        cconv = 'x86_stdcallcc'
    sig = templates.signature(cret, *cargs)
    return types.ExternalFunctionPointer(sig, cconv=cconv,
                                         get_pointer=get_pointer)
| {
"content_hash": "7b43debc89e94d55c9c492c08cbcd1f6",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 26.775280898876403,
"alnum_prop": 0.6437263953000419,
"repo_name": "pombredanne/numba",
"id": "a3a94fc37ee34cac400833503f2214a6b05e7ef0",
"size": "2383",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "numba/typing/ctypes_utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "249112"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "3320040"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
"""
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import Date
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND
# Public API of this module.
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
        'CountQuery']
class DeleteQuery(Query):
    """
    Delete queries are done through this class, since they are more constrained
    than general queries.
    """
    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        assert len(self.tables) == 1, \
                "Can only delete from one table at a time."
        result = ['DELETE FROM %s' % self.quote_name_unless_alias(self.tables[0])]
        where, params = self.where.as_sql()
        result.append('WHERE %s' % where)
        return ' '.join(result), tuple(params)

    def do_query(self, table, where):
        # Point this query at a single table/where pair and run it; DELETE
        # produces no result rows, so the cursor is discarded.
        self.tables = [table]
        self.where = where
        self.execute_sql(None)

    def delete_batch_related(self, pk_list):
        """
        Set up and execute delete queries for all the objects related to the
        primary key values in pk_list. To delete the objects themselves, use
        the delete_batch() method.

        More than one physical query may be executed if there are a
        lot of values in pk_list.
        """
        from django.contrib.contenttypes import generic
        cls = self.model
        # First remove reverse m2m rows pointing at these objects. Generic
        # relations are handled in the forward loop below instead.
        for related in cls._meta.get_all_related_many_to_many_objects():
            if not isinstance(related.field, generic.GenericRelation):
                # Chunked so the generated "IN (...)" list stays within
                # backend limits.
                for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                    where = self.where_class()
                    where.add((None, related.field.m2m_reverse_name(),
                            related.field, 'in',
                            pk_list[offset : offset+GET_ITERATOR_CHUNK_SIZE]),
                            AND)
                    self.do_query(related.field.m2m_db_table(), where)
        # Then remove forward m2m rows owned by these objects.
        for f in cls._meta.many_to_many:
            w1 = self.where_class()
            if isinstance(f, generic.GenericRelation):
                from django.contrib.contenttypes.models import ContentType
                # Generic relations also need a content-type filter so only
                # rows pointing at this model are deleted.
                field = f.rel.to._meta.get_field(f.content_type_field_name)
                w1.add((None, field.column, field, 'exact',
                        ContentType.objects.get_for_model(cls).id), AND)
            for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                where = self.where_class()
                where.add((None, f.m2m_column_name(), f, 'in',
                        pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
                        AND)
                if w1:
                    where.add(w1, AND)
                self.do_query(f.m2m_db_table(), where)

    def delete_batch(self, pk_list):
        """
        Set up and execute delete queries for all the objects in pk_list. This
        should be called after delete_batch_related(), if necessary.

        More than one physical query may be executed if there are a
        lot of values in pk_list.
        """
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            where = self.where_class()
            field = self.model._meta.pk
            where.add((None, field.column, field, 'in',
                    pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]), AND)
            self.do_query(self.model._meta.db_table, where)
class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """
    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()

    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        self.values = []
        self.related_ids = None
        # clone() may already have installed related_updates on this
        # instance; don't clobber it.
        if not hasattr(self, 'related_updates'):
            self.related_updates = {}

    def clone(self, klass=None, **kwargs):
        # Bug fix: pass a *copy* of the dict -- i.e. call .copy() -- rather
        # than the bound .copy method object, so that mutating the clone's
        # related updates cannot affect this query (and the clone actually
        # gets a dict, as _setup_query()/add_related_update() expect).
        return super(UpdateQuery, self).clone(klass,
                related_updates=self.related_updates.copy(), **kwargs)

    def execute_sql(self, result_type=None):
        """
        Execute the specified update. Returns the number of rows affected by
        the primary update query (there could be other updates on related
        tables, but their rowcounts are not returned).
        """
        cursor = super(UpdateQuery, self).execute_sql(result_type)
        rows = cursor.rowcount
        del cursor
        # Propagate the update to any ancestor-model tables.
        for query in self.get_related_updates():
            query.execute_sql(result_type)
        return rows

    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        self.pre_sql_setup()
        if not self.values:
            return '', ()
        table = self.tables[0]
        qn = self.quote_name_unless_alias
        result = ['UPDATE %s' % qn(table)]
        result.append('SET')
        values, update_params = [], []
        for name, val, placeholder in self.values:
            if val is not None:
                values.append('%s = %s' % (qn(name), placeholder))
                update_params.append(val)
            else:
                # NULL is emitted literally rather than as a parameter.
                values.append('%s = NULL' % qn(name))
        result.append(', '.join(values))
        where, params = self.where.as_sql()
        if where:
            result.append('WHERE %s' % where)
        return ' '.join(result), tuple(update_params + params)

    def pre_sql_setup(self):
        """
        If the update depends on results from other tables, we need to do some
        munging of the "where" conditions to match the format required for
        (portable) SQL updates. That is done here.

        Further, if we are going to be running multiple updates, we pull out
        the id values to update at this point so that they don't change as a
        result of the progressive updates.
        """
        self.select_related = False
        self.clear_ordering(True)
        super(UpdateQuery, self).pre_sql_setup()
        count = self.count_active_tables()
        if not self.related_updates and count == 1:
            # Simple single-table update: nothing to rewrite.
            return

        # We need to use a sub-select in the where clause to filter on things
        # from other tables.
        query = self.clone(klass=Query)
        query.bump_prefix()
        query.extra_select = {}
        first_table = query.tables[0]
        if query.alias_refcount[first_table] == 1:
            # We can remove one table from the inner query.
            query.unref_alias(first_table)
            # Find the first remaining table that is still referenced and
            # select the join column that links back to it.
            for i in xrange(1, len(query.tables)):
                table = query.tables[i]
                if query.alias_refcount[table]:
                    break
            join_info = query.alias_map[table]
            query.select = [(join_info[RHS_ALIAS], join_info[RHS_JOIN_COL])]
            must_pre_select = False
        else:
            query.select = []
            query.add_fields([query.model._meta.pk.name])
            must_pre_select = not self.connection.features.update_can_self_select

        # Now we adjust the current query: reset the where clause and get rid
        # of all the tables we don't need (since they're in the sub-select).
        self.where = self.where_class()
        if self.related_updates or must_pre_select:
            # Either we're using the idents in multiple update queries (so
            # don't want them to change), or the db backend doesn't support
            # selecting from the updating table (e.g. MySQL).
            idents = []
            for rows in query.execute_sql(MULTI):
                idents.extend([r[0] for r in rows])
            self.add_filter(('pk__in', idents))
            self.related_ids = idents
        else:
            # The fast path. Filters and updates in one query.
            self.add_filter(('pk__in', query))
        for alias in self.tables[1:]:
            self.alias_refcount[alias] = 0

    def clear_related(self, related_field, pk_list):
        """
        Set up and execute an update query that clears related entries for the
        keys in pk_list.

        This is used by the QuerySet.delete_objects() method.
        """
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            f = self.model._meta.pk
            self.where.add((None, f.column, f, 'in',
                    pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
                    AND)
            # Set the foreign key column to NULL (the placeholder is unused
            # for None values -- see as_sql()).
            self.values = [(related_field.column, None, '%s')]
            self.execute_sql(None)

    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in values.iteritems():
            field, model, direct, m2m = self.model._meta.get_field_by_name(name)
            if not direct or m2m:
                raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)

    def add_update_fields(self, values_seq):
        """
        Turn a sequence of (field, model, value) triples into an update query.
        Used by add_update_values() as well as the "fast" update path when
        saving models.
        """
        from django.db.models.base import Model
        for field, model, val in values_seq:
            # FIXME: Some sort of db_prep_* is probably more appropriate here.
            if field.rel and isinstance(val, Model):
                val = val.pk

            # Getting the placeholder for the field.
            if hasattr(field, 'get_placeholder'):
                placeholder = field.get_placeholder(val)
            else:
                placeholder = '%s'

            if model:
                # The field lives on an ancestor model's table; coalesce it
                # into the per-ancestor update instead.
                self.add_related_update(model, field.column, val, placeholder)
            else:
                self.values.append((field.column, val, placeholder))

    def add_related_update(self, model, column, value, placeholder):
        """
        Adds (name, value) to an update query for an ancestor model.

        Updates are coalesced so that we only run one update query per ancestor.
        """
        try:
            self.related_updates[model].append((column, value, placeholder))
        except KeyError:
            self.related_updates[model] = [(column, value, placeholder)]

    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in self.related_updates.iteritems():
            query = UpdateQuery(model, self.connection)
            query.values = values
            if self.related_ids:
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    """
    Represents an "insert" SQL query for a single row.
    """
    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        # Parallel lists: column names and their SQL value fragments
        # (placeholders or raw SQL), plus the bound parameters.
        self.columns = []
        self.values = []
        self.params = ()

    def clone(self, klass=None, **kwargs):
        extras = {'columns': self.columns[:], 'values': self.values[:],
                'params': self.params}
        # Bug fix: the extras must be expanded as keyword arguments; the
        # previous code passed the dict positionally (binding it to the
        # wrong parameter of Query.clone) and silently dropped any
        # caller-supplied kwargs.
        extras.update(kwargs)
        return super(InsertQuery, self).clone(klass, **extras)

    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and the tuple
        of parameters.
        """
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        result = ['INSERT INTO %s' % qn(self.model._meta.db_table)]
        result.append('(%s)' % ', '.join([qn(c) for c in self.columns]))
        result.append('VALUES (%s)' % ', '.join(self.values))
        return ' '.join(result), self.params

    def execute_sql(self, return_id=False):
        """
        Execute the insert; when return_id is True, return the primary key
        of the newly inserted row (otherwise None).
        """
        cursor = super(InsertQuery, self).execute_sql(None)
        if return_id:
            return self.connection.ops.last_insert_id(cursor,
                    self.model._meta.db_table, self.model._meta.pk.column)

    def insert_values(self, insert_values, raw_values=False):
        """
        Set up the insert query from the 'insert_values' dictionary. The
        dictionary gives the model field names and their target values.

        If 'raw_values' is True, the values in the 'insert_values' dictionary
        are inserted directly into the query, rather than passed as SQL
        parameters. This provides a way to insert NULL and DEFAULT keywords
        into the query, for example.
        """
        placeholders, values = [], []
        for field, val in insert_values:
            if hasattr(field, 'get_placeholder'):
                # Some fields (e.g. geo fields) need special munging before
                # they can be inserted.
                placeholders.append(field.get_placeholder(val))
            else:
                placeholders.append('%s')
            self.columns.append(field.column)
            values.append(val)
        if raw_values:
            # Values become part of the SQL string itself.
            self.values.extend(values)
        else:
            # Values are bound as query parameters.
            self.params += tuple(values)
            self.values.extend(placeholders)
class DateQuery(Query):
    """
    A DateQuery is a normal query, except that it specifically selects a single
    date field. This requires some special handling when converting the results
    back to Python objects, so we put it in a separate class.
    """
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        # Backend mixins may provide resolve_columns() for custom type
        # conversion; otherwise fall back to string typecasting when the
        # backend returns datetimes as strings.
        resolve_columns = hasattr(self, 'resolve_columns')
        if resolve_columns:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            from django.db.backends.util import typecast_timestamp
            needs_string_cast = self.connection.features.needs_datetime_string_cast

        # The date column comes right after any extra-select columns.
        offset = len(self.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                date = row[offset]
                if resolve_columns:
                    date = self.resolve_columns([date], fields)[0]
                elif needs_string_cast:
                    date = typecast_timestamp(str(date))
                yield date

    def add_date_select(self, field, lookup_type, order='ASC'):
        """
        Converts the query into a date extraction query.
        """
        result = self.setup_joins([field.name], self.get_meta(),
                self.get_initial_alias(), False)
        # setup_joins() returns the join list in position 3; the final alias
        # is the table the date column lives on.
        alias = result[3][-1]
        select = Date((alias, field.column), lookup_type,
                self.connection.ops.date_trunc_sql)
        self.select = [select]
        self.select_fields = [None]
        self.select_related = False # See #7097.
        self.distinct = True
        # Py2 "cond and a or b" idiom: [1] for ascending, [-1] for descending.
        self.order_by = order == 'ASC' and [1] or [-1]
class CountQuery(Query):
    """
    A CountQuery knows how to take a normal query which would select over
    multiple distinct columns and turn it into SQL that can be used on a
    variety of backends (it requires a select in the FROM clause).
    """
    def get_from_clause(self):
        # Wrap the inner query as a subselect aliased "A1". The inner query
        # is expected on self._query, assigned by the caller -- TODO confirm
        # where _query is installed.
        result, params = self._query.as_sql()
        return ['(%s) A1' % result], params

    def get_ordering(self):
        # Ordering is irrelevant to a COUNT and may be invalid inside a
        # subselect, so emit none.
        return ()
| {
"content_hash": "a888472bb05f0c4467f880d5b6acb3cf",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 121,
"avg_line_length": 40.494923857868024,
"alnum_prop": 0.5818238796615481,
"repo_name": "hugs/django",
"id": "5ca041cbded64e7db61a93280ccedd7e30efb192",
"size": "15955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/models/sql/subqueries.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "76551"
},
{
"name": "Python",
"bytes": "3998235"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
__author__ = 'Joe Linn'
import json
import pylastica.index
class Action(object):
    """A single action of a bulk request: op-type metadata plus an optional
    document source."""

    # Supported bulk operation types.
    OP_TYPE_CREATE = 'create'
    OP_TYPE_INDEX = 'index'
    OP_TYPE_DELETE = 'delete'
    OP_TYPE_UPDATE = 'update'

    def __init__(self, op_type=OP_TYPE_INDEX, metadata=None, source=None):
        """
        @param op_type: see OP_TYPE_* class properties for options
        @type op_type: str
        @param metadata: action metadata (e.g. _index / _type / _id); defaults to {}
        @type metadata: dict
        @param source: document source for index/create/update actions
        @type source: dict
        """
        self.op_type = op_type
        self.metadata = metadata
        self.source = source

    @property
    def op_type(self):
        """
        @return: the bulk operation type
        @rtype: str
        """
        return self._op_type

    @op_type.setter
    def op_type(self, op_type):
        """
        @param op_type: see OP_TYPE_* class properties for options
        @type op_type: str
        """
        self._op_type = op_type

    @property
    def metadata(self):
        """
        @return: the action metadata dict
        @rtype: dict
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        @param metadata: action metadata; None is normalized to {}
        @type metadata: dict
        """
        if metadata is None:
            metadata = {}
        self._metadata = metadata

    @property
    def action_metadata(self):
        """
        @return: the single-key dict used as the bulk action line
        @rtype: dict
        """
        return {self.op_type: self.metadata}

    @property
    def source(self):
        """
        @return: the document source, or None
        @rtype: dict
        """
        return self._source

    @source.setter
    def source(self, source):
        """
        @param source: document source
        @type source: dict
        """
        self._source = source

    def has_source(self):
        """
        @return: True if a source document has been set
        @rtype: bool
        """
        return self._source is not None

    def set_index(self, index):
        """
        @param index: index name or Index object
        @type index: str or pylastica.index.Index
        @return:
        @rtype: self
        """
        if isinstance(index, pylastica.index.Index):
            index = index.name
        self._metadata['_index'] = index
        return self

    def set_type(self, doc_type):
        """
        @param doc_type: document type name or DocType object
        @type doc_type: str or pylastica.doc_type.DocType
        @return:
        @rtype: self
        """
        if isinstance(doc_type, pylastica.doc_type.DocType):
            # A DocType also knows its index; record that as well.
            self.set_index(doc_type.index.name)
            doc_type = doc_type.name
        # Bug fix: store the document type name; the previous code assigned
        # the builtin `type` instead of `doc_type`, corrupting the metadata.
        self._metadata['_type'] = doc_type
        return self

    def set_id(self, doc_id):
        """
        @param doc_id: document id
        @type doc_id: str
        @return:
        @rtype: self
        """
        self._metadata['_id'] = doc_id
        return self

    def to_list(self):
        """
        @return: the action line, plus the source line when present
        @rtype: list
        """
        data = [self.action_metadata]
        if self.has_source():
            data.append(self.source)
        return data

    def to_string(self):
        """
        @return: the newline-delimited bulk representation of this action
        @rtype: str
        """
        from pylastica.bulk import Bulk
        string = json.dumps(self.action_metadata) + Bulk.DELIMITER
        if self.has_source():
            source = self.source
            if isinstance(source, str):
                # Pre-serialized source: emit verbatim.
                string += source
            else:
                string += json.dumps(source)
            string += Bulk.DELIMITER
        return string

    def __str__(self):
        """
        @return:
        @rtype: str
        """
        return self.to_string()

    @staticmethod
    def is_valid_op_type(op_type):
        """
        Determines whether or not the given op type is a valid bulk operation
        @param op_type:
        @type op_type: str
        @return:
        @rtype: bool
        """
        valid = [
            Action.OP_TYPE_INDEX,
            Action.OP_TYPE_CREATE,
            Action.OP_TYPE_DELETE,
            Action.OP_TYPE_UPDATE
        ]
        return op_type in valid
| {
"content_hash": "be16a4b8dcba354c7dc62a52ad5a6ae8",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 77,
"avg_line_length": 20.936842105263157,
"alnum_prop": 0.49019607843137253,
"repo_name": "jlinn/pylastica",
"id": "6975ca021bb1ee9a2dd56230281a83ed0a22ec9c",
"size": "3978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylastica/bulk/action/action.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6774"
},
{
"name": "Python",
"bytes": "547260"
},
{
"name": "Shell",
"bytes": "1771"
}
],
"symlink_target": ""
} |
import os, struct, numpy
# Decibel floor -- values at or below this are presumably treated as
# silence by the spectrum implementations (TODO confirm against consumers).
dB_MIN = -80.
# Power-intensity threshold scale factor; exact semantics are defined by
# the modules that consume it (TODO confirm).
PWI_THS = 60.
# Minimum linear power: PWI_THS attenuated by dB_MIN decibels (amplitude
# convention, hence /20).
PW_MIN = PWI_THS * (10.** (dB_MIN / 20.))
class SpectrumBase(object):
    '''Base type of spectrums.'''
    def __init__(self, audio):
        # Audio source the spectrum frames will be computed from.
        self.audio = audio

    def walk(self, win, step, start, end, join_channels):
        '''Generator of spectrum frames.

        Subclasses must implement this; parameter semantics (window size,
        hop, range, channel joining) are defined by the concrete subclass.
        '''
        raise NotImplementedError
class SpectrogramFile(object):
    '''Read or Write spectrogram into file.

    Binary layout: an 8-byte header (frame count and dimension count, both
    little-endian uint32), followed by `frames count` frames of
    `dimensions count` little-endian float64 values each.
    '''
    def __init__(self, path, mode='r'):
        # NOTE(review): assert is stripped under `python -O`; raising
        # ValueError would validate `mode` more robustly.
        assert mode in ('r','w')
        self._mode = mode
        if self._mode == 'r' \
                and not os.path.isfile(path):
            raise ValueError("`%s' is not a file." % path)
        self._path = path
        # Always binary: 'rb' or 'wb'.
        self._fh = open(self._path, mode+'b')

    def _read_header(self):
        '''
        Little-endian
        |... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
        |        frames count        |      dimensions count      |
        '''
        self._fh.seek(0)
        buf = self._fh.read(4*2)
        fc, dc = struct.unpack("<II", buf)
        return fc, dc

    def _write_header(self, fc, dc):
        # Overwrite the 8-byte header at the start of the file.
        self._fh.seek(0)
        self._fh.write(struct.pack("<II",int(fc), int(dc)))

    def _read_frame(self, dc):
        # One frame = dc float64 values = dc * 8 bytes.
        buf = self._fh.read(dc * 8)
        if len(buf) < 8:
            # Treated as end of file. NOTE(review): a partial frame of
            # 8 <= len(buf) < dc*8 bytes would make struct.unpack raise.
            return None
        return numpy.array(struct.unpack('<'+'d'*dc, buf))

    def _write_frame(self, vector):
        buf = struct.pack('<'+'d'*len(vector), *vector)
        self._fh.write(buf)

    def close(self):
        self._fh.close()

    def walk(self, offset=0, limit=None):
        '''Yield frames [offset, offset+limit) as numpy arrays.'''
        fc, dc = self._read_header()
        offset = int(offset)
        # Py2 "cond and a or b" idiom for "fc if limit is None else
        # int(limit)". NOTE(review): misbehaves when fc == 0 and limit is
        # None (falls through to int(None) -> TypeError).
        limit = limit is None and fc or int(limit)
        assert 0 <= offset < fc
        end = offset + limit
        # Clamp the end to the number of frames actually stored.
        end = end > fc and fc or end
        if offset > 0:
            # Skip `offset` frames of dc doubles (8 bytes each).
            self._fh.seek(offset * 8 * dc, os.SEEK_CUR)
        for idx in xrange(offset, end):  # xrange: this module targets Python 2
            vector = self._read_frame(dc)
            if vector is None:
                raise StopIteration
            yield vector

    def dump(self, spectrum_iter):
        '''Write all frames from `spectrum_iter`, then record the counts.'''
        # Reserve the header with placeholder counts; the real counts are
        # only known after the iterator is exhausted.
        self._write_header(0,0)
        fc, dc = 0, None
        for vector in spectrum_iter:
            self._write_frame(vector)
            fc += 1
            if dc is None:
                # Dimension count is taken from the first frame.
                dc = len(vector)
        self._write_header(fc, dc)
| {
"content_hash": "2b9e0b8c6f0730c2035d521abfb88efd",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 67,
"avg_line_length": 28.156626506024097,
"alnum_prop": 0.5126230209670518,
"repo_name": "dongying/dear",
"id": "0209b0e0ce297c4715a34fd10dee27dd9a1100ab",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dear/spectrum/_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46689"
}
],
"symlink_target": ""
} |
from appdirs import *
#---- self test code
if __name__ == "__main__":
    # Smoke test: print the platform-specific directories that appdirs
    # resolves for a sample application ("Komodo" by "ActiveState").
    print("applib: user data dir: %s" % user_data_dir("Komodo", "ActiveState"))
    print("applib: site data dir: %s" % site_data_dir("Komodo", "ActiveState"))
    print("applib: user cache dir: %s" % user_cache_dir("Komodo", "ActiveState"))
| {
"content_hash": "3adab6b61d917800f769e9ecbd9881ce",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 81,
"avg_line_length": 31.6,
"alnum_prop": 0.6139240506329114,
"repo_name": "ActiveState/applib",
"id": "dc6412ef7d6b9b28224865f14881e9056368ebfc",
"size": "414",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "applib/location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133800"
}
],
"symlink_target": ""
} |
"""Ancient Greek phonology
"""
| {
"content_hash": "ed1bcb5a0a4fdc562bb16973f98beb91",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 26,
"avg_line_length": 15.5,
"alnum_prop": 0.6774193548387096,
"repo_name": "diyclassics/cltk",
"id": "30d626a43530018424b9c20a366816e24c8fa51f",
"size": "31",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/cltk/phonology/grc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "128319"
},
{
"name": "Makefile",
"bytes": "2296"
},
{
"name": "Python",
"bytes": "3335682"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.api import http_pb2
from google.cloud.videointelligence_v1p1beta1.proto import video_intelligence_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
from google.rpc import status_pb2
# Protobuf modules shared across Google Cloud APIs; their message classes
# are re-exported from this module unchanged.
_shared_modules = [
    http_pb2,
    operations_pb2,
    any_pb2,
    descriptor_pb2,
    duration_pb2,
    empty_pb2,
    timestamp_pb2,
    status_pb2,
]
# Protobuf modules specific to this API version; their message classes are
# re-homed so they appear to live in this types module.
_local_modules = [
    video_intelligence_pb2,
]
names = []
# Hoist every message class from the shared modules onto this module so
# callers can write e.g. `types.Operation`.
for module in _shared_modules:
    for name, message in get_messages(module).items():
        setattr(sys.modules[__name__], name, message)
        names.append(name)
for module in _local_modules:
    for name, message in get_messages(module).items():
        # Rewrite __module__ so repr()/pickling/docs point at this module
        # rather than the generated _pb2 module.
        message.__module__ = 'google.cloud.videointelligence_v1p1beta1.types'
        setattr(sys.modules[__name__], name, message)
        names.append(name)
# Publish the re-exported message names as this module's API.
__all__ = tuple(sorted(names))
| {
"content_hash": "8bfa54573ff20196e68f44e3f5614df0",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 81,
"avg_line_length": 27.162790697674417,
"alnum_prop": 0.7226027397260274,
"repo_name": "jonparrott/gcloud-python",
"id": "8bd345a758d4c0deec392d3cc731d3233a908c96",
"size": "1770",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "videointelligence/google/cloud/videointelligence_v1p1beta1/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
} |
from django.contrib.syndication.views import Feed
from blog.models import Post
from django.core.urlresolvers import reverse
class BlogLatestEntries(Feed):
    """RSS/Atom feed of the five most recent visible blog posts."""
    title = "Thiago Pagonha"
    link = "/blog/"
    description = "Últimas notícias no blog."
    def items(self):
        """Return the five newest posts that are flagged visible."""
        return Post.objects.filter(visible=True)[:5]
    def item_title(self, item):
        """Feed entry title: the post's title."""
        return item.title
    def item_description(self, item):
        """Feed entry body: the post's short description."""
        return item.short_description
    def item_link(self, item):
        """Resolve the post's canonical URL from its slug."""
        return reverse('blog.views.post', args=[item.slug]) | {
"content_hash": "f56295d955ac7ba413cea77b38c36f0e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 59,
"avg_line_length": 29,
"alnum_prop": 0.656896551724138,
"repo_name": "thiagopa/thiagopagonha",
"id": "af38394f7a3aec1d507bb271ffd65b336baea8d3",
"size": "606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/feeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24184"
},
{
"name": "JavaScript",
"bytes": "1446"
},
{
"name": "Python",
"bytes": "21235"
}
],
"symlink_target": ""
} |
"""Server Version Class."""
import logging
class ServerVersion(object):  # Can't import APIClassTemplate due to dependency loop.
    """
    Retrieve the FMC's version information.

    Calling get() stores each version field reported by the FMC on the
    instance (vdbVersion, sruVersion, serverVersion, geoVersion) and
    returns the full API response.
    """

    URL_SUFFIX = "/info/serverversion"
    # Instance attributes populated from the API response by get().
    _VERSION_FIELDS = ("vdbVersion", "sruVersion", "serverVersion", "geoVersion")

    def __init__(self, fmc):
        """
        Initialize ServerVersion object.

        :param fmc (object): FMC object used to reach the API.
        :return: None
        """
        logging.debug("In __init__() for ServerVersion class.")
        self.fmc = fmc
        self.URL = f"{self.fmc.platform_url}{self.URL_SUFFIX}"
        # Version fields are unknown until get() is called.
        for field in self._VERSION_FIELDS:
            setattr(self, field, None)

    def get(self):
        """
        Send GET to FMC and populate the version instance variables.

        :return: requests response
        """
        logging.debug("GET method for API for ServerVersion.")
        response = self.fmc.send_to_api(method="get", url=self.URL)
        if "items" in response:
            logging.info(
                "Populating vdbVersion, sruVersion, serverVersion, and geoVersion FMC instance variables."
            )
            info = response["items"][0]
            for field in self._VERSION_FIELDS:
                setattr(self, field, info[field])
        return response

    def post(self):
        """POST method for API for ServerVersion not supported."""
        logging.info("POST method for API for ServerVersion not supported.")

    def put(self):
        """PUT method for API for ServerVersion not supported."""
        logging.info("PUT method for API for ServerVersion not supported.")

    def delete(self):
        """DELETE method for API for ServerVersion not supported."""
        logging.info("DELETE method for API for ServerVersion not supported.")
| {
"content_hash": "4d977cf009c2f08083ff8ed2fee2fe0f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 106,
"avg_line_length": 33.046875,
"alnum_prop": 0.6146572104018913,
"repo_name": "daxm/fmcapi",
"id": "b76d56baa7359eeb8a5dc23f125b97f579005a2e",
"size": "2115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fmcapi/api_objects/system_information/serverversion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "985"
},
{
"name": "Python",
"bytes": "572788"
},
{
"name": "Shell",
"bytes": "1591"
}
],
"symlink_target": ""
} |
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import keras as keras_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
# Shared fixture parameters for the tests in this file.
_RANDOM_SEED = 1337  # Seed for deterministic synthetic data.
_TRAIN_SIZE = 200  # Number of synthetic training samples.
_INPUT_SIZE = (10,)  # Feature shape fed to the simple models.
_NUM_CLASS = 2  # Number of target classes.
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
  """Build a small Dense/Dropout/Dense sequential classifier fixture."""
  layers = [
      keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE),
      keras.layers.Dropout(0.1),
      keras.layers.Dense(_NUM_CLASS, activation='softmax'),
  ]
  model = keras.models.Sequential()
  for layer in layers:
    model.add(layer)
  return model
def simple_functional_model():
  """Build the functional-API twin of `simple_sequential_model`."""
  inputs = keras.layers.Input(shape=_INPUT_SIZE)
  hidden = keras.layers.Dense(16, activation='relu')(inputs)
  hidden = keras.layers.Dropout(0.1)(hidden)
  outputs = keras.layers.Dense(_NUM_CLASS, activation='softmax')(hidden)
  return keras.models.Model(inputs=[inputs], outputs=[outputs])
def multi_inputs_multi_outputs_model():
  """Build and compile a 3-input / 2-output functional model fixture.

  Inputs 'input_a' and 'input_b' share a single dense layer; the string
  input 'input_m' is parsed to numbers and used to scale the 'a' branch
  before both branches are concatenated and fed to two softmax heads.
  """
  input_a = keras.layers.Input(shape=(16,), name='input_a')
  input_b = keras.layers.Input(shape=(16,), name='input_b')
  input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
  shared_dense = keras.layers.Dense(8, name='dense_1')
  branch_a = shared_dense(input_a)
  # Parse the string input into numbers before mixing it into branch a.
  parsed_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
  scaled_a = keras.layers.Lambda(lambda k: k[0] * k[1])([parsed_m, branch_a])
  branch_b = shared_dense(input_b)
  merged = keras.layers.concatenate([scaled_a, branch_b], name='merge')
  output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
  output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
  model = keras.models.Model(
      inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
  model.compile(
      loss='categorical_crossentropy',
      optimizer=gradient_descent.GradientDescentOptimizer(0.001),
      metrics={
          'dense_2': 'categorical_accuracy',
          'dense_3': 'categorical_accuracy'
      })
  return model
def get_ds_train_input_fn():
  """Return a batched tf.data Dataset over the deterministic training split."""
  np.random.seed(_RANDOM_SEED)
  (x_train, y_train), _ = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=_INPUT_SIZE,
      num_classes=_NUM_CLASS)
  labels = keras.utils.to_categorical(y_train)
  return dataset_ops.Dataset.from_tensor_slices((x_train, labels)).batch(32)
def get_ds_test_input_fn():
  """Return a batched tf.data Dataset over the deterministic test split."""
  np.random.seed(_RANDOM_SEED)
  _, (x_test, y_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=_INPUT_SIZE,
      num_classes=_NUM_CLASS)
  labels = keras.utils.to_categorical(y_test)
  return dataset_ops.Dataset.from_tensor_slices((x_test, labels)).batch(32)
def get_multi_inputs_multi_outputs_data():
  """Generate deterministic train/test dicts for the multi-IO model.

  Returns a (train_data, test_data) pair of dicts keyed by the model's
  input/output layer names; the 'c' and 'd' labels are one-hot encoded.
  """
  def _make_split(input_shape, num_classes):
    # All splits share the same seed so the data is reproducible.
    return testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=50,
        input_shape=input_shape,
        num_classes=num_classes,
        random_seed=_RANDOM_SEED)
  (a_train, c_train), (a_test, c_test) = _make_split((16,), 3)
  (b_train, d_train), (b_test, d_test) = _make_split((16,), 2)
  (m_train, _), (m_test, _) = _make_split((8,), 2)
  c_train, c_test, d_train, d_test = [
      keras.utils.to_categorical(labels)
      for labels in (c_train, c_test, d_train, d_test)]
  train_data = {
      'input_a': a_train,
      'input_b': b_train,
      'input_m': m_train,
      'output_c': c_train,
      'output_d': d_train
  }
  test_data = {
      'input_a': a_test,
      'input_b': b_test,
      'input_m': m_test,
      'output_c': c_test,
      'output_d': d_test
  }
  return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution):
  """Batch `dataset`, dropping the remainder when running on TPU.

  TPUs currently require fully defined input shapes; drop_remainder
  ensures the input will have fully defined shapes.
  """
  on_tpu = isinstance(distribution, tpu_strategy.TPUStrategy)
  if on_tpu:
    return dataset.batch(batch_size, drop_remainder=True)
  return dataset.batch(batch_size)
def get_model():
  """Return an uncompiled one-dense-layer model mapping (None, 3) -> (None, 4)."""
  inputs = keras.layers.Input(shape=(3,), name='input')
  outputs = keras.layers.Dense(4, name='dense')(inputs)
  return keras.Model(inputs, outputs)
def get_dataset(distribution):
  """Return a repeated, strategy-appropriately batched zeros dataset."""
  features = np.zeros((10, 3), dtype=np.float32)
  labels = np.zeros((10, 4), dtype=np.float32)
  ds = dataset_ops.Dataset.from_tensor_slices((features, labels)).repeat(100)
  return batch_wrapper(ds, 10, distribution)
def get_predict_dataset(distribution):
  """Return a repeated, batched inputs-only zeros dataset for predict()."""
  features = np.zeros((10, 3), dtype=np.float32)
  ds = dataset_ops.Dataset.from_tensor_slices(features).repeat(100)
  return batch_wrapper(ds, 10, distribution)
# Distribution strategies exercised by the parameterized tests below.
strategies = [combinations.default_strategy,
              combinations.one_device_strategy,
              combinations.mirrored_strategy_with_gpu_and_cpu,
              combinations.mirrored_strategy_with_two_gpus,
              combinations.tpu_strategy_one_step]
def strategy_combinations():
  """Cross every tested distribution strategy with graph mode."""
  return combinations.combine(distribution=strategies, mode=['graph'])
def strategy_and_optimizer_combinations():
  """Cross the tested strategies with the v1 optimizers, in graph mode."""
  optimizers = [
      combinations.adagrad_optimizer_v1_fn,
      combinations.adam_optimizer_v1_fn,
      combinations.gradient_descent_optimizer_v1_fn,
      combinations.rmsprop_optimizer_v1_fn,
  ]
  return combinations.combine(
      distribution=strategies, optimizer=optimizers, mode=['graph'])
class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase):
  """Tests Keras-to-Estimator conversion under DistributionStrategy."""
  def setUp(self):
    """Create a temp model dir and a two-GPU MirroredStrategy fixture."""
    self._base_dir = os.path.join(self.get_temp_dir(),
                                  'keras_mirrored_strategy_test')
    gfile.MakeDirs(self._base_dir)
    self._config = run_config_lib.RunConfig(
        tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)
    self._dist = mirrored_strategy.MirroredStrategy(
        devices=['/device:GPU:0', '/device:GPU:1'])
  def tearDown(self):
    """Clear cached summary writers and remove the temp model dir."""
    writer_cache.FileWriterCache.clear()
    if os.path.isdir(self._base_dir):
      gfile.DeleteRecursively(self._base_dir)
  def test_train_functional_with_distribution_strategy(self):
    """Functional model: distributed estimator training reduces eval loss."""
    dist = mirrored_strategy.MirroredStrategy(
        devices=['/device:GPU:0', '/device:GPU:1'])
    keras_model = simple_functional_model()
    keras_model.compile(
        loss='categorical_crossentropy',
        metrics=[keras.metrics.CategoricalAccuracy()],
        optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
    config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
                                      model_dir=self._base_dir,
                                      train_distribute=dist,
                                      eval_distribute=dist)
    with self.cached_session():
      est_keras = keras_lib.model_to_estimator(
          keras_model=keras_model, config=config)
      # Evaluate before and after training; loss should strictly improve.
      before_eval_results = est_keras.evaluate(
          input_fn=get_ds_test_input_fn, steps=1)
      est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
      after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
                                              steps=1)
      self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
    writer_cache.FileWriterCache.clear()
    gfile.DeleteRecursively(self._config.model_dir)
  def test_train_sequential_with_distribution_strategy(self):
    """Sequential model: distributed estimator training reduces eval loss."""
    dist = mirrored_strategy.MirroredStrategy(
        devices=['/device:GPU:0', '/device:GPU:1'])
    keras_model = simple_sequential_model()
    keras_model.compile(
        loss='categorical_crossentropy',
        metrics=[keras.metrics.CategoricalAccuracy()],
        optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
    config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
                                      model_dir=self._base_dir,
                                      train_distribute=dist)
    with self.cached_session():
      est_keras = keras_lib.model_to_estimator(
          keras_model=keras_model, config=config)
      before_eval_results = est_keras.evaluate(
          input_fn=get_ds_test_input_fn, steps=1)
      est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
      after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
                                              steps=1)
      self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
    writer_cache.FileWriterCache.clear()
    gfile.DeleteRecursively(self._config.model_dir)
  def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self):
    """Multi-input/multi-output model trained via dict-structured input_fns."""
    train_data, test_data = get_multi_inputs_multi_outputs_data()
    def train_input_fn():
      input_dict = {
          'input_a': train_data['input_a'],
          'input_b': train_data['input_b'],
          'input_m': train_data['input_m'].astype(np.str)
      }
      output_dict = {
          'dense_2': train_data['output_c'],
          'dense_3': train_data['output_d']
      }
      return dataset_ops.Dataset.from_tensor_slices((input_dict,
                                                     output_dict)).batch(16)
    def eval_input_fn():
      input_dict = {
          'input_a': test_data['input_a'],
          'input_b': test_data['input_b'],
          'input_m': test_data['input_m'].astype(np.str)
      }
      output_dict = {
          'dense_2': test_data['output_c'],
          'dense_3': test_data['output_d']
      }
      return dataset_ops.Dataset.from_tensor_slices((input_dict,
                                                     output_dict)).batch(16)
    self.do_test_multi_inputs_multi_outputs_with_input_fn(
        train_input_fn, eval_input_fn)
  def do_test_multi_inputs_multi_outputs_with_input_fn(self, train_input_fn,
                                                       eval_input_fn):
    """Shared body: train the multi-IO model and check eval loss improves."""
    config = run_config_lib.RunConfig(
        tf_random_seed=_RANDOM_SEED,
        model_dir=self._base_dir,
        train_distribute=self._dist)
    with self.cached_session():
      model = multi_inputs_multi_outputs_model()
      est_keras = keras_lib.model_to_estimator(keras_model=model, config=config)
      baseline_eval_results = est_keras.evaluate(
          input_fn=eval_input_fn, steps=1)
      est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)
      eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
      self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
  def test_keras_optimizer_with_distribution_strategy(self):
    """Keras-native optimizers are rejected under DistributionStrategy."""
    dist = mirrored_strategy.MirroredStrategy(
        devices=['/device:GPU:0', '/device:GPU:1'])
    keras_model = simple_sequential_model()
    keras_model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.rmsprop(lr=0.01))
    config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
                                      model_dir=self._base_dir,
                                      train_distribute=dist)
    with self.cached_session():
      est_keras = keras_lib.model_to_estimator(keras_model=keras_model,
                                               config=config)
      with self.assertRaisesRegexp(ValueError,
                                   'Only TensorFlow native optimizers are '
                                   'supported with DistributionStrategy.'):
        est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)
    writer_cache.FileWriterCache.clear()
    gfile.DeleteRecursively(self._config.model_dir)
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
                                              parameterized.TestCase):
  """Tests fit/evaluate/predict with numpy inputs under DistributionStrategy."""
  @combinations.generate(strategy_combinations())
  def test_creating_var_with_numpy_arrays(self, distribution):
    """A numpy array copied into a distributed variable keeps its values."""
    with self.cached_session():
      x = np.asarray(np.random.random((64, 3)), dtype=np.float32)
      var_x = distributed_training_utils.get_var_for_numpy(distribution, x)
      val = self.evaluate(var_x.value())
      # Verify that the numpy value is copied to the variable.
      self.assertAllEqual(x, val)
  def test_calculating_batch_params(self):
    """get_input_batch_params validates batch_size and derives step counts."""
    # This verifies that we calculate the number of steps when the batch size
    # is specified.
    with self.cached_session():
      # 64 is the number of input samples.
      inputs = np.zeros((64, 3), dtype=np.float32)
      # The number of towers is equal to 3.
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0',
                                                     '/device:GPU:1'])
      with self.assertRaisesRegexp(ValueError, 'Please specify a batch_size '
                                   'that is smaller than'):
        # The batch size(128) is larger than the number of input
        # samples(64).
        distributed_training_utils.get_input_batch_params(inputs,
                                                          128,
                                                          strategy)
      with self.assertRaisesRegexp(ValueError, 'is smaller than the number '
                                   'of towers'):
        # The batch size(32) * num_towers(3) is 96 which is greater than the
        # number of input samples(64).
        distributed_training_utils.get_input_batch_params(inputs,
                                                          32,
                                                          strategy)
      # The number of towers now is equal to 2.
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0'])
      # 32 is the batch size per tower.
      steps = distributed_training_utils.get_input_batch_params(inputs,
                                                                32,
                                                                strategy)
      # The number of batches is the ratio of input samples(64) to
      # batch size(32) which is 2. The number of steps(1) is the ratio of
      # number of batches(2) to the number of towers(2).
      self.assertEqual(steps, 1)
      # 16 is the batch size per tower.
      steps = distributed_training_utils.get_input_batch_params(inputs,
                                                                16,
                                                                strategy)
      # The number of batches is the ratio of input samples(64) to
      # batch size(16) which is 4. The number of steps(2) is the ratio of
      # number of batches(4) to the number of towers(2).
      self.assertEqual(steps, 2)
  def test_calculating_batch_size(self):
    """The per-device batch dimension is derived from steps and tower count."""
    with self.cached_session():
      # 64 is the number of input samples.
      inputs = np.zeros((64, 3), dtype=np.float32)
      targets = np.zeros((64, 4), dtype=np.float32)
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0'])
      strategy._require_static_shapes = True
      model.compile(optimizer, loss, distribute=strategy)
      iterator = model._distribution_standardize_user_data(inputs,
                                                           targets,
                                                           batch_size=None,
                                                           check_steps=True,
                                                           steps_name='steps',
                                                           steps=3)
      # The global batch size(21) across all towers is the ratio of the input
      # samples(64) to the steps(3).
      # The batch size(10) per device is the ratio of the global batch size(21)
      # to the number of towers(2).
      # The global batch size and batch size are rounded integer values.
      self.assertEqual(10, distributed_training_utils.get_batch_dimension(
          iterator._iterator))
  @combinations.generate(strategy_combinations())
  def test_calling_model_with_numpy_arrays(self, distribution):
    """fit/evaluate/predict accept plain numpy arrays under each strategy."""
    with self.cached_session():
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae']
      model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
      inputs = np.zeros((64, 3), dtype=np.float32)
      targets = np.zeros((64, 4), dtype=np.float32)
      # Call fit with validation data
      model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
                validation_data=(inputs, targets))
      # TODO(anjalisridhar): We need tests for when the batch size and steps are
      # smaller and results in a 0 batch_size and steps value.
      model.evaluate(inputs, targets)
      # with steps
      model.evaluate(inputs, targets, steps=2)
      # with batch_size
      model.evaluate(inputs, targets, batch_size=8)
      model.predict(inputs)
      # with steps
      model.predict(inputs, steps=2)
      # with batch_size
      model.predict(inputs, batch_size=8)
  @combinations.generate(strategy_combinations())
  def test_calling_model_with_nested_numpy_arrays(self, distribution):
    """fit/evaluate/predict accept lists of numpy arrays (multi-IO model)."""
    with self.cached_session():
      a = keras.layers.Input(shape=(3,), name='input_a')
      b = keras.layers.Input(shape=(3,), name='input_b')
      dense = keras.layers.Dense(4, name='dense')
      c = dense(a)
      d = dense(b)
      e = keras.layers.Dropout(0.5, name='dropout')(c)
      model = keras.models.Model([a, b], [d, e])
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(optimizer, loss, distribute=distribution)
      input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]
      output_d_np = np.asarray(np.random.random((64, 4)), dtype=np.float32)
      output_e_np = np.asarray(np.random.random((64, 4)), dtype=np.float32)
      targets = [output_d_np, output_e_np]
      # Call fit with validation data
      model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
      # TODO(anjalisridhar): We need tests for when the batch size and steps are
      # smaller and results in a 0 batch_size and steps value.
      model.evaluate(inputs, targets)
      # with steps
      model.evaluate(inputs, targets, steps=2)
      # with batch_size
      model.evaluate(inputs, targets, batch_size=8)
      model.predict(inputs)
      # with steps
      model.predict(inputs, steps=2)
      # with batch_size
      model.predict(inputs, batch_size=8)
class TestDistributionStrategyWithDatasets(test.TestCase,
                                           parameterized.TestCase):
  """Tests fit/evaluate/predict with tf.data inputs under DistributionStrategy."""
  @combinations.generate(strategy_combinations())
  def test_calling_model_on_same_dataset(self, distribution):
    """Fitting twice on the same dataset instance works for every strategy."""
    with self.cached_session():
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]
      model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
      dataset = get_dataset(distribution)
      # Call fit with validation data
      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
                validation_data=dataset, validation_steps=2)
      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
                validation_data=dataset, validation_steps=2)
      model.predict(get_predict_dataset(distribution), steps=2)
  @combinations.generate(strategy_combinations())
  def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
    """Interleaved fit+validation matches a manual fit-then-evaluate loop."""
    with self.cached_session():
      loss = 'mse'
      user_controlled_model = get_model()
      user_controlled_optimizer = gradient_descent.GradientDescentOptimizer(
          0.001)
      user_controlled_metrics = ['mae', keras.metrics.CategoricalAccuracy()]
      user_controlled_model.compile(user_controlled_optimizer, loss,
                                    metrics=user_controlled_metrics,
                                    distribute=distribution)
      interleaved_model = get_model()
      interleaved_optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      interleaved_metrics = ['mae', keras.metrics.CategoricalAccuracy()]
      interleaved_model.compile(interleaved_optimizer, loss,
                                metrics=interleaved_metrics,
                                distribute=distribution)
      dataset = get_dataset(distribution)
      # Call fit with validation interleaved
      interleaved_output = interleaved_model.fit(dataset, epochs=2,
                                                 steps_per_epoch=2, verbose=0,
                                                 validation_data=dataset,
                                                 validation_steps=2)
      # Manually control the validation running after each epoch.
      user_controlled_output = []
      for _ in range(2):
        user_controlled_model.fit(
            dataset, epochs=1, steps_per_epoch=2, verbose=0)
        user_controlled_output.append(
            user_controlled_model.evaluate(dataset, steps=2))
      self.assertEqual(interleaved_output.history['val_loss'],
                       [x[0] for x in user_controlled_output])
      self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
                       [x[1] for x in user_controlled_output])
      self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
                       [x[2] for x in user_controlled_output])
  # TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work
  # as clone_model's input_tensors argument only seems to accept list and not
  # tuples or dict.
  def test_fit_with_tuple_and_dict_dataset_inputs(self):
    """fit accepts datasets yielding tuple- and dict-structured inputs."""
    with self.cached_session():
      a = keras.layers.Input(shape=(3,), name='input_a')
      b = keras.layers.Input(shape=(3,), name='input_b')
      dense = keras.layers.Dense(4, name='dense')
      c = dense(a)
      d = dense(b)
      e = keras.layers.Dropout(0.5, name='dropout')(c)
      model = keras.models.Model([a, b], [d, e])
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
                                                     '/device:CPU:0'])
      model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
      input_a_np = np.random.random((10, 3))
      input_b_np = np.random.random((10, 3))
      output_d_np = np.random.random((10, 4))
      output_e_np = np.random.random((10, 4))
      # Test with tuples
      dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
          (input_a_np, input_b_np), (output_d_np, output_e_np)))
      dataset_tuple = dataset_tuple.repeat(100)
      dataset_tuple = dataset_tuple.batch(10)
      model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
      # Test with dict
      dataset_dict = dataset_ops.Dataset.from_tensor_slices((
          {'input_a': input_a_np, 'input_b': input_b_np},
          (output_d_np, output_e_np)))
      dataset_dict = dataset_dict.repeat(100)
      dataset_dict = dataset_dict.batch(10)
      model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
  @combinations.generate(strategy_combinations())
  def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
    """fit/evaluate/predict all run on a dataset for every strategy."""
    with self.cached_session():
      model = get_model()
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]
      model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
      dataset = get_dataset(distribution)
      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
      model.evaluate(dataset, steps=2, verbose=1)
      model.predict(get_predict_dataset(distribution), steps=2)
  @combinations.generate(strategy_and_optimizer_combinations())
  def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
    """fit/evaluate/predict run for every strategy x v1 optimizer combo."""
    with self.cached_session():
      model = get_model()
      loss = 'mse'
      model.compile(optimizer(), loss, distribute=distribution)
      dataset = get_dataset(distribution)
      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
      model.evaluate(dataset, steps=2, verbose=1)
      model.predict(get_predict_dataset(distribution), steps=2)
  def test_dataset_input_shape_validation(self):
    """Unbatched or wrongly-shaped datasets raise a shape ValueError."""
    with self.cached_session():
      model = get_model()
      optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
                                                     '/device:GPU:0'])
      model.compile(optimizer, loss, distribute=strategy)
      # User forgets to batch the dataset
      inputs = np.zeros((10, 3), dtype=np.float32)
      targets = np.zeros((10, 4), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
      # Wrong input shape
      inputs = np.zeros((10, 5), dtype=np.float32)
      targets = np.zeros((10, 4), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)
      with self.assertRaisesRegexp(ValueError,
                                   'expected input to have shape'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
  @combinations.generate(combinations.combine(
      distribution=[combinations.tpu_strategy_one_step],
      mode=['graph']))
  def test_dataset_input_shape_fully_defined(self, distribution):
    """TPU strategy rejects datasets whose shapes are not fully defined."""
    with self.cached_session():
      model = get_model()
      optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(optimizer, loss, distribute=distribution)
      dataset = get_dataset(distribution)
      # Input shapes are not fully known. Batch dimension is unknown as we are
      # not using the drop_remainder argument.
      dataset = dataset.repeat(100).batch(10)
      with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
  def test_learning_phase_value(self):
    """Dropout sees the right learning-phase flag in fit/evaluate/predict."""
    # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
    # meaningful values. Currently we don't pass the learning phase if the
    # Lambda layer uses the learning phase.
    with self.cached_session():
      x = keras.layers.Input(shape=(1,), name='input')
      y = keras.layers.Dense(1, kernel_initializer='ones')(x)
      z = keras.layers.Dropout(0.9999)(y)
      model = keras.Model(x, z)
      initial_weights = model.get_weights()
      optimizer = gradient_descent.GradientDescentOptimizer(0.005)
      loss = 'mse'
      metrics = ['acc']
      strategy = mirrored_strategy.MirroredStrategy(
          ['/device:GPU:0', '/device:GPU:1'])
      model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
      inputs = np.ones((10, 1), dtype=np.float32)
      targets = np.ones((10, 1), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat().batch(8)
      hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
      self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
      model.set_weights(initial_weights)
      evaluate_output = model.evaluate(dataset, steps=20)
      self.assertAlmostEqual(evaluate_output[1], 1, 0)
      inputs = np.ones((10, 1), dtype=np.float32)
      predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
      predict_dataset = predict_dataset.repeat().batch(5)
      output = model.predict(predict_dataset, steps=10)
      ref_output = np.ones((50, 1), dtype=np.float32)
      self.assertArrayNear(output[0], ref_output, 1e-1)
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
def test_validating_dataset_input_tensors_with_shape_mismatch(self):
with self.cached_session():
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
a = constant_op.constant([1, 2], shape=(1, 2))
b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))
x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
with strategy.scope():
# Removed device and input tensor shape details from the error message
# since the order of the device and the corresponding input tensor shape
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor shapes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
distributed_training_utils.validate_distributed_dataset_inputs(
strategy, x, y)
def test_validating_dataset_input_tensors_with_dtype_mismatch(self):
with self.cached_session():
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
with strategy.scope():
# Removed device and input tensor dtype details from the error message
# since the order of the device and the corresponding input tensor dtype
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor dtypes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
distributed_training_utils.validate_distributed_dataset_inputs(
strategy, x, y)
def test_unsupported_features(self):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
dataset = get_dataset(strategy)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not '
'supported when input `x` is a dataset or a '
'dataset iterator.+'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
NotImplementedError, '`sample_weight` is currently not supported '
'when using DistributionStrategy.'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test with not specifying the `steps` argument.
with self.assertRaisesRegexp(
ValueError, 'you should specify the `steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.predict(dataset, verbose=0)
def test_calling_with_unsupported_predefined_callbacks(self):
  """Callbacks that tweak the learning rate or collect histograms must be
  rejected when the model is compiled with a DistributionStrategy."""
  with self.cached_session():
    compiled_model = get_model()
    sgd = gradient_descent.GradientDescentOptimizer(0.001)
    strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
                                                   '/device:GPU:0'])
    compiled_model.compile(sgd, 'mse', metrics=['mae'], distribute=strategy)
    ds = get_dataset(strategy)

    def schedule(_):
      return 0.001

    # Each (callback, expected error) pair below is unsupported with a
    # DistributionStrategy and must raise ValueError from fit().
    unsupported_cases = (
        (keras.callbacks.LearningRateScheduler(schedule),
         'LearningRateScheduler callback is not '
         'supported with DistributionStrategy.'),
        (keras.callbacks.ReduceLROnPlateau(),
         'ReduceLROnPlateau callback is not '
         'supported with DistributionStrategy.'),
        (keras.callbacks.TensorBoard(histogram_freq=10),
         'histogram_freq in the TensorBoard callback '
         'is not supported when using '
         'DistributionStrategy.'),
    )
    for bad_callback, expected_message in unsupported_cases:
      with self.assertRaisesRegexp(ValueError, expected_message):
        compiled_model.fit(ds, epochs=1, steps_per_epoch=2, verbose=0,
                           callbacks=[bad_callback])
class TestDistributionStrategyWithLossMasking(test.TestCase):
  """Checks that masked timesteps contribute zero loss under a strategy."""

  # TODO(priyag): Enable all strategies for this test. Currently it does not
  # work for TPU due to some invalid datatype.
  def test_masking(self):
    with self.cached_session():
      np.random.seed(1337)
      # The second sample is entirely the mask value (0), so with a
      # kernel initialized to ones the unmasked sample predicts its
      # target exactly and the reported loss is 0.
      inputs = np.array([[[1], [1]], [[0], [0]]])
      targets = np.array([[[1], [1]], [[1], [1]]])

      masked_model = keras.models.Sequential()
      masked_model.add(
          keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
      masked_model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(1, kernel_initializer='one')))

      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
                                                     '/device:GPU:0'])
      masked_model.compile(
          loss='mse',
          optimizer=gradient_descent.GradientDescentOptimizer(0.01),
          distribute=strategy)

      ds = dataset_ops.Dataset.from_tensor_slices(
          (inputs, targets)).repeat(100).batch(10)
      history = masked_model.fit(x=ds, epochs=1, steps_per_epoch=2)
      self.assertEqual(history.history['loss'][0], 0)
class TestDistributionStrategyWithNormalizationLayer(
    test.TestCase, parameterized.TestCase):
  """Verifies BatchNormalization statistics under each strategy combination."""

  @combinations.generate(strategy_combinations())
  def test_batchnorm_correctness(self, distribution):
    with self.cached_session():
      norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
      bn_model = keras.models.Sequential()
      bn_model.add(norm)
      bn_model.compile(
          loss='mse',
          optimizer=gradient_descent.GradientDescentOptimizer(0.01),
          distribute=distribution)

      # centered on 5.0, variance 10.0
      samples = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
      samples = samples.astype('float32')

      train_ds = dataset_ops.Dataset.from_tensor_slices((samples, samples))
      train_ds = batch_wrapper(train_ds.repeat(100), 32, distribution)
      predict_ds = dataset_ops.Dataset.from_tensor_slices(samples)
      predict_ds = batch_wrapper(predict_ds.repeat(100), 32, distribution)

      bn_model.fit(train_ds, epochs=4, verbose=0, steps_per_epoch=10)
      out = bn_model.predict(predict_ds, steps=2)
      # Undo the learned affine transform; the remaining output should be
      # approximately zero-mean, unit-variance.
      out -= keras.backend.eval(norm.beta)
      out /= keras.backend.eval(norm.gamma)

      np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
      np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class TestDistributionStrategyCorrectness(test.TestCase,
                                          parameterized.TestCase):
  """Compares training/eval results with and without a DistributionStrategy."""

  @combinations.generate(strategy_combinations())
  def test_metric_correctness(self, distribution):
    """An identity model on identity targets must report perfect accuracy."""
    with self.cached_session():
      keras.backend.set_image_data_format('channels_last')
      num_samples = 10000
      # Binary features; targets equal the features, so a Dense(1) layer
      # initialized to ones already predicts them exactly.
      x_train = np.random.randint(0, 2, num_samples)
      x_train = np.reshape(x_train, (num_samples, 1))
      y_train = x_train
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')

      # Create identity model.
      model = keras.Sequential()
      model.add(
          keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
      model.compile(
          loss=keras.losses.mean_squared_error,
          optimizer=gradient_descent.GradientDescentOptimizer(0.5),
          metrics=[keras.metrics.BinaryAccuracy()],
          distribute=distribution)

      # Split the global batch evenly across towers.
      batch_size = 64
      batch_size //= distribution.num_towers
      train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
      train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
      history = model.fit(x=train_dataset, epochs=1, steps_per_epoch=10)
      self.assertEqual(history.history['binary_accuracy'], [1.0])

  @combinations.generate(strategy_combinations())
  def test_correctness(self, distribution):
    """Weights and predictions must match between distributed and plain runs."""
    with self.cached_session():
      keras.backend.set_image_data_format('channels_last')
      num_samples = 10000

      # Train and predict datasets are created with the same input numpy arrays.
      x_train = np.random.rand(num_samples, 1)
      y_train = 3 * x_train
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')

      # The model is built once and the initial weights are saved.
      # This is used to initialize the model for both the distribution and
      # non-distribution run.
      model = keras.Sequential()
      model.add(keras.layers.Dense(1, input_shape=(1,)))
      initial_weights = model.get_weights()

      def fit_and_predict(with_distribution=None):
        # Reset to the shared initial weights so both runs are comparable.
        model.set_weights(initial_weights)
        model.compile(
            loss=keras.losses.mean_squared_error,
            optimizer=gradient_descent.GradientDescentOptimizer(0.5),
            distribute=with_distribution)

        batch_size = 64
        if with_distribution:
          batch_size //= with_distribution.num_towers
        train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train,
                                                                y_train))
        # NOTE(review): batching uses the outer `distribution`, not
        # `with_distribution`, even in the non-distributed run — confirm
        # this is intentional (it keeps the batching behavior identical
        # across the two runs).
        train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
        # We have initialized the model to the same weight for the distribution
        # and non-distribution run. If you want to initialize the model to
        # random weights for each run, you need to run the model through the
        # entire dataset at least once to ensure that the weights converge to
        # the same value.
        model.fit(x=train_dataset, epochs=1, steps_per_epoch=10)
        weights = model.get_weights()

        x_predict = [[1.], [2.], [3.], [4.]]
        predict_batch_size = 4
        if with_distribution:
          predict_batch_size //= with_distribution.num_towers
        predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
        predict_dataset = batch_wrapper(predict_dataset,
                                        predict_batch_size, distribution)
        predict_result = model.predict(predict_dataset, steps=1)
        predict_result = np.reshape(predict_result, (4, 1))

        return weights, predict_result

      wts_with_ds, predict_with_ds = fit_and_predict(
          with_distribution=distribution)
      wts_without_ds, predict_without_ds = fit_and_predict(
          with_distribution=None)

      # Verify that the weights are the same within some limits of tolerance.
      np.testing.assert_allclose(wts_with_ds[0], wts_without_ds[0], rtol=1e-3)
      # Verify that the predicted outputs are the same within some limits of
      # tolerance.
      np.testing.assert_allclose(predict_with_ds, predict_without_ds, rtol=1e-3)
# TODO(priyag): Add a test for TPUStrategy with steps_per_run > 1.
if __name__ == '__main__':
  # Run all test cases in this file under the TensorFlow test runner.
  test.main()
| {
"content_hash": "04bad8d52e20f4d8e31b33f7785dc660",
"timestamp": "",
"source": "github",
"line_count": 1015,
"max_line_length": 80,
"avg_line_length": 41.70147783251232,
"alnum_prop": 0.6219198147754389,
"repo_name": "alshedivat/tensorflow",
"id": "f8714b52b23f96379db8a793e6ecd4daf3a9adbe",
"size": "43016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/keras_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "439824"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50398044"
},
{
"name": "CMake",
"bytes": "199209"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1276639"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "871083"
},
{
"name": "Jupyter Notebook",
"bytes": "2604347"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "61311"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40038696"
},
{
"name": "RobotFramework",
"bytes": "890"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "486609"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
# Blueprint for the "opportunities" pages, served under the /beacon prefix
# with package-relative static and template directories.
from flask import Blueprint

blueprint = Blueprint(
    'opportunities', __name__, url_prefix='/beacon',
    static_folder='../static', template_folder='../templates'
)

# Imported at the bottom, after `blueprint` exists — presumably so the views
# module can import `blueprint` from this package without a circular import
# at load time; confirm against the views module.
from . import views
| {
"content_hash": "dab562ab42ae363e1f14d6ce09607cbb",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 61,
"avg_line_length": 23.75,
"alnum_prop": 0.6842105263157895,
"repo_name": "codeforamerica/pittsburgh-purchasing-suite",
"id": "a00ee71db666dd7185b215b5538d7cb5ff59b029",
"size": "215",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "purchasing/opportunities/front/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24436"
},
{
"name": "HTML",
"bytes": "316825"
},
{
"name": "JavaScript",
"bytes": "25552"
},
{
"name": "Makefile",
"bytes": "560"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "675910"
},
{
"name": "Shell",
"bytes": "3373"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import copy
import six
import ujson
from django.utils.translation import ugettext as _
from django.conf import settings
from importlib import import_module
from six.moves import filter, map
from typing import (
Any, Dict, Iterable, Optional, Sequence, Set, Text, Tuple
)
session_engine = import_module(settings.SESSION_ENGINE)
from zerver.lib.alert_words import user_alert_words
from zerver.lib.attachments import user_attachments
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.request import JsonableError
from zerver.lib.actions import validate_user_access_to_subscribers_helper, \
do_get_streams, get_default_streams_for_realm, \
gather_subscriptions_helper, get_realm_aliases, \
get_status_dict, streams_to_dicts_sorted
from zerver.tornado.event_queue import request_event_queue, get_user_events
from zerver.models import Client, Message, UserProfile, \
get_user_profile_by_email, get_active_user_dicts_in_realm, \
realm_filters_for_realm, get_owned_bot_dicts
def get_realm_user_dicts(user_profile):
    # type: (UserProfile) -> List[Dict[str, Text]]
    """Return a slimmed-down dict for every active user in the caller's realm.

    NOTE(review): the type comment references `List`, which is not among the
    names imported from `typing` at the top of this file — confirm linters
    accept this.
    """
    def slim(userdict):
        # Keep only the fields exposed to clients, and rename 'id' to
        # 'user_id'.
        return {'email': userdict['email'],
                'user_id': userdict['id'],
                'is_admin': userdict['is_realm_admin'],
                'is_bot': userdict['is_bot'],
                'full_name': userdict['full_name']}

    return [slim(row)
            for row in get_active_user_dicts_in_realm(user_profile.realm)]
# Fetch initial data. When event_types is not specified, clients want
# all event types. Whenever you add new code to this function, you
# should also add corresponding events for changes in the data
# structures and new code to apply_events (and add a test in EventsRegisterTest).
def fetch_initial_state_data(user_profile, event_types, queue_id):
    # type: (UserProfile, Optional[Iterable[str]], str) -> Dict[str, Any]
    """Build the initial register() state payload for `user_profile`.

    Only the sections selected by `event_types` are populated; when
    `event_types` is None, every section is included.
    """
    state = {'queue_id': queue_id}  # type: Dict[str, Any]

    # want() reports whether the client asked for a given event type.
    if event_types is None:
        want = lambda msg_type: True
    else:
        want = set(event_types).__contains__

    if want('alert_words'):
        state['alert_words'] = user_alert_words(user_profile)

    if want('attachments'):
        state['attachments'] = user_attachments(user_profile)

    if want('message'):
        # The client should use get_old_messages() to fetch messages
        # starting with the max_message_id. They will get messages
        # newer than that ID via get_events()
        messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
        if messages:
            state['max_message_id'] = messages[0].id
        else:
            # This user has no messages at all.
            state['max_message_id'] = -1

    if want('muted_topics'):
        state['muted_topics'] = ujson.loads(user_profile.muted_topics)

    if want('pointer'):
        state['pointer'] = user_profile.pointer

    if want('presence'):
        state['presences'] = get_status_dict(user_profile)

    if want('realm'):
        # Realm-wide settings, copied field by field from the realm model.
        state['realm_name'] = user_profile.realm.name
        state['realm_restricted_to_domain'] = user_profile.realm.restricted_to_domain
        state['realm_invite_required'] = user_profile.realm.invite_required
        state['realm_invite_by_admins_only'] = user_profile.realm.invite_by_admins_only
        state['realm_authentication_methods'] = user_profile.realm.authentication_methods_dict()
        state['realm_create_stream_by_admins_only'] = user_profile.realm.create_stream_by_admins_only
        state['realm_add_emoji_by_admins_only'] = user_profile.realm.add_emoji_by_admins_only
        state['realm_allow_message_editing'] = user_profile.realm.allow_message_editing
        state['realm_message_content_edit_limit_seconds'] = user_profile.realm.message_content_edit_limit_seconds
        state['realm_default_language'] = user_profile.realm.default_language
        state['realm_waiting_period_threshold'] = user_profile.realm.waiting_period_threshold

    if want('realm_domain'):
        state['realm_domain'] = user_profile.realm.domain

    if want('realm_domains'):
        state['realm_domains'] = get_realm_aliases(user_profile.realm)

    if want('realm_emoji'):
        state['realm_emoji'] = user_profile.realm.get_emoji()

    if want('realm_filters'):
        state['realm_filters'] = realm_filters_for_realm(user_profile.realm_id)

    if want('realm_user'):
        state['realm_users'] = get_realm_user_dicts(user_profile)

    if want('realm_bot'):
        state['realm_bots'] = get_owned_bot_dicts(user_profile)

    if want('referral'):
        state['referrals'] = {'granted': user_profile.invites_granted,
                              'used': user_profile.invites_used}

    if want('subscription'):
        subscriptions, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)
        state['subscriptions'] = subscriptions
        state['unsubscribed'] = unsubscribed
        state['never_subscribed'] = never_subscribed

    if want('update_message_flags'):
        # There's no initial data for message flag updates, client will
        # get any updates during a session from get_events()
        pass

    if want('stream'):
        state['streams'] = do_get_streams(user_profile)
    if want('default_streams'):
        state['realm_default_streams'] = streams_to_dicts_sorted(get_default_streams_for_realm(user_profile.realm))

    if want('update_display_settings'):
        # Per-user display preferences.
        state['twenty_four_hour_time'] = user_profile.twenty_four_hour_time
        state['left_side_userlist'] = user_profile.left_side_userlist
        default_language = user_profile.default_language
        state['default_language'] = default_language

    if want('update_global_notifications'):
        # Per-user notification preferences, copied field by field.
        state['enable_stream_desktop_notifications'] = user_profile.enable_stream_desktop_notifications
        state['enable_stream_sounds'] = user_profile.enable_stream_sounds
        state['enable_desktop_notifications'] = user_profile.enable_desktop_notifications
        state['enable_sounds'] = user_profile.enable_sounds
        state['enable_offline_email_notifications'] = user_profile.enable_offline_email_notifications
        state['enable_offline_push_notifications'] = user_profile.enable_offline_push_notifications
        state['enable_online_push_notifications'] = user_profile.enable_online_push_notifications
        state['enable_digest_emails'] = user_profile.enable_digest_emails

    return state
def apply_events(state, events, user_profile):
    # type: (Dict[str, Any], Iterable[Dict[str, Any]], UserProfile) -> None
    """Mutate `state` (as built by fetch_initial_state_data) in place so it
    reflects each of `events`, dispatched on event['type'] (and, for most
    types, event['op']). Raises ValueError on an unknown event type."""
    for event in events:
        if event['type'] == "message":
            state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
        elif event['type'] == "pointer":
            state['pointer'] = max(state['pointer'], event['pointer'])
        elif event['type'] == "realm_user":
            person = event['person']

            def our_person(p):
                # type: (Dict[str, Any]) -> bool
                # True when p refers to the same user as this event's person.
                return p['user_id'] == person['user_id']

            if event['op'] == "add":
                state['realm_users'].append(person)
            elif event['op'] == "remove":
                state['realm_users'] = [user for user in state['realm_users'] if not our_person(user)]
            elif event['op'] == 'update':
                for p in state['realm_users']:
                    if our_person(p):
                        # In the unlikely event that the current user
                        # just changed to/from being an admin, we need
                        # to add/remove the data on all bots in the
                        # realm. This is ugly and probably better
                        # solved by removing the all-realm-bots data
                        # given to admin users from this flow.
                        if ('is_admin' in person and 'realm_bots' in state and
                                user_profile.email == person['email']):
                            if p['is_admin'] and not person['is_admin']:
                                state['realm_bots'] = []
                            if not p['is_admin'] and person['is_admin']:
                                state['realm_bots'] = get_owned_bot_dicts(user_profile)
                        # Now update the person
                        p.update(person)
        elif event['type'] == 'realm_bot':
            if event['op'] == 'add':
                state['realm_bots'].append(event['bot'])
            if event['op'] == 'remove':
                email = event['bot']['email']
                state['realm_bots'] = [b for b in state['realm_bots'] if b['email'] != email]
            if event['op'] == 'update':
                # Merge the updated fields into the matching bot dict.
                for bot in state['realm_bots']:
                    if bot['email'] == event['bot']['email']:
                        bot.update(event['bot'])
        elif event['type'] == 'stream':
            if event['op'] == 'create':
                for stream in event['streams']:
                    if not stream['invite_only']:
                        stream_data = copy.deepcopy(stream)
                        stream_data['subscribers'] = []
                        # Add stream to never_subscribed (if not invite_only)
                        state['never_subscribed'].append(stream_data)
            if event['op'] == 'delete':
                deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}
                state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]
                state['never_subscribed'] = [stream for stream in state['never_subscribed'] if
                                             stream['stream_id'] not in deleted_stream_ids]
            if event['op'] == 'update':
                # For legacy reasons, we call stream data 'subscriptions' in
                # the state var here, for the benefit of the JS code.
                for obj in state['subscriptions']:
                    if obj['name'].lower() == event['name'].lower():
                        obj[event['property']] = event['value']
                # Also update the pure streams data
                for stream in state['streams']:
                    if stream['name'].lower() == event['name'].lower():
                        prop = event['property']
                        if prop in stream:
                            stream[prop] = event['value']
            elif event['op'] == "occupy":
                state['streams'] += event['streams']
            elif event['op'] == "vacate":
                stream_ids = [s["stream_id"] for s in event['streams']]
                state['streams'] = [s for s in state['streams'] if s["stream_id"] not in stream_ids]
        elif event['type'] == 'default_streams':
            state['realm_default_streams'] = event['default_streams']
        elif event['type'] == 'realm':
            if event['op'] == "update":
                # Single realm setting changed; state keys are 'realm_'-prefixed.
                field = 'realm_' + event['property']
                state[field] = event['value']
            elif event['op'] == "update_dict":
                # Several realm settings changed at once.
                for key, value in event['data'].items():
                    state['realm_' + key] = value
        elif event['type'] == "subscription":
            if event['op'] in ["add"]:
                # Convert the user_profile IDs to emails since that's what register() returns
                # TODO: Clean up this situation
                for item in event["subscriptions"]:
                    item["subscribers"] = [get_user_profile_by_email(email).id for email in item["subscribers"]]

            def name(sub):
                # type: (Dict[str, Any]) -> Text
                # Case-insensitive key used to match subscriptions by name.
                return sub['name'].lower()

            if event['op'] == "add":
                added_names = set(map(name, event["subscriptions"]))
                was_added = lambda s: name(s) in added_names
                # add the new subscriptions
                state['subscriptions'] += event['subscriptions']
                # remove them from unsubscribed if they had been there
                state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]
                # remove them from never_subscribed if they had been there
                state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]
            elif event['op'] == "remove":
                removed_names = set(map(name, event["subscriptions"]))
                was_removed = lambda s: name(s) in removed_names
                # Find the subs we are affecting.
                removed_subs = list(filter(was_removed, state['subscriptions']))
                # Remove our user from the subscribers of the removed subscriptions.
                for sub in removed_subs:
                    sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]
                # We must effectively copy the removed subscriptions from subscriptions to
                # unsubscribe, since we only have the name in our data structure.
                state['unsubscribed'] += removed_subs
                # Now filter out the removed subscriptions from subscriptions.
                state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]
            elif event['op'] == 'update':
                for sub in state['subscriptions']:
                    if sub['name'].lower() == event['name'].lower():
                        sub[event['property']] = event['value']
            elif event['op'] == 'peer_add':
                # Another user subscribed; add them to the subscriber lists.
                user_id = event['user_id']
                for sub in state['subscriptions']:
                    if (sub['name'] in event['subscriptions'] and
                            user_id not in sub['subscribers']):
                        sub['subscribers'].append(user_id)
                for sub in state['never_subscribed']:
                    if (sub['name'] in event['subscriptions'] and
                            user_id not in sub['subscribers']):
                        sub['subscribers'].append(user_id)
            elif event['op'] == 'peer_remove':
                user_id = event['user_id']
                for sub in state['subscriptions']:
                    if (sub['name'] in event['subscriptions'] and
                            user_id in sub['subscribers']):
                        sub['subscribers'].remove(user_id)
        elif event['type'] == "presence":
            state['presences'][event['email']] = event['presence']
        elif event['type'] == "update_message":
            # The client will get the updated message directly
            pass
        elif event['type'] == "reaction":
            # The client will get the message with the reactions directly
            pass
        elif event['type'] == "referral":
            state['referrals'] = event['referrals']
        elif event['type'] == "update_message_flags":
            # The client will get the message with the updated flags directly
            pass
        elif event['type'] == "realm_domains":
            if event['op'] == 'add':
                state['realm_domains'].append(event['alias'])
            elif event['op'] == 'change':
                for realm_domain in state['realm_domains']:
                    if realm_domain['domain'] == event['alias']['domain']:
                        realm_domain['allow_subdomains'] = event['alias']['allow_subdomains']
            elif event['op'] == 'remove':
                state['realm_domains'] = [alias for alias in state['realm_domains'] if alias['domain'] != event['domain']]
        elif event['type'] == "realm_emoji":
            state['realm_emoji'] = event['realm_emoji']
        elif event['type'] == "alert_words":
            state['alert_words'] = event['alert_words']
        elif event['type'] == "muted_topics":
            state['muted_topics'] = event["muted_topics"]
        elif event['type'] == "realm_filters":
            state['realm_filters'] = event["realm_filters"]
        elif event['type'] == "update_display_settings":
            if event['setting_name'] == "twenty_four_hour_time":
                state['twenty_four_hour_time'] = event["setting"]
            if event['setting_name'] == 'left_side_userlist':
                state['left_side_userlist'] = event["setting"]
        elif event['type'] == "update_global_notifications":
            if event['notification_name'] == "enable_stream_desktop_notifications":
                state['enable_stream_desktop_notifications'] = event['setting']
            elif event['notification_name'] == "enable_stream_sounds":
                state['enable_stream_sounds'] = event['setting']
            elif event['notification_name'] == "enable_desktop_notifications":
                state['enable_desktop_notifications'] = event['setting']
            elif event['notification_name'] == "enable_sounds":
                state['enable_sounds'] = event['setting']
            elif event['notification_name'] == "enable_offline_email_notifications":
                state['enable_offline_email_notifications'] = event['setting']
            elif event['notification_name'] == "enable_offline_push_notifications":
                state['enable_offline_push_notifications'] = event['setting']
            elif event['notification_name'] == "enable_online_push_notifications":
                state['enable_online_push_notifications'] = event['setting']
            elif event['notification_name'] == "enable_digest_emails":
                state['enable_digest_emails'] = event['setting']
        else:
            raise ValueError("Unexpected event type %s" % (event['type'],))
def do_events_register(user_profile, user_client, apply_markdown=True,
                       event_types=None, queue_lifespan_secs=0, all_public_streams=False,
                       narrow=None):
    # type: (UserProfile, Client, bool, Optional[Iterable[str]], int, bool, Optional[Iterable[Sequence[Text]]]) -> Dict[str, Any]
    """Allocate a Tornado event queue and return the client's initial state.

    The returned dict is the payload from fetch_initial_state_data(),
    updated via apply_events() with any events that arrived while the
    initial state was being fetched, plus `queue_id` and `last_event_id`.

    Raises JsonableError if Tornado could not allocate a queue.
    """
    # Bug fix: `narrow=[]` was a mutable default argument (one list object
    # shared across every call). Use None as the sentinel and substitute a
    # fresh empty list per call; explicit callers are unaffected.
    if narrow is None:
        narrow = []

    # Technically we don't need to check this here because
    # build_narrow_filter will check it, but it's nicer from an error
    # handling perspective to do it before contacting Tornado
    check_supported_events_narrow_filter(narrow)
    queue_id = request_event_queue(user_profile, user_client, apply_markdown,
                                   queue_lifespan_secs, event_types, all_public_streams,
                                   narrow=narrow)

    if queue_id is None:
        raise JsonableError(_("Could not allocate event queue"))
    if event_types is not None:
        event_types_set = set(event_types)  # type: Optional[Set[str]]
    else:
        event_types_set = None

    ret = fetch_initial_state_data(user_profile, event_types_set, queue_id)

    # Apply events that came in while we were fetching initial data
    events = get_user_events(user_profile, queue_id, -1)
    apply_events(ret, events, user_profile)
    if events:
        ret['last_event_id'] = events[-1]['id']
    else:
        ret['last_event_id'] = -1
    return ret
| {
"content_hash": "77e37766ea1fd705bd5f624f8988cb76",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 122,
"avg_line_length": 49.92670157068063,
"alnum_prop": 0.578125,
"repo_name": "sonali0901/zulip",
"id": "8b7dc3cf17b7c0c867df61837e99e4cf27cff88e",
"size": "19192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/lib/events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "276623"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "505551"
},
{
"name": "JavaScript",
"bytes": "1535744"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86466"
},
{
"name": "Python",
"bytes": "3352710"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37885"
}
],
"symlink_target": ""
} |
from .. import NextGenInstanceResource, NextGenListResource
class Activity(NextGenInstanceResource):
    """A single TaskRouter Activity instance resource."""

    def update(self, **kwargs):
        """Update this activity's properties via the parent list resource."""
        return self.parent.update_instance(self.name, kwargs)

    def delete(self):
        """Delete this activity via the parent list resource."""
        return self.parent.delete_instance(self.name)
class Activities(NextGenListResource):
    """The list resource for TaskRouter :class:`Activity` objects."""

    name = "Activities"
    instance = Activity

    def create(self, friendly_name, available):
        """
        Create an Activity.

        :param friendly_name: A human-readable name for the activity, such as
            'On Call', 'Break', 'Email', etc. Must be unique in this Workspace.
            These names will be used to calculate and expose statistics about
            workers, and give you visibility into the state of each of your
            workers.
        :param available: Boolean value indicating whether the worker should be
            eligible to receive a Task when they occupy this Activity. For
            example, a call center might have an activity named 'On Call' with
            an availability set to 'false'.
        """
        payload = {
            'friendly_name': friendly_name,
            'available': available,
        }
        return self.create_instance(payload)

    def delete(self, sid):
        """Delete the activity identified by ``sid``."""
        return self.delete_instance(sid)

    def update(self, sid, **kwargs):
        """
        Update the :class:`Activity` identified by ``sid``.

        All the parameters are described above in :meth:`create`.
        """
        return self.update_instance(sid, kwargs)
| {
"content_hash": "2d0296165c9174b286a42757afdee111",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 30.894736842105264,
"alnum_prop": 0.6042021578648495,
"repo_name": "Gchorba/Ask",
"id": "ea1c502d5312a59a7e241dc509eaf225f770652d",
"size": "1761",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/twilio/rest/resources/task_router/activities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6111"
},
{
"name": "Groff",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "530"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "3937647"
},
{
"name": "Shell",
"bytes": "3737"
}
],
"symlink_target": ""
} |
"""Tests for V2 LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
# Force the implementation_selector grappler pass on — NOTE(review):
# presumably so the runtime can select between the registered LSTM kernel
# implementations in recurrent_v2; confirm against that module.
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
# -1 disables the minimum-graph-size threshold so the rewriter also runs on
# the small graphs built by these tests.
_rewrites.min_graph_nodes = -1
_graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)
# Session config handed to run_all_keras_modes for every test below.
_config = config_pb2.ConfigProto(graph_options=_graph_options)
@keras_parameterized.run_all_keras_modes(config=_config)
class LSTMV2Test(keras_parameterized.TestCase):
@parameterized.named_parameters(
    ('non_tan_activation', 'relu', 'sigmoid', 0, False, True),
    ('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True),
    ('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True),
    ('unroll', 'tanh', 'sigmoid', 0, True, True),
    ('not_use_bias', 'tanh', 'sigmoid', 0, False, False),
)
def test_could_use_defun_backend(self, activation, recurrent_activation,
                                 recurrent_dropout, unroll, use_bias):
  # Each named parameter set above deviates in exactly one way from the
  # canonical configuration (tanh, sigmoid, no recurrent dropout, no
  # unroll, use_bias=True), so none of these layers should be eligible
  # for the GPU kernel path.
  layer = rnn.LSTM(
      1,
      activation=activation,
      recurrent_activation=recurrent_activation,
      recurrent_dropout=recurrent_dropout,
      unroll=unroll,
      use_bias=use_bias)
  self.assertFalse(layer._could_use_gpu_kernel)
@testing_utils.run_v2_only
def test_use_on_default_activation_with_gpu_kernel(self):
  """Passing the default activations explicitly must keep GPU eligibility."""
  for default_kwargs in ({'activation': nn.tanh},
                         {'recurrent_activation': nn.sigmoid}):
    self.assertTrue(rnn.LSTM(1, **default_kwargs)._could_use_gpu_kernel)
def test_static_shape_inference_LSTM(self):
  """Output shape must be statically inferred as [None, timesteps, units]."""
  # Github issue: 15165
  steps = 3
  input_dim = 4
  n_units = 2

  model = keras.models.Sequential()
  model.add(keras.layers.Dense(input_dim, input_shape=(steps, input_dim)))
  model.add(rnn.LSTM(n_units, return_sequences=True))

  lstm_output = model.layers[-1].output
  self.assertEqual(lstm_output.shape.as_list(), [None, steps, n_units])
def test_dynamic_behavior_LSTM(self):
  """An LSTM with an unknown time dimension must still train on a batch."""
  batch = 2
  steps = 3
  input_dim = 4
  n_units = 2

  # input_shape=(None, ...) leaves the time dimension dynamic.
  model = keras.models.Sequential(
      [rnn.LSTM(n_units, input_shape=(None, input_dim))])
  model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse')

  features = np.random.random((batch, steps, input_dim))
  labels = np.random.random((batch, n_units))
  model.train_on_batch(features, labels)
def test_stacking_LSTM(self):
  """Two stacked LSTM layers must train end to end."""
  inputs = np.random.random((2, 3, 4))
  # Normalize targets into a probability distribution per timestep.
  targets = np.abs(np.random.random((2, 3, 5)))
  targets /= targets.sum(axis=-1, keepdims=True)

  stacked = keras.models.Sequential([
      rnn.LSTM(10, return_sequences=True, unroll=False),
      rnn.LSTM(5, return_sequences=True, unroll=False),
  ])
  stacked.compile(
      loss='categorical_crossentropy',
      optimizer=gradient_descent.GradientDescentOptimizer(0.01))
  stacked.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
  """A layer rebuilt via from_config(get_config()) must round-trip its config."""
  layer_class = rnn.LSTM
  for stateful in (False, True):
    original = layer_class(units=1, stateful=stateful)
    restored = layer_class.from_config(original.get_config())
    # Fix: use self.assertEqual instead of a bare `assert` — the bare
    # statement is stripped under `python -O` and reports no diff on failure.
    self.assertEqual(original.get_config(), restored.get_config())
  def test_specify_initial_state_keras_tensor(self):
    """LSTM accepts Keras symbolic tensors as `initial_state` at call time."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    # Test with Keras tensor
    inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    layer = rnn.LSTM(units)
    if len(initial_state) == 1:
      output = layer(inputs, initial_state=initial_state[0])
    else:
      output = layer(inputs, initial_state=initial_state)
    # The state tensors must be recorded as inbound tensors of the node.
    self.assertTrue(
        any(initial_state[0] is t
            for t in layer._inbound_nodes[0].input_tensors))
    model = keras.models.Model([inputs] + initial_state, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([inputs] + initial_state, targets)
  def test_specify_initial_state_non_keras_tensor(self):
    """LSTM accepts plain backend variables (non-Keras tensors) as state."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    # Test with non-Keras tensor
    inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [
        keras.backend.random_normal_variable((num_samples, units), 0, 1)
        for _ in range(num_states)
    ]
    layer = rnn.LSTM(units)
    output = layer(inputs, initial_state=initial_state)
    # State variables are not model inputs here; only `inputs` is fed.
    model = keras.models.Model(inputs, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.train_on_batch(inputs, targets)
  def test_reset_states_with_values(self):
    """reset_states supports explicit values and rejects mismatched lists."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    layer = rnn.LSTM(units, stateful=True)
    layer.build((num_samples, timesteps, embedding_dim))
    initial_weight_count = len(layer.weights)
    layer.reset_states()
    assert len(layer.states) == num_states
    assert layer.states[0] is not None
    # Without explicit values, states reset to zeros.
    self.assertAllClose(
        keras.backend.eval(layer.states[0]),
        np.zeros(keras.backend.int_shape(layer.states[0])),
        atol=1e-4)
    state_shapes = [keras.backend.int_shape(state) for state in layer.states]
    values = [np.ones(shape) for shape in state_shapes]
    if len(values) == 1:
      values = values[0]
    layer.reset_states(values)
    self.assertAllClose(
        keras.backend.eval(layer.states[0]),
        np.ones(keras.backend.int_shape(layer.states[0])),
        atol=1e-4)
    # Test with invalid data: one value too many must raise.
    with self.assertRaises(ValueError):
      layer.reset_states([1] * (len(layer.states) + 1))
    self.assertEqual(initial_weight_count, len(layer.weights))
    # Variables in "states" shouldn't show up in .weights
    layer.states = nest.map_structure(variables.Variable, values)
    layer.reset_states()
    self.assertEqual(initial_weight_count, len(layer.weights))
  def test_specify_state_with_masking(self):
    """Initial states combine with a Masking layer without error."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    inputs = keras.Input((timesteps, embedding_dim))
    # The Masking output is intentionally discarded; the test only needs the
    # mask-producing layer to exist in the graph.
    _ = keras.layers.Masking()(inputs)
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    output = rnn.LSTM(units)(
        inputs, initial_state=initial_state)
    model = keras.models.Model([inputs] + initial_state, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([inputs] + initial_state, targets)
  def test_return_state(self):
    """return_state=True yields the output followed by num_states states."""
    if test.is_built_with_rocm():
      self.skipTest('Skipping the test as ROCm MIOpen does not '
                    'support padded input yet.')
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    masked = keras.layers.Masking()(inputs)
    layer = rnn.LSTM(units, return_state=True, stateful=True)
    outputs = layer(masked)
    # outputs[0] is the layer output; the remainder are the state tensors.
    state = outputs[1:]
    assert len(state) == num_states
    model = keras.models.Model(inputs, state[0])
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    state = model.predict(inputs)
    # The stateful layer's recorded state must match the returned state.
    self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
  def test_state_reuse(self):
    """States returned by one LSTM can seed another LSTM's initial_state."""
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    layer = rnn.LSTM(
        units, return_state=True, return_sequences=True)
    outputs = layer(inputs)
    output, state = outputs[0], outputs[1:]
    # Feed the first layer's final states into a second LSTM.
    output = rnn.LSTM(units)(output, initial_state=state)
    model = keras.models.Model(inputs, output)
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    model.predict(inputs)
  def test_initial_states_as_other_inputs(self):
    """Initial states may be appended to the inputs list instead of kwarg."""
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    num_states = 2
    layer_class = rnn.LSTM
    # Test with Keras tensor
    main_inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    inputs = [main_inputs] + initial_state
    layer = layer_class(units)
    output = layer(inputs)
    # The state tensors must be recorded as inbound tensors of the node.
    self.assertTrue(
        any(initial_state[0] is t
            for t in layer._inbound_nodes[0].input_tensors))
    model = keras.models.Model(inputs, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([main_inputs] + initial_state, targets)
  @testing_utils.run_v2_only
  def test_lstm_v2_feature_parity_with_canonical_lstm(self):
    """LSTM v2 matches rnn_v1.LSTM outputs (with masking) before/after fit."""
    if test.is_built_with_rocm():
      self.skipTest('Skipping the test as ROCm MIOpen does not '
                    'support padded input yet.')
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 20
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=rnn_state_size,
        random_seed=random_seed.DEFAULT_GRAPH_SEED)
    y_train = np_utils.to_categorical(y_train, rnn_state_size)
    # For the last batch item of the test data, we filter out the last
    # timestep to simulate the variable length sequence and masking test.
    x_train[-2:, -1, :] = 0.0
    y_train[-2:] = 0
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)
    masked_input = keras.layers.Masking()(inputs)
    # Canonical (v1) LSTM with sigmoid recurrent activation is the reference.
    lstm_layer = rnn_v1.LSTM(rnn_state_size,
                             recurrent_activation='sigmoid')
    output = lstm_layer(masked_input)
    lstm_model = keras.models.Model(inputs, output)
    weights = lstm_model.get_weights()
    y_1 = lstm_model.predict(x_train)
    lstm_model.compile('rmsprop', 'mse')
    lstm_model.fit(x_train, y_train)
    y_2 = lstm_model.predict(x_train)
    # Same weights loaded into the v2 (cudnn-capable) layer on GPU; compare
    # predictions both before and after one round of training.
    with testing_utils.device(should_use_gpu=True):
      cudnn_layer = rnn.LSTM(rnn_state_size)
      cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
    cudnn_model.set_weights(weights)
    y_3 = cudnn_model.predict(x_train)
    cudnn_model.compile('rmsprop', 'mse')
    cudnn_model.fit(x_train, y_train)
    y_4 = cudnn_model.predict(x_train)
    self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5)
    self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5)
  @parameterized.named_parameters(('v0', 0), ('v1', 1), ('v2', 2))
  def test_implementation_mode_LSTM(self, implementation_mode):
    """layer_test across implementation modes, plus constraint and masking
    checks."""
    if test.is_built_with_rocm():
      self.skipTest('Skipping the test as ROCm MIOpen does not '
                    'support padded input yet.')
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        rnn.LSTM,
        kwargs={
            'units': units,
            'implementation': implementation_mode
        },
        input_shape=(num_samples, timesteps, embedding_dim))
    # Constraints must be threaded through to the cell's variables.
    layer_class = rnn.LSTM
    k_constraint = keras.constraints.max_norm(0.01)
    r_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_constraint=k_constraint,
        recurrent_constraint=r_constraint,
        bias_constraint=b_constraint)
    layer.build((None, None, embedding_dim))
    self.assertEqual(layer.cell.kernel.constraint, k_constraint)
    self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
    self.assertEqual(layer.cell.bias.constraint, b_constraint)
    # Training behind a Masking layer must run end to end.
    layer_class = rnn.LSTM
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(keras.layers.Masking(input_shape=(3, 4)))
    model.add(layer_class(units=5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
  def test_masking_with_stacking_LSTM(self):
    """Stacked LSTMs behind a Masking layer train end to end."""
    if test.is_built_with_rocm():
      self.skipTest('Skipping the test as ROCm MIOpen does not '
                    'support padded input yet.')
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    # Normalize so each timestep's targets form a probability distribution.
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(keras.layers.Masking(input_shape=(3, 4)))
    model.add(rnn.LSTM(10, return_sequences=True, unroll=False))
    model.add(rnn.LSTM(5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
  @parameterized.named_parameters(
      # test_name, time_major, go_backwards
      ('normal', False, False),
      ('time_major', True, False),
      ('go_backwards', False, True),
      ('both', True, True),
  )
  def test_time_major_and_go_backward(self, time_major, go_backwards):
    """v2 LSTM matches v1 under time_major/go_backwards; then smoke-trains."""
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 100
    x_train = np.random.random((batch, timestep, input_shape))

    def build_model(layer_cls):
      # Single-RNN model; for time_major the input is transposed to
      # [time, batch, feature] before the RNN and transposed back after.
      inputs = keras.layers.Input(
          shape=[timestep, input_shape], dtype=dtypes.float32)
      layer = layer_cls(rnn_state_size,
                        recurrent_activation='sigmoid',
                        time_major=time_major,
                        return_sequences=True,
                        go_backwards=go_backwards)
      if time_major:
        converted_input = keras.layers.Lambda(
            lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs)
        outputs = layer(converted_input)
        outputs = keras.layers.Lambda(
            lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs)
      else:
        outputs = layer(inputs)
      return keras.models.Model(inputs, outputs)

    # Copy the v1 model's weights into the v2 model and compare predictions.
    lstm_model = build_model(rnn_v1.LSTM)
    y_ref = lstm_model.predict(x_train)
    weights = lstm_model.get_weights()
    lstm_v2_model = build_model(rnn.LSTM)
    lstm_v2_model.set_weights(weights)
    y = lstm_v2_model.predict(x_train)
    self.assertAllClose(y, y_ref)
    # End-to-end fit/evaluate/predict smoke test for the v2 layer.
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 10
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = np_utils.to_categorical(y_train, output_shape)
    layer = rnn.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)
    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('rmsprop', loss='mse')
    model.fit(x_train, y_train, epochs=epoch)
    model.evaluate(x_train, y_train)
    model.predict(x_train)
  @parameterized.named_parameters(
      # test_name, use_bias, bias_initializer
      ('normal', True, 'zeros'),
      ('no_bias', False, 'zeros'),
      ('random_bias', True, 'random_uniform'),
  )
  def test_lstm_model_save_load(self, use_bias, bias_initializer):
    """Weights saved to HDF5 restore to identical predictions and weights."""
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')
    batch = 10
    timestep = 3
    input_dim = 5
    units = 2
    x = np.random.random((batch, timestep, input_dim))

    def build_model():
      # Fresh model with the parameterized bias configuration.
      inputs = keras.layers.Input(
          shape=[timestep, input_dim], dtype=dtypes.float32)
      layer = rnn.LSTM(
          units,
          use_bias=use_bias,
          bias_initializer=bias_initializer)
      output = layer(inputs)
      return keras.models.Model(inputs, output), layer

    model, layer = build_model()
    y_ref = model.predict(x)
    model.save_weights(h5_path)
    cloned_model, new_layer = build_model()
    cloned_model.load_weights(h5_path)
    y = cloned_model.predict(x)
    self.assertAllClose(y, y_ref)
    self.assertAllClose(layer.get_weights(), new_layer.get_weights())
  def test_lstm_output_on_multiple_kernel(self):
    """CPU, GPU (cudnn) and canonical-sigmoid LSTM all agree on output."""
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 100
    x_train = np.random.random((batch, timestep, input_shape))
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)
    with testing_utils.device(should_use_gpu=False):
      layer = rnn.LSTM(rnn_state_size)
      output = layer(inputs)
      cpu_model = keras.models.Model(inputs, output)
      weights = cpu_model.get_weights()
    y_1 = cpu_model.predict(x_train)
    with testing_utils.device(should_use_gpu=True):
      layer = rnn.LSTM(rnn_state_size)
      output = layer(inputs)
      gpu_model = keras.models.Model(inputs, output)
      gpu_model.set_weights(weights)
    y_2 = gpu_model.predict(x_train)
    # Note that CuDNN uses 'sigmoid' as activation, so the LSTM V2 uses
    # 'sigmoid' as default. Construct the canonical LSTM with sigmoid to
    # achieve the same output.
    with testing_utils.device(should_use_gpu=True):
      layer = rnn_v1.LSTM(rnn_state_size, recurrent_activation='sigmoid')
      output = layer(inputs)
      canonical_model = keras.models.Model(inputs, output)
      # Remove the extra cudnn bias since canonical lstm will not use it.
      canonical_model.set_weights(weights[:3])
    y_3 = canonical_model.predict(x_train)
    self.assertAllClose(y_1, y_2)
    self.assertAllClose(y_2, y_3)
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.LSTM,
kwargs={
'units': units,
'return_sequences': True
},
input_shape=(num_samples, timesteps, embedding_dim))
  @testing_utils.run_v2_only
  def test_float64_LSTM(self):
    """layer_test passes with float64 input dtype and layer dtype."""
    if test.is_built_with_rocm():
      self.skipTest('Skipping the test as ROCm MIOpen does not '
                    'support float64 yet.')
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        rnn.LSTM,
        kwargs={
            'units': units,
            'return_sequences': True,
            'dtype': 'float64'
        },
        input_shape=(num_samples, timesteps, embedding_dim),
        input_dtype='float64')
  def test_regularizers_LSTM(self):
    """Kernel/recurrent/bias/activity regularizer losses are registered."""
    embedding_dim = 4
    layer_class = rnn.LSTM
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_regularizer=keras.regularizers.l1(0.01),
        recurrent_regularizer=keras.regularizers.l1(0.01),
        bias_regularizer='l2',
        activity_regularizer='l1')
    layer.build((None, None, 2))
    # Three weight regularizers register as soon as the layer is built.
    self.assertEqual(len(layer.losses), 3)
    x = keras.backend.variable(np.ones((2, 3, 2)))
    layer(x)
    # The activity regularizer contributes only after the layer is called.
    if context.executing_eagerly():
      self.assertEqual(len(layer.losses), 4)
    else:
      self.assertEqual(len(layer.get_losses_for(x)), 1)
  def test_statefulness_LSTM(self):
    """Stateful LSTM carries state across predicts and honors masking."""
    if test.is_built_with_rocm():
      self.skipTest('Skipping the test as ROCm MIOpen does not '
                    'support padded input yet.')
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer_class = rnn.LSTM
    model = keras.models.Sequential()
    model.add(
        keras.layers.Embedding(
            4,
            embedding_dim,
            mask_zero=True,
            input_length=timesteps,
            batch_input_shape=(num_samples, timesteps)))
    layer = layer_class(
        units, return_sequences=False, stateful=True, weights=None)
    model.add(layer)
    model.compile(
        optimizer=gradient_descent.GradientDescentOptimizer(0.01),
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    out1 = model.predict(np.ones((num_samples, timesteps)))
    self.assertEqual(out1.shape, (num_samples, units))
    # train once so that the states change
    model.train_on_batch(
        np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
    out2 = model.predict(np.ones((num_samples, timesteps)))
    # if the state is not reset, output should be different
    self.assertNotEqual(out1.max(), out2.max())
    # check that output changes after states are reset
    # (even though the model itself didn't change)
    layer.reset_states()
    out3 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out2.max(), out3.max())
    # check that container-level reset_states() works
    model.reset_states()
    out4 = model.predict(np.ones((num_samples, timesteps)))
    self.assertAllClose(out3, out4, atol=1e-5)
    # check that the call to `predict` updated the states
    out5 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out4.max(), out5.max())
    # Check masking: left-, right- and mixed-padded inputs (after a state
    # reset each) are asserted to give the same output.
    layer.reset_states()
    left_padded_input = np.ones((num_samples, timesteps))
    left_padded_input[0, :1] = 0
    left_padded_input[1, :2] = 0
    out6 = model.predict(left_padded_input)
    layer.reset_states()
    right_padded_input = np.ones((num_samples, timesteps))
    right_padded_input[0, -1:] = 0
    right_padded_input[1, -2:] = 0
    out7 = model.predict(right_padded_input)
    layer.reset_states()
    mix_padded_input = np.ones((num_samples, timesteps))
    mix_padded_input[0, 1] = 0
    mix_padded_input[1, 0] = 0
    mix_padded_input[1, 2] = 0
    out8 = model.predict(mix_padded_input)
    self.assertAllClose(out7, out6, atol=1e-5)
    self.assertAllClose(out8, out7, atol=1e-5)
  def test_stateful_LSTM_training(self):
    """Stateful LSTM inside a Sequential model trains without error."""
    # See b/123587692 for more context.
    vocab_size = 20
    embedding_dim = 10
    batch_size = 8
    timestep = 12
    units = 5
    x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
    y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
    model = keras.Sequential([
        keras.layers.Embedding(vocab_size, embedding_dim,
                               batch_input_shape=[batch_size, timestep]),
        rnn.LSTM(units, return_sequences=True, stateful=True),
        keras.layers.Dense(vocab_size)
    ])
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    # shuffle=False keeps batch order stable, as required for stateful RNNs.
    model.fit(x, y, epochs=1, shuffle=False)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
rnn.LSTM,
kwargs={
'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1
},
input_shape=(num_samples, timesteps, embedding_dim))
def test_bidirectional(self):
batch = 128
timestep = 20
vocab_size = 1000
model = keras.Sequential([
keras.layers.Embedding(vocab_size, 64),
keras.layers.Bidirectional(rnn.LSTM(
64, return_sequences=True)),
keras.layers.Bidirectional(rnn.LSTM(32)),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
x = np.random.randint(0, vocab_size, size=(batch, timestep))
y = np.random.randint(0, 1, size=(batch))
model.fit(x, y, epochs=1, shuffle=False)
model.evaluate(x, y)
model.predict(x)
@testing_utils.run_v2_only
def test_explicit_device_with_go_backward_and_mask(self):
if test.is_built_with_rocm():
self.skipTest('Skipping the test as ROCm MIOpen does not '
'support padded input yet.')
batch_size = 8
timestep = 7
masksteps = 5
units = 4
inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)
mask = np.ones((batch_size, timestep)).astype(np.bool)
mask[:, masksteps:] = 0
# Test for V1 behavior.
lstm_v1 = rnn_v1.LSTM(units, return_sequences=True, go_backwards=True)
with testing_utils.device(should_use_gpu=True):
outputs_masked_v1 = lstm_v1(inputs, mask=constant_op.constant(mask))
outputs_trimmed_v1 = lstm_v1(inputs[:, :masksteps])
self.assertAllClose(outputs_masked_v1[:, -masksteps:], outputs_trimmed_v1)
# Test for V2 behavior.
lstm = rnn.LSTM(units, return_sequences=True, go_backwards=True)
with testing_utils.device(should_use_gpu=True):
outputs_masked = lstm(inputs, mask=constant_op.constant(mask))
outputs_trimmed = lstm(inputs[:, :masksteps])
self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)
  def test_v1_session_behavior(self):
    """LSTM v2 trains inside a v1 graph with tf.data input (b/139132348)."""
    with ops.get_default_graph().as_default():
      # See b/139132348 for more details.
      x = np.random.uniform(size=(100, 4, 8))
      y = np.random.uniform(size=(100, 1))
      dataset = dataset_ops.Dataset.from_tensor_slices(
          (x, y)).shuffle(100).batch(32)
      inp = keras.layers.Input(shape=(4, 8))
      layer = rnn.LSTM(1)(inp)
      layer = keras.layers.Dense(1)(layer)
      model = keras.models.Model(inp, layer)
      model.compile(loss='mse', optimizer='sgd')
      model.fit(dataset)
  def test_with_fully_masked_inputs(self):
    """A sample whose every timestep is masked must not crash prediction."""
    num_samples = 8
    timestep = 5
    embedding_dim = 4
    vocab_size = 20
    units = 2
    inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep))
    # Set the first inputs to be fully zero (mask_zero masks every step).
    inputs[0, :] = 0.0
    model = keras.models.Sequential()
    model.add(
        keras.layers.Embedding(
            vocab_size,
            embedding_dim,
            mask_zero=True,
            input_length=timestep,
            batch_input_shape=(num_samples, timestep)))
    layer = rnn.LSTM(units)
    model.add(layer)
    model.compile(
        optimizer=gradient_descent.GradientDescentOptimizer(0.01),
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    # Make sure it doesn't crash with cudnn kernel.
    model.predict(inputs)
@keras_parameterized.run_all_keras_modes(config=_config)
class LSTMGraphRewriteTest(keras_parameterized.TestCase):
  """Tests runtime selection (CPU vs GPU kernel) via the grappler rewrite."""

  # Shared model/data dimensions for all tests in this class.
  input_shape = 10
  output_shape = 8
  rnn_state_size = 8
  timestep = 4
  batch = 100
  epoch = 1

  def _test_runtime_with_model(self, model):
    """Trains `model` and asserts the reported runtime matches the device.

    `model` must output `(predictions, runtime)`, where runtime comes from
    an LSTM layer built with `return_runtime=True`.
    """
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=self.batch,
        test_samples=0,
        input_shape=(self.timestep, self.input_shape),
        num_classes=self.output_shape)
    y_train = np_utils.to_categorical(y_train, self.output_shape)
    # The runtime output has no loss attached (loss=None).
    model.compile(
        optimizer='sgd',
        loss=['categorical_crossentropy', None],
        run_eagerly=testing_utils.should_run_eagerly())
    existing_loss = 0
    for _ in range(self.epoch):
      history = model.fit(x_train, y_train)
      # The loss must change every epoch, i.e. training actually happened.
      loss_value = history.history['loss'][0]
      self.assertNotEqual(existing_loss, loss_value)
      existing_loss = loss_value
    _, runtime_value = model.predict(x_train)
    if test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)

  @testing_utils.run_v2_only
  def test_LSTM_runtime(self):
    """Plain input: GPU runtime is reported whenever a GPU is available."""
    layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)
    inputs = keras.layers.Input(
        shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
    outputs, runtime = layer(inputs)
    # Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, specially during
    # aggregation.
    runtime = keras.layers.Lambda(
        lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
    model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
    self._test_runtime_with_model(model)

  @testing_utils.run_v2_only
  def test_LSTM_runtime_with_mask(self):
    """Masked input: GPU kernel only for strictly right-padded sequences."""
    if test.is_built_with_rocm():
      self.skipTest('Skipping the test as ROCm MIOpen does not '
                    'support padded input yet.')
    # Masking will affect which backend is selected based on whether the mask
    # is strictly right padded.
    layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)
    inputs = keras.layers.Input(
        shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
    masked_inputs = keras.layers.Masking()(inputs)
    outputs, runtime = layer(masked_inputs)
    # Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, specially during
    # aggregation.
    runtime = keras.layers.Lambda(
        lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
    model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=self.batch,
        test_samples=0,
        input_shape=(self.timestep, self.input_shape),
        num_classes=self.output_shape)
    y_train = np_utils.to_categorical(y_train, self.output_shape)
    model.compile(
        optimizer='sgd',
        loss=['categorical_crossentropy', None],
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x_train, y_train)
    # Verify unpadded data.
    _, runtime_value = model.predict(x_train)
    if test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
    # Update x/y to be right padded by setting the last timestep to 0
    x_train[:, -1, :] = 0
    y_train[:, -1] = 0
    _, runtime_value = model.predict(x_train)
    if test.is_gpu_available():
      self.assertEqual(runtime_value[0], rnn._RUNTIME_GPU)
    else:
      self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)
    # Further update x/y to be mix padded (masks in the middle), and verify
    # only cpu kernel can be selected.
    x_train[:, -3, :] = 0
    y_train[:, -3] = 0
    _, runtime_value = model.predict(x_train)
    self.assertEqual(runtime_value[0], rnn._RUNTIME_CPU)

  @testing_utils.run_v2_only
  def test_LSTM_runtime_with_cond(self):
    """Graph rewrite still applies when the layer call sits inside a cond."""
    # This test is to demonstrate the graph rewrite of grappler plugin under
    # the condition that the function returns different number of internal
    # states.
    layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)
    inputs = keras.layers.Input(
        shape=[self.timestep, self.input_shape], dtype=dtypes.float32)
    zeros = array_ops.zeros([self.batch, self.output_shape])
    dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
    a = constant_op.constant(0)
    b = constant_op.constant(1)
    # Will always run the lstm layer.
    outputs, runtime = control_flow_ops.cond(
        gen_math_ops.less(a, b),
        lambda: layer(inputs),
        lambda: (zeros, dummy_runtime))
    # Expand the runtime so that it is a 1D tensor instead of scalar.
    # TF model does not work with scalar model output, specially during
    # aggregation.
    runtime = keras.layers.Lambda(
        lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
    model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
    self._test_runtime_with_model(model)
class LSTMPerformanceTest(test.Benchmark):
  """Benchmarks LSTM v2 against CuDNNLSTM and the canonical (v1) LSTM."""

  def _measure_performance(self, test_config, model, x_train, y_train):
    """Returns average seconds per epoch, excluding the warmup epochs."""
    batch = test_config['batch']
    epoch = test_config['epoch']
    warmup_epoch = test_config['warmup_epoch']
    # warm up the model
    model.fit(x_train, y_train, batch_size=batch, epochs=warmup_epoch)
    start_time = time.time()
    model.fit(x_train, y_train, batch_size=batch, epochs=epoch - warmup_epoch)
    end_time = time.time()
    return (end_time - start_time) / (epoch - warmup_epoch)

  def _time_performance_run_cudnn_lstm(self, test_config, x_train, y_train):
    # Get the performance number for standard Cudnn LSTM
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']
    cudnn_lstm_layer = keras.layers.CuDNNLSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)
    outputs = cudnn_lstm_layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')
    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'CuDNN LSTM', sec_per_epoch)
    return sec_per_epoch

  def _time_performance_run_unifed_lstm_gpu(
      self, test_config, x_train, y_train):
    # Get performance number for lstm_v2 with grappler swap the impl
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']
    layer = rnn.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)
    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')
    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'LSTM V2', sec_per_epoch)
    return sec_per_epoch

  def _time_performance_run_normal_lstm(
      self, test_config, x_train, y_train):
    # Get performance number for standard LSTM on GPU.
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']
    layer = rnn_v1.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)
    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')
    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'Normal LSTM', sec_per_epoch)
    return sec_per_epoch

  def _benchmark_performance_with_standard_cudnn_impl(self):
    """Runs all three LSTM variants on shared data and reports benchmarks."""
    if not test.is_gpu_available():
      self.skipTest('performance test will only run on GPU')
    mode = 'eager' if context.executing_eagerly() else 'graph'
    batch = 64
    num_batch = 10
    test_config = {
        'input_shape': 128,
        'rnn_state_size': 64,
        'output_shape': 64,
        'timestep': 50,
        'batch': batch,
        'epoch': 20,
        # The performance for warmup epoch is ignored.
        'warmup_epoch': 1,
    }
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=(batch * num_batch),
        test_samples=0,
        input_shape=(test_config['timestep'], test_config['input_shape']),
        num_classes=test_config['output_shape'])
    y_train = np_utils.to_categorical(y_train, test_config['output_shape'])
    cudnn_sec_per_epoch = self._time_performance_run_cudnn_lstm(
        test_config, x_train, y_train)
    lstm_v2_sec_per_epoch = self._time_performance_run_unifed_lstm_gpu(
        test_config, x_train, y_train)
    normal_lstm_sec_per_epoch = self._time_performance_run_normal_lstm(
        test_config, x_train, y_train)
    cudnn_vs_v2 = cudnn_sec_per_epoch / lstm_v2_sec_per_epoch
    v2_vs_normal = normal_lstm_sec_per_epoch / lstm_v2_sec_per_epoch
    self.report_benchmark(name='keras_cudnn_lstm_' + mode,
                          wall_time=cudnn_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    self.report_benchmark(name='keras_lstm_v2_' + mode,
                          wall_time=lstm_v2_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    self.report_benchmark(name='keras_canonical_lstm_' + mode,
                          wall_time=normal_lstm_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    logging.info('Expect the performance of LSTM V2 is within 80% of '
                 'CuDNN LSTM, got {0:.2f}%'.format(cudnn_vs_v2 * 100))
    logging.info('Expect the performance of LSTM V2 is more than 5 times'
                 ' of normal LSTM, got {0:.2f}'.format(v2_vs_normal))

  def benchmark_performance_graph(self):
    """Graph-mode benchmark entry point."""
    with ops.get_default_graph().as_default():
      with session_lib.Session(config=_config):
        self._benchmark_performance_with_standard_cudnn_impl()

  def benchmark_performance_eager(self):
    """Eager-mode benchmark entry point."""
    with context.eager_mode():
      self._benchmark_performance_with_standard_cudnn_impl()
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| {
"content_hash": "36e3ca35bc7f519fe4ed5912697a598e",
"timestamp": "",
"source": "github",
"line_count": 1111,
"max_line_length": 80,
"avg_line_length": 35.344734473447346,
"alnum_prop": 0.6502241010492004,
"repo_name": "aldian/tensorflow",
"id": "c0f41b4bf7ccc61d62f99fac8023d791ecf800b9",
"size": "39957",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/lstm_v2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Checkin.coach_response nullable (TextField with null=True)."""

    dependencies = [
        ('question', '0010_checkin_coach_response'),
    ]

    operations = [
        migrations.AlterField(
            model_name='checkin',
            name='coach_response',
            field=models.TextField(null=True),
        ),
    ]
| {
"content_hash": "411205e7830ad4e8dfe29c46bef101a9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 21.555555555555557,
"alnum_prop": 0.6005154639175257,
"repo_name": "airportmarc/the416life",
"id": "16d09b78b12bc859151a4a10e0f8d7588e04837f",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/question/migrations/0011_auto_20171122_0832.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "18"
},
{
"name": "CSS",
"bytes": "430385"
},
{
"name": "HTML",
"bytes": "174632"
},
{
"name": "JavaScript",
"bytes": "224762"
},
{
"name": "Python",
"bytes": "477212"
},
{
"name": "Shell",
"bytes": "4240"
},
{
"name": "Vue",
"bytes": "80363"
}
],
"symlink_target": ""
} |
from libcloud.types import Provider
from libcloud.providers import get_driver
EC2 = get_driver(Provider.EC2_US_EAST)
Slicehost = get_driver(Provider.SLICEHOST)
Rackspace = get_driver(Provider.RACKSPACE)
drivers = [ EC2('access key id', 'secret key'),
Slicehost('api key'),
Rackspace('username', 'api key') ]
nodes = [ driver.list_nodes() for driver in drivers ]
print nodes
# [ <Node: provider=Amazon, status=RUNNING, name=bob, ip=1.2.3.4.5>,
# <Node: provider=Slicehost, status=REBOOT, name=korine, ip=6.7.8.9.10>, ... ]
# grab the node named "test"
node = filter(lambda x: x.name == 'test', nodes)[0]
# reboot "test"
node.reboot()
| {
"content_hash": "1a402113f43af8f4b999fed57e0f0cc1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 30.227272727272727,
"alnum_prop": 0.6766917293233082,
"repo_name": "alex/libcloud",
"id": "d501a597626a920809b0a644015c3fd0b49b4752",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.2.x",
"path": "example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "259159"
}
],
"symlink_target": ""
} |
import httplib
import math
import os
import subprocess
import sys
import yaml
from optparse import OptionParser
def parse_time(timestr):
    """Parse a time value with an optional unit suffix into whole milliseconds.

    A bare number is interpreted as milliseconds.  A trailing 's' marks an
    explicit unit: 's' seconds, 'cs' centiseconds, 'ms' milliseconds,
    'µs' microseconds, 'ns' nanoseconds.  The result is rounded up to an
    integer number of milliseconds.
    """
    text = unicode(timestr.strip().lower())
    scale = 0.001  # default: value already given in milliseconds
    if text[-1] == 's':
        scale = 1
        text = text[:-1]
    # A remaining prefix letter refines the unit (e.g. the 'm' left from 'ms').
    prefixes = {'c': 0.01, 'm': 0.001, u'µ': 0.000001, 'n': 0.000000001}
    if text[-1] in prefixes:
        scale = prefixes[text[-1]]
        text = text[:-1]
    return int(math.ceil(float(text) * scale * 1000))
def parse_memory(memstr):
    """Parse a memory size with an optional unit suffix into whole bytes.

    A trailing 'b' is ignored ('kb', 'mb', ... are accepted as well as bare
    'k', 'm', ...); the prefixes k/m/g/t/p select the matching power of 1024.
    The result is rounded up to an integer number of bytes.
    """
    text = unicode(memstr.strip().lower())
    factor = 1  # default: value already given in bytes
    if text[-1] == 'b':
        text = text[:-1]
    # A remaining prefix letter selects the binary magnitude.
    prefixes = {'k': 1024, 'm': 1024**2, 'g': 1024**3,
                't': 1024**4, 'p': 1024**5}
    if text[-1] in prefixes:
        factor = prefixes[text[-1]]
        text = text[:-1]
    return int(math.ceil(float(text) * factor))
# Command-line options locating the judge control service.
parser = OptionParser()
parser.add_option('-H', '--control-host', dest='host', default='192.168.100.101', action='store', type='string')
parser.add_option('-P', '--control-port', dest='port', default=8765, action='store', type='int')
(options, args) = parser.parse_args()

# Fixed resource limits for the compile and check phases
# (times in milliseconds, sizes in bytes).
compile_time = 20*1000
compile_memory = 1024*1024*1024
compile_stack = 64*1024*1024
check_time = 60*1000
check_memory = 1024*1024*1024
def communicate(cmd, args={}, check=True):
    """POST *args* (YAML-encoded) to the judge control service as ``/<cmd>``
    and return the YAML-decoded response mapping.

    When *check* is true, a response without ``res == 'OK'`` raises.

    NOTE(review): the mutable default ``args={}`` is shared across calls;
    safe only as long as no caller mutates it.
    """
    yam = yaml.dump(args)
    try:
        con = httplib.HTTPConnection(options.host, options.port)
        con.request('POST', '/' + cmd, yam)
        res = con.getresponse()
        # NOTE(review): yaml.load without a safe loader can construct
        # arbitrary objects -- acceptable only if the control service is
        # fully trusted; confirm before exposing it further.
        ret = yaml.load(res.read())
    except:
        # Deliberately broad: any transport/parse failure is reported as a
        # communication error (the original exception context is discarded).
        raise Exception('Communication '+cmd+' failed')
    if check and ('res' not in ret or ret['res'] != 'OK'):
        raise Exception('Communication '+cmd+' finished with failure')
    return ret
# Fetch the submission and the test description from the control service.
submit = communicate('GETSUBMIT', check=False)
test = communicate('GETTEST', check=False)

filename = submit['content']['filename']
fileext = filename.split('.')[-1].lower()

# Per-test limits: parse_time yields milliseconds, parse_memory bytes.
time_limit = parse_time(test.get('time')['value'])
memory_limit = parse_memory(test.get('memory')['value'])

# Select the compiler command by source-file extension; an unrecognised
# extension ends the judging with status EXT.
if fileext == 'c':
    source_file = 'solution.c'
    compiler = [ '/usr/bin/gcc', '-static', '-O2', '-Wall', 'solution.c', '-lm', '-osolution.x']
elif fileext in ['cpp', 'cc', 'cxx']:
    fileext = 'cpp'
    source_file = 'solution.cpp'
    compiler = [ '/usr/bin/g++', '-static', '-O2', '-Wall', 'solution.cpp', '-osolution.x']
elif fileext in ['pas', 'p', 'pp']:
    fileext = 'pas'
    source_file = 'solution.pas'
    compiler = [ '/usr/bin/fpc', '-Sgic', '-Xs', '-viwnh', '-O2', 'solution.pas', '-osolution.x']
else:
    communicate('SETSTRING', {'name': 'status', 'value': 'EXT'})
    sys.exit(0)
# Prepare the sandbox jail the solution will be built and run in.
communicate('CREATEJAIL', {'path': '/jail', 'template': 'judge'})
communicate('CREATEJAIL', {'path': '/jail', 'template': 'judge'})
#COMPILE SOLUTION
communicate('GETSUBMITBLOB', {'name': 'content', 'path': os.path.join('/jail/runner', source_file)})
compile_run = ['runner', '--quiet',
'--root=/jail',
'--work-dir=/runner',
'--env=simple',
'--setuid=runner',
'--setgid=runner',
'--control-host='+options.host,
'--control-port='+str(options.port),
'--cgroup=/compile',
'--cgroup-memory='+str(compile_memory),
'--cgroup-cputime='+str(compile_time),
'--max-realtime='+str(compile_time),
'--max-stack='+str(compile_stack),
'--stdout=/tmp/compile.log', '--trunc-stdout',
'--stderr=__STDOUT__',
'--priority=30']
compile_run += compiler
ret = subprocess.call(compile_run)
communicate('SETBLOB', {'name': 'compile_log', 'path': '/tmp/compile.log'})
if ret != 0:
communicate('SETSTRING', {'name': 'status', 'value': 'CME'})
sys.exit(0)
#RUN SOLUTION
# Fetch the test input and execute the compiled solution inside the jail
# under the per-test time/memory limits.
communicate('GETTESTBLOB', {'name': 'input', 'path': '/tmp/data.in'})
execute_run = ['runner',
    '--root=/jail',
    '--work-dir=/runner',
    '--env=empty',
    '--setuid=runner',
    '--setgid=runner',
    '--control-host='+options.host,
    '--control-port='+str(options.port),
    '--cgroup=/execute',
    '--cgroup-memory='+str(memory_limit),
    '--cgroup-cputime='+str(time_limit),
    '--max-memory='+str(memory_limit),
    '--max-cputime='+str(time_limit),
    # Allow 50% extra wall-clock time on top of the CPU limit.
    '--max-realtime='+str(int(1.5*time_limit)),
    '--max-threads=1',
    '--max-files=4',
    '--stdin=/tmp/data.in',
    '--stdout=/tmp/data.out', '--trunc-stdout',
    '--stderr=/dev/null',
    '--priority=30']
execute_run += ['/runner/solution.x']
res = subprocess.Popen(execute_run, stdout = subprocess.PIPE).communicate()[0]
# First line of the runner's own output is a status word; the remaining
# lines are parsed as "key: value" statistics.
ret = res.splitlines()[0]
stats = {}
for line in res.splitlines()[1:]:
    key = line.split(':')[0].strip().lower()
    value = u':'.join(line.split(':')[1:]).strip()
    stats[key] = value
# Report the measured resource usage back to the control service.
communicate('SETSTRING', {'name': 'execute_time_real', 'value': str(float(stats['time'])/1000)+'s'})
communicate('SETSTRING', {'name': 'execute_time_cpu', 'value': str(float(stats['cpu'])/1000)+'s'})
communicate('SETSTRING', {'name': 'execute_memory', 'value': str(int(stats['memory']))+'B'})
if ret != 'OK':
    # Anything other than OK is propagated verbatim as the final status.
    communicate('SETSTRING', {'name': 'status', 'value': ret})
    sys.exit(0)
has_checker = 'checker' in test

#COMPILE CHECKER
# If the test ships a custom checker, compile it on the host (root=/)
# with the compile-phase limits.
if has_checker:
    communicate('GETTESTBLOB', {'name': 'checker', 'path': '/tmp/checker.cpp'})
    checker_compiler = [ '/usr/bin/g++', '-static', '-O2', 'checker.cpp', '-ochecker.x']
    checker_compile_run = ['runner', '--quiet',
        '--root=/',
        '--work-dir=/tmp',
        '--env=simple',
        '--setuid=runner',
        '--setgid=runner',
        '--control-host='+options.host,
        '--control-port='+str(options.port),
        '--cgroup=/compile_checker',
        '--cgroup-memory='+str(compile_memory),
        '--cgroup-cputime='+str(compile_time),
        '--max-realtime='+str(compile_time),
        '--max-stack='+str(compile_stack),
        # Capture compiler stdout+stderr in /tmp/check.log so it can be
        # uploaded on failure.  The original list additionally passed
        # '--stdout=/dev/null' and '--stderr=/dev/null' AFTER these
        # options, which (with last-option-wins parsing) discarded the
        # very log the failure path below uploads.
        '--stdout=/tmp/check.log', '--trunc-stdout',
        '--stderr=__STDOUT__',
        '--priority=30']
    checker_compile_run += checker_compiler
    ret = subprocess.call(checker_compile_run)
    if ret != 0:
        # A checker that fails to build is an internal error (INT),
        # not the submitter's fault.
        communicate('SETBLOB', {'name': 'check_log', 'path': '/tmp/check.log'})
        communicate('SETSTRING', {'name': 'status', 'value': 'INT'})
        sys.exit(0)
#CHECK OUTPUT
# A hint (expected output) file is optional; fall back to /dev/null.
has_hint = 'hint' in test
if has_hint:
    communicate('GETTESTBLOB', {'name': 'hint', 'path': '/tmp/data.hint'})
    hint_file = '/tmp/data.hint'
else:
    hint_file = '/dev/null'
# Use the compiled custom checker when present, otherwise a whitespace-
# and blank-line-tolerant diff against the hint file.
if has_checker:
    checker = ['/tmp/checker.x', '/tmp/data.in', hint_file, '/tmp/data.out']
else:
    checker = ['/usr/bin/diff', '-q', '-w', '-B', hint_file, '/tmp/data.out']
checker_run = ['runner', '--quiet',
    '--root=/',
    '--work-dir=/tmp',
    '--env=simple',
    '--setuid=runner',
    '--setgid=runner',
    '--control-host='+options.host,
    '--control-port='+str(options.port),
    '--cgroup=/check',
    '--cgroup-memory='+str(check_memory),
    '--cgroup-cputime='+str(check_time),
    '--max-realtime='+str(check_time),
    '--stdout=/tmp/check.log',
    '--stderr=__STDOUT__',
    '--priority=30']
if not has_checker:
    # diff output replaces (rather than appends to) any previous log.
    checker_run += ['--trunc-stdout']
checker_run += checker
ret = subprocess.call(checker_run)
communicate('SETBLOB', {'name': 'check_log', 'path': '/tmp/check.log'})
if ret != 0:
    # Non-zero checker/diff exit means a wrong answer.
    communicate('SETSTRING', {'name': 'status', 'value': 'ANS'})
    sys.exit(0)
# NOTE(review): the allowed-languages check runs only after the solution
# has already been compiled, executed and checked; running it before the
# compile phase would save all of that work.
has_languages = 'languages' in test
if has_languages:
    languages = [ l.strip().lower() for l in test.get('languages')['value'].split(',') ]
    if fileext not in languages:
        communicate('SETSTRING', {'name': 'status', 'value': 'LANG'})
        sys.exit(0)
communicate('SETSTRING', {'name': 'status', 'value': 'OK'})
| {
"content_hash": "ab78f115c31281395d761ff2779cdb3f",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 112,
"avg_line_length": 34.20338983050848,
"alnum_prop": 0.5681367690782954,
"repo_name": "zielmicha/satori",
"id": "2068f3f11d4f52a0d37cda5ad321236ba78358d2",
"size": "9389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori.core/satori/core/judges/simple_judge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "165337"
},
{
"name": "CSS",
"bytes": "72202"
},
{
"name": "HTML",
"bytes": "56647"
},
{
"name": "Java",
"bytes": "270392"
},
{
"name": "JavaScript",
"bytes": "300430"
},
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "Perl",
"bytes": "1572"
},
{
"name": "Python",
"bytes": "1011796"
},
{
"name": "Shell",
"bytes": "231478"
},
{
"name": "TeX",
"bytes": "17071"
}
],
"symlink_target": ""
} |
"""
Django settings for core project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w_gt0z8omw-n71pzqs#h_+pr_@j##2s@l8!dd4$_dy#2!u_ln*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'gallery',
'mptt',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "6aa9cfc3f33158cd913863cf670417b2",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 91,
"avg_line_length": 25.3739837398374,
"alnum_prop": 0.6802306952899712,
"repo_name": "c0ntribut0r/django-mptt-urls",
"id": "f06c6e74da6e5082215432ecb50a0c00aa182e05",
"size": "3121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/core/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1735"
},
{
"name": "Python",
"bytes": "9337"
}
],
"symlink_target": ""
} |
"""
FILE: sample_conv_summarization_async.py
DESCRIPTION:
This sample demonstrates how to analyze a conversation for issue resolution.
For more info about how to setup a CLU conversation project, see the README.
USAGE:
python sample_conv_summarization_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource.
2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource.
"""
import asyncio
async def sample_conv_summarization_async():
    """Submit one agent/customer conversation for summarization and print the
    "issue" and "resolution" summaries.

    Reads AZURE_CONVERSATIONS_ENDPOINT and AZURE_CONVERSATIONS_KEY from the
    environment (raises KeyError if either is missing).
    """
    # [START analyze_conversation_app]
    # import libraries
    import os
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.language.conversations.aio import ConversationAnalysisClient

    # get secrets
    endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
    key = os.environ["AZURE_CONVERSATIONS_KEY"]

    # analyze query
    client = ConversationAnalysisClient(endpoint, AzureKeyCredential(key))
    async with client:
        # Start a long-running analysis job: one three-turn conversation,
        # two summarization tasks (one per summary aspect).
        poller = await client.begin_conversation_analysis(
            task={
                "displayName": "Analyze conversations from xxx",
                "analysisInput": {
                    "conversations": [
                        {
                            "conversationItems": [
                                {
                                    "text": "Hello, how can I help you?",
                                    "modality": "text",
                                    "id": "1",
                                    "role": "Agent",
                                    "participantId": "Agent"
                                },
                                {
                                    "text": "How to upgrade Office? I am getting error messages the whole day.",
                                    "modality": "text",
                                    "id": "2",
                                    "role": "Customer",
                                    "participantId": "Customer"
                                },
                                {
                                    "text": "Press the upgrade button please. Then sign in and follow the instructions.",
                                    "modality": "text",
                                    "id": "3",
                                    "role": "Agent",
                                    "participantId": "Agent"
                                }
                            ],
                            "modality": "text",
                            "id": "conversation1",
                            "language": "en"
                        },
                    ]
                },
                "tasks": [
                    {
                        "taskName": "Issue task",
                        "kind": "ConversationalSummarizationTask",
                        "parameters": {
                            "summaryAspects": ["issue"]
                        }
                    },
                    {
                        "taskName": "Resolution task",
                        "kind": "ConversationalSummarizationTask",
                        "parameters": {
                            "summaryAspects": ["resolution"]
                        }
                    },
                ]
            }
        )

        # view result
        result = await poller.result()
        task_results = result["tasks"]["items"]
        # One result entry per submitted task; print errors, warnings or
        # the produced summaries for each.
        for task in task_results:
            print(f"\n{task['taskName']} status: {task['status']}")
            task_result = task["results"]
            if task_result["errors"]:
                print("... errors occurred ...")
                for error in task_result["errors"]:
                    print(error)
            else:
                conversation_result = task_result["conversations"][0]
                if conversation_result["warnings"]:
                    print("... view warnings ...")
                    for warning in conversation_result["warnings"]:
                        print(warning)
                else:
                    summaries = conversation_result["summaries"]
                    print("... view task result ...")
                    for summary in summaries:
                        print(f"{summary['aspect']}: {summary['text']}")
    # [END analyze_conversation_app]
async def main():
    """Entry-point coroutine for asyncio.run()."""
    await sample_conv_summarization_async()


if __name__ == '__main__':
    asyncio.run(main())
| {
"content_hash": "0c5127b7e7233bb74ccf365eef550184",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 121,
"avg_line_length": 38.97435897435897,
"alnum_prop": 0.4243421052631579,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6919f6bdc53c7fe26a56ff817c5b96aa18bf8fb1",
"size": "4727",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_conv_summarization_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
desispec.pipeline.defs
=========================
Common definitions needed by pipeline modules.
"""
from __future__ import absolute_import, division, print_function
task_states = [
"waiting",
"ready",
"running",
"done",
"failed"
]
"""The valid states of each pipeline task."""
task_state_to_int = {
"waiting" : 0,
"ready" : 1,
"running" : 2,
"done" : 3,
"failed" : 4
}
task_int_to_state = {
0 : "waiting",
1 : "ready",
2 : "running",
3 : "done",
4 : "failed"
}
state_colors = {
"waiting": "#000000",
"ready" : "#0000ff",
"running": "#ffff00",
"done": "#00ff00",
"failed": "#ff0000",
}
"""State colors used for visualization."""
task_name_sep = "_"
"""The separator string used for building object names."""
prod_options_name = "options.yaml"
"""The name of the options file inside the run directory."""
| {
"content_hash": "2d9e557217430793f819acd068a70c46",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 64,
"avg_line_length": 17.607843137254903,
"alnum_prop": 0.5612472160356348,
"repo_name": "desihub/desispec",
"id": "656766ea21e4127b28053ab0b4221b534decf645",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/desispec/pipeline/defs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "761"
},
{
"name": "Python",
"bytes": "4219435"
},
{
"name": "Shell",
"bytes": "17927"
}
],
"symlink_target": ""
} |
import sqlite3
import sys
import os
import platform
def replace_maccr(x):
    """Rewrite the text file at path *x* with classic-Mac CR line endings
    converted to Unix LF.

    CRLF pairs are collapsed first so Windows-style files are not turned
    into double-spaced output by the bare-CR replacement.  Uses ``with`` so
    the file handles are closed even if reading or writing fails.
    """
    with open(x, 'r') as src:
        text = src.read()
    # Order matters: handle '\r\n' before lone '\r'.
    text = text.replace('\r\n', '\n').replace('\r', '\n')
    with open(x, 'w') as dst:
        dst.write(text)
def db_connect(command, path, db_name):
    """Connect to the sqlite database at ``path + db_name``, execute
    *command* (a statement with no parameters) and return the cursor.

    The connection is committed before returning (in the original, the
    commit/close statements were placed after ``return`` and therefore
    never ran, so writes were never persisted).  The connection is left
    open so the caller can fetch rows from the returned cursor.
    """
    db_path = path + db_name
    connect = sqlite3.connect(db_path)
    cursor = connect.cursor()
    data = cursor.execute(command)
    connect.commit()
    return data
def get_app_support_dir():
    """Return the per-user application support directory.

    macOS -> ``/Users/<user>/Library/Application Support/``
    Linux -> ``/home/<user>/``

    Raises RuntimeError on any other platform (the original fell through
    and raised an opaque UnboundLocalError there).  The user name comes
    from the LOGNAME environment variable.
    """
    user_sys = platform.system()
    user_name = os.getenv('LOGNAME')
    if user_sys == "Darwin":
        return "/Users/%s/Library/Application Support/" % user_name
    if user_sys == "Linux":
        return "/home/%s/" % user_name
    raise RuntimeError("Unsupported platform: %s" % user_sys)
| {
"content_hash": "742ed31bc3167f3465759187fc227be8",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 94,
"avg_line_length": 27.36111111111111,
"alnum_prop": 0.617258883248731,
"repo_name": "btnpushnmunky/cupcake",
"id": "ee657faff5b0414adbb092ab94f312a3cdc50426",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25493"
}
],
"symlink_target": ""
} |
"""
homeassistant.components.httpinterface
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides an API and a HTTP interface for debug purposes.
By default it will run on port 8123.
All API calls have to be accompanied by an 'api_password' parameter and will
return JSON. If successful calls will return status code 200 or 201.
Other status codes that can occur are:
- 400 (Bad Request)
- 401 (Unauthorized)
- 404 (Not Found)
- 405 (Method not allowed)
The api supports the following actions:
/api - GET
Returns message if API is up and running.
Example result:
{
"message": "API running."
}
/api/states - GET
Returns a list of entities for which a state is available
Example result:
[
{ .. state object .. },
{ .. state object .. }
]
/api/states/<entity_id> - GET
Returns the current state from an entity
Example result:
{
"attributes": {
"next_rising": "07:04:15 29-10-2013",
"next_setting": "18:00:31 29-10-2013"
},
"entity_id": "weather.sun",
"last_changed": "23:24:33 28-10-2013",
"state": "below_horizon"
}
/api/states/<entity_id> - POST
Updates the current state of an entity. Returns status code 201 if successful
with location header of updated resource and as body the new state.
parameter: new_state - string
optional parameter: attributes - JSON encoded object
Example result:
{
"attributes": {
"next_rising": "07:04:15 29-10-2013",
"next_setting": "18:00:31 29-10-2013"
},
"entity_id": "weather.sun",
"last_changed": "23:24:33 28-10-2013",
"state": "below_horizon"
}
/api/events/<event_type> - POST
Fires an event with event_type
optional parameter: event_data - JSON encoded object
Example result:
{
"message": "Event download_file fired."
}
"""
import json
import threading
import logging
import time
import gzip
import os
from http.server import SimpleHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, parse_qs
import homeassistant as ha
from homeassistant.const import (
SERVER_PORT, CONTENT_TYPE_JSON,
HTTP_HEADER_HA_AUTH, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_ACCEPT_ENCODING,
HTTP_HEADER_CONTENT_ENCODING, HTTP_HEADER_VARY, HTTP_HEADER_CONTENT_LENGTH,
HTTP_HEADER_CACHE_CONTROL, HTTP_HEADER_EXPIRES, HTTP_OK, HTTP_UNAUTHORIZED,
HTTP_NOT_FOUND, HTTP_METHOD_NOT_ALLOWED, HTTP_UNPROCESSABLE_ENTITY)
import homeassistant.remote as rem
import homeassistant.util as util
import homeassistant.bootstrap as bootstrap
DOMAIN = "http"
DEPENDENCIES = []
CONF_API_PASSWORD = "api_password"
CONF_SERVER_HOST = "server_host"
CONF_SERVER_PORT = "server_port"
CONF_DEVELOPMENT = "development"
DATA_API_PASSWORD = 'api_password'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config=None):
    """ Sets up the HTTP API and debug interface. """
    if config is None or DOMAIN not in config:
        config = {DOMAIN: {}}

    conf = config[DOMAIN]

    api_password = util.convert(conf.get(CONF_API_PASSWORD), str)
    no_password_set = api_password is None
    if no_password_set:
        # Still require *a* password, just a random throwaway one.
        api_password = util.get_random_string()

    # If no server host is given, accept all incoming requests
    server_host = conf.get(CONF_SERVER_HOST, '0.0.0.0')
    server_port = conf.get(CONF_SERVER_PORT, SERVER_PORT)
    development = str(conf.get(CONF_DEVELOPMENT, "")) == "1"

    server = HomeAssistantHTTPServer(
        (server_host, server_port), RequestHandler, hass, api_password,
        development, no_password_set)

    # Begin serving only once Home Assistant itself has started.
    hass.bus.listen_once(
        ha.EVENT_HOMEASSISTANT_START,
        lambda event:
        threading.Thread(target=server.start, daemon=True).start())

    hass.http = server
    hass.local_api = rem.API(util.get_local_ip(), api_password, server_port)

    return True
class HomeAssistantHTTPServer(ThreadingMixIn, HTTPServer):
    """ Handle HTTP requests in a threaded fashion. """
    # pylint: disable=too-few-public-methods

    allow_reuse_address = True
    daemon_threads = True

    # pylint: disable=too-many-arguments
    def __init__(self, server_address, request_handler_class,
                 hass, api_password, development, no_password_set):
        super().__init__(server_address, request_handler_class)

        self.server_address = server_address
        self.hass = hass
        self.api_password = api_password
        self.development = development
        self.no_password_set = no_password_set
        # Registered (method, url, callback, require_auth) handler tuples.
        self.paths = []

        # We will lazy init this one if needed
        self.event_forwarder = None

        if development:
            _LOGGER.info("running http in development mode")

    def start(self):
        """ Starts the server (blocks until shutdown). """
        # Shut the server down again when Home Assistant stops.
        self.hass.bus.listen_once(
            ha.EVENT_HOMEASSISTANT_STOP,
            lambda event: self.shutdown())

        _LOGGER.info(
            "Starting web interface at http://%s:%d", *self.server_address)

        # 31-1-2015: Refactored frontend/api components out of this component
        # To prevent stuff from breaking, load the two extracted components
        bootstrap.setup_component(self.hass, 'api')
        bootstrap.setup_component(self.hass, 'frontend')

        self.serve_forever()

    def register_path(self, method, url, callback, require_auth=True):
        """ Registers a path with the server. """
        self.paths.append((method, url, callback, require_auth))
# pylint: disable=too-many-public-methods,too-many-locals
class RequestHandler(SimpleHTTPRequestHandler):
    """
    Handles incoming HTTP requests

    We extend from SimpleHTTPRequestHandler instead of Base so we
    can use the guess content type methods.
    """
    server_version = "HomeAssistant/1.0"

    def _handle_request(self, method): # pylint: disable=too-many-branches
        """ Does some common checks and calls appropriate method. """
        url = urlparse(self.path)

        # Read query input
        data = parse_qs(url.query)

        # parse_qs gives a list for each value, take the latest element
        for key in data:
            data[key] = data[key][-1]

        # Did we get post input ?
        content_length = int(self.headers.get(HTTP_HEADER_CONTENT_LENGTH, 0))
        if content_length:
            body_content = self.rfile.read(content_length).decode("UTF-8")

            try:
                # JSON body fields are merged over the query-string fields.
                data.update(json.loads(body_content))
            except (TypeError, ValueError):
                # TypeError if JSON object is not a dict
                # ValueError if we could not parse JSON
                _LOGGER.exception(
                    "Exception parsing JSON: %s", body_content)
                self.write_json_message(
                    "Error parsing JSON", HTTP_UNPROCESSABLE_ENTITY)
                return

        if self.server.no_password_set:
            api_password = self.server.api_password
        else:
            # Password may arrive via header or in the request data.
            api_password = self.headers.get(HTTP_HEADER_HA_AUTH)

            if not api_password and DATA_API_PASSWORD in data:
                api_password = data[DATA_API_PASSWORD]

        # Allow clients to tunnel the real verb through a data field.
        if '_METHOD' in data:
            method = data.pop('_METHOD')

        # Var to keep track if we found a path that matched a handler but
        # the method was different
        path_matched_but_not_method = False

        # Var to hold the handler for this path and method if found
        handle_request_method = False
        require_auth = True

        # Check every handler to find matching result
        for t_method, t_path, t_handler, t_auth in self.server.paths:
            # we either do string-comparison or regular expression matching
            # pylint: disable=maybe-no-member
            if isinstance(t_path, str):
                path_match = url.path == t_path
            else:
                path_match = t_path.match(url.path)

            if path_match and method == t_method:
                # Call the method
                handle_request_method = t_handler
                require_auth = t_auth
                break

            elif path_match:
                path_matched_but_not_method = True

        # Did we find a handler for the incoming request?
        if handle_request_method:
            # For some calls we need a valid password
            if require_auth and api_password != self.server.api_password:
                self.write_json_message(
                    "API password missing or incorrect.", HTTP_UNAUTHORIZED)
            else:
                handle_request_method(self, path_match, data)
        elif path_matched_but_not_method:
            self.send_response(HTTP_METHOD_NOT_ALLOWED)
            self.end_headers()
        else:
            self.send_response(HTTP_NOT_FOUND)
            self.end_headers()

    def do_HEAD(self): # pylint: disable=invalid-name
        """ HEAD request handler. """
        self._handle_request('HEAD')

    def do_GET(self): # pylint: disable=invalid-name
        """ GET request handler. """
        self._handle_request('GET')

    def do_POST(self): # pylint: disable=invalid-name
        """ POST request handler. """
        self._handle_request('POST')

    def do_PUT(self): # pylint: disable=invalid-name
        """ PUT request handler. """
        self._handle_request('PUT')

    def do_DELETE(self): # pylint: disable=invalid-name
        """ DELETE request handler. """
        self._handle_request('DELETE')

    def write_json_message(self, message, status_code=HTTP_OK):
        """ Helper method to return a message to the caller. """
        self.write_json({'message': message}, status_code=status_code)

    def write_json(self, data=None, status_code=HTTP_OK, location=None):
        """ Helper method to return JSON to the caller. """
        self.send_response(status_code)
        self.send_header(HTTP_HEADER_CONTENT_TYPE, CONTENT_TYPE_JSON)

        if location:
            self.send_header('Location', location)

        self.end_headers()

        if data is not None:
            self.wfile.write(
                json.dumps(data, indent=4, sort_keys=True,
                           cls=rem.JSONEncoder).encode("UTF-8"))

    def write_file(self, path):
        """ Returns a file to the user. """
        try:
            with open(path, 'rb') as inp:
                self.write_file_pointer(self.guess_type(path), inp)
        except IOError:
            self.send_response(HTTP_NOT_FOUND)
            self.end_headers()
            _LOGGER.exception("Unable to serve %s", path)

    def write_file_pointer(self, content_type, inp):
        """
        Helper function to write a file pointer to the user.
        Does not do error handling.
        """
        do_gzip = 'gzip' in self.headers.get(HTTP_HEADER_ACCEPT_ENCODING, '')

        self.send_response(HTTP_OK)
        self.send_header(HTTP_HEADER_CONTENT_TYPE, content_type)
        self.set_cache_header()

        if do_gzip:
            # Whole file is compressed in memory before sending.
            gzip_data = gzip.compress(inp.read())

            self.send_header(HTTP_HEADER_CONTENT_ENCODING, "gzip")
            self.send_header(HTTP_HEADER_VARY, HTTP_HEADER_ACCEPT_ENCODING)
            self.send_header(HTTP_HEADER_CONTENT_LENGTH, str(len(gzip_data)))
        else:
            # fstat field 6 is st_size (uncompressed length).
            fst = os.fstat(inp.fileno())
            self.send_header(HTTP_HEADER_CONTENT_LENGTH, str(fst[6]))

        self.end_headers()

        if self.command == 'HEAD':
            return
        elif do_gzip:
            self.wfile.write(gzip_data)
        else:
            self.copyfile(inp, self.wfile)

    def set_cache_header(self):
        """ Add cache headers if not in development """
        if not self.server.development:
            # 1 year in seconds
            cache_time = 365 * 86400

            self.send_header(
                HTTP_HEADER_CACHE_CONTROL,
                "public, max-age={}".format(cache_time))
            self.send_header(
                HTTP_HEADER_EXPIRES,
                self.date_time_string(time.time()+cache_time))
| {
"content_hash": "2a8114ec35f23039aa1155ba464881b8",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 79,
"avg_line_length": 31.569920844327175,
"alnum_prop": 0.6211450062682825,
"repo_name": "hemantsangwan/Python-home-assistant",
"id": "5528267686f6894d23346f929bd38cd94d493bb1",
"size": "11965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homeassistant/components/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "708583"
},
{
"name": "Python",
"bytes": "413732"
},
{
"name": "Shell",
"bytes": "3984"
}
],
"symlink_target": ""
} |
from riskmetrics import riskmetrics
def test_construct_cpe():
    """construct_cpe formats vendor/product/version as a CPE 2.2 URI."""
    cpe = riskmetrics.construct_cpe("vendor", "product", "0.1")
    assert cpe == 'cpe:/a:vendor:product:0.1'
def test_get_latest_version():
    """get_latest_version returns an empty string for an unknown
    vendor/product pair.

    NOTE(review): assumes the lookup is deterministic for nonsense input;
    if it queries an external data source this test may be flaky -- confirm.
    """
    version = riskmetrics.get_latest_version("35g3q", "fq34gf")
    assert version == ""
| {
"content_hash": "63bf62ce54fa7ead642c8e5145364aa6",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 63,
"avg_line_length": 26.90909090909091,
"alnum_prop": 0.6790540540540541,
"repo_name": "hacksmath/csci4900",
"id": "1823e888d917d7b778fae9619573943226f23d41",
"size": "328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "riskmetrics/test/test_riskmetrics.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from environment import IsDevServer
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from third_party.json_schema_compiler.memoize import memoize
from render_servlet import RenderServlet
from object_store_creator import ObjectStoreCreator
from server_instance import ServerInstance
from gcs_file_system_provider import CloudStorageFileSystemProvider
class InstanceServletRenderServletDelegate(RenderServlet.Delegate):
  '''AppEngine instances should never need to call out to SVN. That should only
  ever be done by the cronjobs, which then write the result into DataStore,
  which is as far as instances look. To enable this, crons can pass a custom
  (presumably online) ServerInstance into Get().

  Why? SVN is slow and a bit flaky. Cronjobs failing is annoying but temporary.
  Instances failing affects users, and is really bad.

  Anyway - to enforce this, we actually don't give instances access to SVN. If
  anything is missing from datastore, it'll be a 404. If the cronjobs don't
  manage to catch everything - uhoh. On the other hand, we'll figure it out
  pretty soon, and it also means that legitimate 404s are caught before a round
  trip to SVN.
  '''
  def __init__(self, delegate):
    self._delegate = delegate

  # Memoized so all servlets created by one delegate share a ServerInstance
  # (and therefore its caches).
  @memoize
  def CreateServerInstance(self):
    """Build the ServerInstance wired with all file-system providers."""
    object_store_creator = ObjectStoreCreator(start_empty=False)
    branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
    # In production have offline=True so that we can catch cron errors. In
    # development it's annoying to have to run the cron job, so offline=False.
    #
    # XXX(kalman): The above comment is not true, I have temporarily disabled
    # this while the cron is running out of memory and not reliably finishing.
    # In the meantime, live dangerously and fetch content if it's not there.
    # I.e. never offline. See http://crbug.com/345361.
    host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
        object_store_creator,
        offline=False)  # XXX(kalman): condition should be "not IsDevServer()"
    github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
        object_store_creator)
    return ServerInstance(object_store_creator,
                          CompiledFileSystem.Factory(object_store_creator),
                          branch_utility,
                          host_file_system_provider,
                          github_file_system_provider,
                          CloudStorageFileSystemProvider(object_store_creator))
class InstanceServlet(object):
  '''Servlet used on regular AppEngine instances.
  Obtain constructors via GetConstructor() so that cache state is shared
  amongst instances through the memoizing Delegate.
  '''

  class Delegate(object):
    '''Runtime dependencies, overridable for testing.
    '''
    def CreateBranchUtility(self, object_store_creator):
      return BranchUtility.Create(object_store_creator)

    def CreateHostFileSystemProvider(self, object_store_creator, **optargs):
      return HostFileSystemProvider(object_store_creator, **optargs)

    def CreateGithubFileSystemProvider(self, object_store_creator):
      return GithubFileSystemProvider(object_store_creator)

  @staticmethod
  def GetConstructor(delegate_for_test=None):
    delegate = delegate_for_test or InstanceServlet.Delegate()
    render_delegate = InstanceServletRenderServletDelegate(delegate)
    return lambda request: RenderServlet(request, render_delegate)

  # NOTE: if this were a real Servlet it would implement a Get() method, but
  # GetConstructor returns an appropriate lambda function (Request -> Servlet).
| {
"content_hash": "56cfaacbd351963a3c323a09c6bd3c00",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 80,
"avg_line_length": 49.4025974025974,
"alnum_prop": 0.7465825446898002,
"repo_name": "patrickm/chromium.src",
"id": "a4080497185719152cf90e5f2b199d59aebbaca5",
"size": "3967",
"binary": false,
"copies": "3",
"ref": "refs/heads/nw",
"path": "chrome/common/extensions/docs/server2/instance_servlet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40737238"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "207930633"
},
{
"name": "CSS",
"bytes": "939170"
},
{
"name": "Java",
"bytes": "5844934"
},
{
"name": "JavaScript",
"bytes": "17837835"
},
{
"name": "Mercury",
"bytes": "10533"
},
{
"name": "Objective-C",
"bytes": "886228"
},
{
"name": "Objective-C++",
"bytes": "6667789"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10857933"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1326032"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
import sys
import getpass
import re
import os
import glob
import traceback
import subprocess
import datetime
import tempfile
import shutil
import upgrade_util as uu
from utilities import _write_to_file
from utilities import error_
from utilities import get_dbver
from utilities import get_db_madlib_version
from utilities import get_rev_num
from utilities import info_
from utilities import is_rev_gte
from utilities import remove_comments_from_sql
from utilities import run_query
# Required Python version
py_min_ver = [2, 6]

# Check python version
if sys.version_info[:2] < py_min_ver:
    print("ERROR: python version too old ({0}). You need {1} or greater.".
          format('.'.join(map(str, sys.version_info[:3])),
                 '.'.join(map(str, py_min_ver))))
    exit(1)

# Find MADlib root directory. This file is installed to
# $MADLIB_ROOT/madpack/madpack.py, so to get $MADLIB_ROOT we need to go
# two levels up in the directory hierarchy. We use (a) os.path.realpath and
# (b) __file__ (instead of sys.argv[0]) because madpack.py could be called
# (a) through a symbolic link and (b) not as the main module.
maddir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/..")  # MADlib root dir
sys.path.append(maddir + "/madpack")

# Import MADlib python modules
# NOTE: these imports must stay after the sys.path.append above so that
# configyml is resolved from the MADlib tree.
import argparse
import configyml

# Some read-only variables
this = os.path.basename(sys.argv[0])  # name of this script

# Default directories
maddir_conf = maddir + "/config"  # Config dir
maddir_lib = maddir + "/lib/libmadlib.so"  # C/C++ libraries

# Read the config files
ports = configyml.get_ports(maddir_conf)  # object made of Ports.yml
new_madlib_ver = configyml.get_version(maddir_conf)  # MADlib OS-level version
portid_list = []
for port in ports:
    portid_list.append(port)

SUPPORTED_PORTS = ('postgres', 'greenplum')

# Global variables, populated once command-line args are parsed.
portid = None  # Target port ID (eg: pg90, gp40)
dbver = None  # DB version
con_args = {}  # DB connection arguments
verbose = None  # Verbose flag
keeplogs = None  # set True on any failure so temp logs are preserved
tmpdir = None  # scratch directory for generated SQL and logs

# Identifiers for the three processing modes shared by the module helpers.
DB_CREATE_OBJECTS = "db_create_objects"
INSTALL_DEV_CHECK = "install_dev_check"
UNIT_TEST = "unit_test"
def _make_dir(dir):
"""
# Create a temp dir
# @param dir temp directory path
"""
if not os.path.isdir(dir):
try:
os.makedirs(dir)
except:
print "ERROR: can not create directory: %s. Check permissions." % dir
exit(1)
# ------------------------------------------------------------------------------
def _internal_run_query(sql, show_error):
    """
    Execute a SQL query against the target platform DB using the default
    command-line utility, with the module-level connection arguments.

    Very limited: no text output containing "new line" characters allowed.

    @param sql query text to execute
    @param show_error whether to display the SQL error message
    """
    return run_query(sql, con_args, show_error)
# ------------------------------------------------------------------------------
def _get_relative_maddir(maddir, port):
""" Return a relative path version of maddir
GPDB installations have a symlink outside of GPHOME that
links to the current GPHOME. After a DB upgrade, this symlink is updated to
the new GPHOME.
'maddir_lib', which uses the absolute path of GPHOME, is hardcoded into each
madlib function definition. Replacing the GPHOME path with the equivalent
relative path makes it simpler to perform DB upgrades without breaking MADlib.
"""
if port == 'postgres':
# do nothing for postgres
return maddir
# e.g. maddir_lib = $GPHOME/madlib/Versions/1.9/lib/libmadlib.so
# 'madlib' is supposed to be in this path, which is the default folder
# used by GPPKG to install madlib
try:
abs_gphome, tail = maddir.split('madlib/')
except ValueError:
return maddir
# Check outside $GPHOME if there is a symlink to this absolute path
# os.pardir is equivalent to ..
# os.path.normpath removes the extraneous .. from that path
rel_gphome = os.path.normpath(os.path.join(abs_gphome, os.pardir, 'greenplum-db'))
if (os.path.islink(rel_gphome) and
os.path.realpath(rel_gphome) == os.path.realpath(abs_gphome)):
# if the relative link exists and is pointing to current location
return os.path.join(rel_gphome, 'madlib', tail)
else:
return maddir
# ------------------------------------------------------------------------------
def _cleanup_comments_in_sqlfile(output_filename, upgrade):
    """
    @brief Strip comments from the generated sql script and rewrite the file
    with the cleaned-up contents. No-op when performing an upgrade.
    """
    if upgrade:
        return
    with open(output_filename, 'r+') as fh:
        cleaned_sql = remove_comments_from_sql(fh.read())
    # Write the cleaned sql to a sibling temp file and move it over the
    # original: Python does not let us erase all content of an open file
    # and rewrite the same file in place.
    tmp_name = output_filename + '.tmp'
    with open(tmp_name, 'w') as fh:
        _write_to_file(fh, cleaned_sql)
    os.rename(tmp_name, output_filename)
def _run_m4_and_append(schema, maddir_mod_py, module, sqlfile,
                       output_filehandle, pre_sql=None):
    """
    Process a .sql_in file with M4 and append the result to an open file.

    @param schema target schema name (exposed as MADLIB_SCHEMA to m4)
    @param maddir_mod_py module dir containing Python code
    @param module module name (exposed as MODULE_NAME to m4)
    @param sqlfile path of the .sql_in file to process
    @param output_filehandle open file object that receives m4's output
    @param pre_sql optional SQL text written before the processed file

    Raises ValueError when sqlfile is missing, Exception when m4 fails.
    """
    # Check if the SQL file exists
    if not os.path.isfile(sqlfile):
        error_(this, "Missing module SQL file (%s)" % sqlfile, False)
        raise ValueError

    # Prepare the file using M4
    try:
        # Add the before SQL
        if pre_sql:
            output_filehandle.writelines([pre_sql, '\n\n'])
        # Find the madpack dir (platform specific or generic)
        if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/madpack"):
            maddir_madpack = maddir + "/ports/" + portid + "/" + dbver + "/madpack"
        else:
            maddir_madpack = maddir + "/madpack"
        maddir_ext_py = maddir + "/lib/python"

        m4args = ['m4',
                  '-P',
                  '-DMADLIB_SCHEMA=' + schema,
                  '-DPLPYTHON_LIBDIR=' + maddir_mod_py,
                  '-DEXT_PYTHON_LIBDIR=' + maddir_ext_py,
                  '-DMODULE_PATHNAME=' + maddir_lib,
                  '-DMODULE_NAME=' + module,
                  '-I' + maddir_madpack,
                  sqlfile]

        info_(this, "> ... parsing: " + " ".join(m4args), verbose)
        # Flush Python-side buffered writes before m4 writes to the same fd,
        # otherwise output ordering in the file can be scrambled.
        output_filehandle.flush()
        subprocess.call(m4args, stdout=output_filehandle)
    except:
        error_(this, "Failed executing m4 on %s" % sqlfile, False)
        raise Exception
def _run_install_check_sql(schema, maddir_mod_py, module, sqlfile,
                           tmpfile, logfile, pre_sql):
    """
    Run one install-check SQL file against the database.

    @param schema name of the target schema
    @param maddir_mod_py name of the module dir with Python code
    @param module name of the module
    @param sqlfile name of the file to parse
    @param tmpfile name of the temp file to run
    @param logfile name of the log file (stdout)
    @param pre_sql optional SQL to run before executing the file
    @return exit status of the psql invocation
    """
    try:
        # 'with' guarantees the temp handle is closed even when m4 fails;
        # the previous explicit open()/close() pair leaked the handle on error.
        with open(tmpfile, 'w') as f:
            _run_m4_and_append(schema, maddir_mod_py, module, sqlfile, f, pre_sql)
    except:
        error_(this, "Failed to temp m4 processed file %s." % tmpfile, False)
        raise Exception

    # Run the SQL using DB command-line utility
    if portid in SUPPORTED_PORTS:
        sqlcmd = 'psql'
        # Test the DB cmd line utility
        std, err = subprocess.Popen(['which', sqlcmd], stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE).communicate()
        if not std:
            error_(this, "Command not found: %s" % sqlcmd, True)

        # con_args['host'] has the form host:port.
        runcmd = [sqlcmd, '-a',
                  '-v', 'ON_ERROR_STOP=1',
                  '-h', con_args['host'].split(':')[0],
                  '-p', con_args['host'].split(':')[1],
                  '-d', con_args['database'],
                  '-U', con_args['user'],
                  '--no-password',
                  '-f', tmpfile]
        runenv = os.environ
        if 'password' in con_args:
            runenv["PGPASSWORD"] = con_args['password']
        runenv["PGOPTIONS"] = '-c client_min_messages=notice'

        # Open log file
        try:
            log = open(logfile, 'w')
        except:
            error_(this, "Cannot create log file: %s" % logfile, False)
            raise Exception

        # Run the SQL
        try:
            info_(this, "> ... executing " + tmpfile, verbose)
            retval = subprocess.call(runcmd, env=runenv, stdout=log, stderr=log)
        except:
            error_(this, "Failed executing %s" % tmpfile, False)
            raise Exception
        finally:
            log.close()
    return retval
# ------------------------------------------------------------------------------
def _run_sql_file(schema, sqlfile):
    """
    Run SQL file
    @param schema name of the target schema
    @param sqlfile name of the file to parse
    @return 'PASS', 'FAIL' or 'ERROR' as reported by _parse_result_logfile
    """
    # Run the SQL using DB command-line utility
    if portid in SUPPORTED_PORTS:
        sqlcmd = 'psql'
        # Test the DB cmd line utility
        std, err = subprocess.Popen(['which', sqlcmd], stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE).communicate()
        if not std:
            error_(this, "Command not found: %s" % sqlcmd, True)

        # con_args['host'] has the form host:port.
        runcmd = [sqlcmd, '-a',
                  '-v', 'ON_ERROR_STOP=1',
                  '-h', con_args['host'].split(':')[0],
                  '-p', con_args['host'].split(':')[1],
                  '-d', con_args['database'],
                  '-U', con_args['user'],
                  '--no-password',
                  '--single-transaction',
                  '-f', sqlfile]
        runenv = os.environ
        if 'password' in con_args:
            runenv["PGPASSWORD"] = con_args['password']
        runenv["PGOPTIONS"] = '-c client_min_messages=notice'

        # Open log file
        logfile = sqlfile + '.log'
        try:
            log = open(logfile, 'w')
        except:
            error_(this, "Cannot create log file: %s" % logfile, False)
            raise Exception

        # Run the SQL
        try:
            info_(this, "> ... executing " + sqlfile, verbose)
            info_(this, ' '.join(runcmd), verbose)
            retval = subprocess.call(runcmd, env=runenv, stdout=log, stderr=log)
        except:
            error_(this, "Failed executing %s" % sqlfile, False)
            raise Exception
        finally:
            log.close()
    # Check the exit status
    # NOTE(review): when portid is not in SUPPORTED_PORTS, 'retval' and
    # 'logfile' are unbound here -- presumably the port is validated before
    # this function is reached; confirm with callers.
    result = _parse_result_logfile(retval, logfile, sqlfile)
    return result
# ------------------------------------------------------------------------------
def _parse_result_logfile(retval, logfile, sql_abspath,
sql_filename=None, module=None, milliseconds=None):
"""
Function to parse the logfile and return if its content indicate a failure
or success.
"""
is_install_check_logfile = bool(sql_filename and module)
# Check the exit status
if retval != 0:
result = 'FAIL'
global keeplogs
keeplogs = True
# Since every single statement in the test file gets logged,
# an empty log file indicates an empty or a failed test
elif os.path.isfile(logfile) and os.path.getsize(logfile) > 0:
result = 'PASS'
# Otherwise
else:
result = 'ERROR'
if is_install_check_logfile:
# Output result
print "TEST CASE RESULT|Module: " + module + \
"|" + os.path.basename(sql_filename) + "|" + result + \
"|Time: %d milliseconds" % (milliseconds)
if result == 'FAIL':
error_(this, "Failed executing %s" % sql_abspath, stop=False)
error_(this, "Check the log at %s" % logfile, stop=False)
return result
def _check_db_port(portid):
    """
    Verify that the connected database matches the expected platform.

    @param portid expected DB port id - to be validated
    @return True when version() confirms the platform, False otherwise
    """
    # Postgres
    try:
        row = _internal_run_query("SELECT version() AS version", True)
    except:
        error_(this, "Cannot validate DB platform type", True)
    if row:
        version_str = row[0]['version'].lower()
        if portid in version_str:
            if portid == 'postgres':
                # Greenplum also reports 'PostgreSQL' in version(); only
                # accept when it is NOT actually a Greenplum server.
                return 'greenplum' not in version_str
            if portid == 'greenplum':
                return True
    return False
# ------------------------------------------------------------------------------
def _print_vers(new_madlib_ver, db_madlib_ver, con_args, schema):
    """
    Print version information.
    @param new_madlib_ver OS-level MADlib version
    @param db_madlib_ver DB-level MADlib version
    @param con_args database connection arguments
    @param schema MADlib schema name
    """
    info_(this, "MADlib tools version = %s (%s)" % (str(new_madlib_ver), sys.argv[0]), True)
    if not con_args:
        return
    # The DB version line is best-effort: missing connection fields fall
    # back to an [Unknown] report rather than aborting.
    try:
        info_(this, "MADlib database version = %s (host=%s, db=%s, schema=%s)"
              % (db_madlib_ver, con_args['host'], con_args['database'], schema), True)
    except:
        info_(this, "MADlib database version = [Unknown] (host=%s, db=%s, schema=%s)"
              % (db_madlib_ver, con_args['host'], con_args['database'], schema), True)
# ------------------------------------------------------------------------------
def _plpy_check(py_min_ver):
    """
    Check pl/python existence and version
    @param py_min_ver min Python version to run MADlib

    Creates the plpythonu language if missing. Raises Exception when the
    language cannot be created or its Python is older than py_min_ver.
    """
    info_(this, "Testing PL/Python environment...", True)

    # Check PL/Python existence
    rv = _internal_run_query("SELECT count(*) AS CNT FROM pg_language "
                             "WHERE lanname = 'plpythonu'", True)
    if int(rv[0]['cnt']) > 0:
        info_(this, "> PL/Python already installed", verbose)
    else:
        info_(this, "> PL/Python not installed", verbose)
        info_(this, "> Creating language PL/Python...", True)
        try:
            _internal_run_query("CREATE LANGUAGE plpythonu;", True)
        except:
            error_(this, """Cannot create language plpythonu. Please check if you
                have configured and installed portid (your platform) with
                `--with-python` option. Stopping installation...""", False)
            raise Exception

    # Check PL/Python version by probing with a throwaway function that
    # reports sys.version_info from inside the database.
    _internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
    _internal_run_query("""
        CREATE OR REPLACE FUNCTION plpy_version_for_madlib()
        RETURNS TEXT AS
        $$
            import sys
            # return '.'.join(str(item) for item in sys.version_info[:3])
            return str(sys.version_info[:3]).replace(',','.').replace(' ','').replace(')','').replace('(','')
        $$
        LANGUAGE plpythonu;
    """, True)
    rv = _internal_run_query("SELECT plpy_version_for_madlib() AS ver;", True)
    python = rv[0]['ver']
    py_cur_ver = [int(i) for i in python.split('.')]
    # List comparison gives lexicographic version ordering.
    if py_cur_ver >= py_min_ver:
        info_(this, "> PL/Python version: %s" % python, verbose)
    else:
        error_(this, "PL/Python version too old: %s. You need %s or greater"
               % (python, '.'.join(str(i) for i in py_min_ver)), False)
        raise Exception

    # Clean up the probe function.
    _internal_run_query("DROP FUNCTION IF EXISTS plpy_version_for_madlib();", False)
    info_(this, "> PL/Python environment OK (version: %s)" % python, True)
# ------------------------------------------------------------------------------
def _db_install(schema, is_schema_in_db, filehandle):
    """
    Install MADlib.
    @param schema MADlib schema name
    @param is_schema_in_db flag to indicate if schema is already present
    @param filehandle file that receives the installation sql
    """
    failure_msg = ("Building database objects failed. "
                   "Before retrying: drop %s schema OR install MADlib into "
                   "a different schema." % schema)
    # Create MADlib objects
    try:
        _db_create_schema(schema, is_schema_in_db, filehandle)
        _db_create_objects(schema, filehandle)
    except:
        error_(this, failure_msg, True)
# ------------------------------------------------------------------------------
def _db_upgrade(schema, filehandle, db_madlib_ver):
    """
    Upgrade MADlib
    @param schema MADlib schema name
    @param filehandle Handle to output file
    @param db_madlib_ver DB-level MADlib version
    @return 0 on success; 1 when no upgrade is performed (already current,
            or installed version too old to upgrade)
    """
    # Nothing to do when the DB already runs this version or newer.
    if is_rev_gte(get_rev_num(db_madlib_ver), get_rev_num(new_madlib_ver)):
        info_(this, "Current MADlib version already up to date.", True)
        return 1

    if is_rev_gte(get_rev_num('1.9.1'), get_rev_num(db_madlib_ver)):
        error_(this, """
            MADlib versions prior to v1.10 are not supported for upgrade.
            Please try upgrading to v1.10 and then upgrade to this version.
            """, True)
        return 1

    info_(this, "Upgrading MADlib into %s schema..." % schema, True)
    info_(this, "\tDetecting dependencies...", True)

    info_(this, "\tLoading change list...", True)
    ch = uu.ChangeHandler(schema, portid, con_args, maddir, db_madlib_ver, filehandle)

    info_(this, "\tDetecting table dependencies...", True)
    td = uu.TableDependency(schema, portid, con_args)

    info_(this, "\tDetecting view dependencies...", True)
    vd = uu.ViewDependency(schema, portid, con_args)

    # Scan for user objects that depend on MADlib objects this upgrade will
    # change; any hit sets 'abort' and the upgrade is refused below.
    abort = False
    if td.has_dependency():
        info_(this, "*" * 50, True)
        info_(this, "\tFollowing user tables/indexes are dependent on MADlib objects:", True)
        info_(this, td.get_dependency_str(), True)
        info_(this, "*" * 50, True)

        # Changed user-defined types that user tables depend on.
        cd_udt = [udt for udt in td.get_depended_udt() if udt in ch.udt]
        if len(cd_udt) > 0:
            error_(this, """
                User has objects dependent on following updated MADlib types!
                        {0}
                These objects need to be dropped before upgrading.
                """.format('\n\t\t\t'.join(cd_udt)), False)
            # we add special handling for 'linregr_result'
            if 'linregr_result' in cd_udt:
                info_(this, """Dependency on 'linregr_result' could be due to objects
                    created from the output of the aggregate 'linregr'.
                    Please refer to the Linear Regression documentation
                    <http://madlib.apache.org/docs/latest/group__grp__linreg.html#warning>
                    for the recommended solution.
                    """, False)
            abort = True

        # Changed operator classes that user objects depend on.
        c_udoc = ch.get_udoc_oids()
        d_udoc = td.get_depended_udoc_oids()
        cd_udoc = [udoc for udoc in d_udoc if udoc in c_udoc]
        if len(cd_udoc) > 0:
            error_(this, """
                User has objects dependent on the following updated MADlib operator classes!
                        oid={0}
                These objects need to be dropped before upgrading.
                """.format('\n\t\t\t'.join(cd_udoc)), False)
            abort = True

    if vd.has_dependency():
        info_(this, "*" * 50, True)
        info_(this, "\tFollowing user views are dependent on MADlib objects:", True)
        info_(this, vd.get_dependency_graph_str(), True)
        info_(this, "*" * 50, True)

        # Changed functions that user views depend on.
        c_udf = ch.get_udf_signature()
        d_udf = vd.get_depended_func_signature('UDF')
        cd_udf = [udf for udf in d_udf if udf in c_udf]
        if len(cd_udf) > 0:
            error_(this, """
                User has objects dependent on following updated MADlib functions!
                    {0}
                These objects will fail to work with the updated functions and
                need to be dropped before starting upgrade again.
                """.format('\n\t\t\t\t\t'.join(cd_udf)), False)
            abort = True

        # Changed aggregates that user views depend on.
        c_uda = ch.get_uda_signature()
        d_uda = vd.get_depended_func_signature('UDA')
        cd_uda = [uda for uda in d_uda if uda in c_uda]
        if len(cd_uda) > 0:
            error_(this, """
                User has objects dependent on following updated MADlib aggregates!
                    {0}
                These objects will fail to work with the new aggregates and
                need to be dropped before starting upgrade again.
                """.format('\n\t\t\t\t\t'.join(cd_uda)), False)
            abort = True

        # Changed operators that user views depend on.
        c_udo = ch.get_udo_oids()
        d_udo = vd.get_depended_opr_oids()
        cd_udo = [udo for udo in d_udo if udo in c_udo]
        if len(cd_udo) > 0:
            error_(this, """
                User has objects dependent on following updated MADlib operators!
                    oid={0}
                These objects will fail to work with the new operators and
                need to be dropped before starting upgrade again.
                """.format('\n\t\t\t\t\t'.join(cd_udo)), False)
            abort = True

    if abort:
        error_(this, """------- Upgrade aborted. -------
            Backup and drop all objects that depend on MADlib before trying upgrade again.
            Use madpack reinstall to automatically drop these objects only if appropriate.""", True)
    else:
        info_(this, "No dependency problem found, continuing to upgrade ...", True)

    info_(this, "\tReading existing UDAs/UDTs...", False)
    sc = uu.ScriptCleaner(schema, portid, con_args, ch)
    info_(this, "Script Cleaner initialized ...", False)

    # Drop every object the change list says was modified, then re-create.
    ch.drop_changed_uda()
    ch.drop_changed_udoc()
    ch.drop_changed_udo()
    ch.drop_changed_udc()
    ch.drop_changed_udf()
    ch.drop_changed_udt()  # assume dependent udf for udt does not change
    ch.drop_traininginfo_4dt()  # used types: oid, text, integer, float
    _db_create_objects(schema, filehandle, True, sc)
    return 0
# ------------------------------------------------------------------------------
def _db_rename_schema(from_schema, to_schema):
    """
    Rename schema
    @param from_schema name of the schema to rename
    @param to_schema new name for the schema

    Raises Exception when the ALTER SCHEMA statement fails.
    """
    info_(this, "> Renaming schema %s to %s" % (from_schema, to_schema), True)
    rename_sql = "ALTER SCHEMA %s RENAME TO %s;" % (from_schema, to_schema)
    try:
        _internal_run_query(rename_sql, True)
    except:
        error_(this, 'Cannot rename schema. Stopping installation...', False)
        raise Exception
# ------------------------------------------------------------------------------
def _db_create_schema(schema, is_schema_in_db, filehandle):
    """
    Emit a CREATE SCHEMA statement unless the schema already exists.

    @param schema name of the schema to create
    @param is_schema_in_db flag to indicate if schema is already present
    @param filehandle file that receives the sql
    """
    if is_schema_in_db:
        return
    _write_to_file(filehandle, "CREATE SCHEMA %s;" % schema)
# ------------------------------------------------------------------------------
def _process_py_sql_files_in_modules(modset, args_dict):
    """
    This function executes relevant files from all applicable modules
    (either all modules, or specific modules specified as a comma
    separated list).
    * If the operation is install/dev check, then all the corresponding sql
    files are executed.
    * If the operation is unit-test, then all corresponding python files
    are executed.
    * If the operation was from _db_create_objects(), then all the relevant
    objects are written to files for execution during install/reinstall/upgrade.

    @param modset dict mapping module name -> list of algo names to run
           (empty dict/list means everything)
    @param args_dict caller's argument dict (often the caller's locals());
           which keys are read depends on the operation
    """
    if 'madpack_cmd' in args_dict:
        madpack_cmd = args_dict['madpack_cmd']
    else:
        madpack_cmd = None

    # Map the madpack command onto one of the three processing modes.
    if not madpack_cmd:
        calling_operation = DB_CREATE_OBJECTS
    elif madpack_cmd in ['install-check', 'dev-check']:
        calling_operation = INSTALL_DEV_CHECK
    elif madpack_cmd == 'unit-test':
        calling_operation = UNIT_TEST
    else:
        error_(this, "Invalid madpack operation: %s" % madpack_cmd, True)

    # Perform operations on all modules
    # NOTE(review): 'portspecs' is a module-level global not visible in this
    # chunk of the file; presumably loaded from the port configuration.
    for moduleinfo in portspecs['modules']:
        # Get the module name
        module = moduleinfo['name']

        # Skip if doesn't meet specified modules
        if modset and module not in modset:
            continue

        # Find the Python module dir (platform specific or generic)
        if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/modules/" + module):
            maddir_mod_py = maddir + "/ports/" + portid + "/" + dbver + "/modules"
        else:
            maddir_mod_py = maddir + "/modules"

        # Find the SQL module dir (platform specific or generic)
        if os.path.isdir(maddir + "/ports/" + portid + "/modules/" + module):
            maddir_mod_sql = maddir + "/ports/" + portid + "/modules"
        elif os.path.isdir(maddir + "/modules/" + module):
            maddir_mod_sql = maddir + "/modules"
        else:
            # This was a platform-specific module, for which no default exists.
            # We can just skip this module.
            continue

        # Make a temp dir for log files
        cur_tmpdir = tmpdir + "/" + module
        _make_dir(cur_tmpdir)

        # Choose the glob pattern for the files this operation runs.
        if calling_operation == DB_CREATE_OBJECTS:
            info_(this, "> - %s" % module, True)
            mask = maddir_mod_sql + '/' + module + '/*.sql_in'
        elif calling_operation == INSTALL_DEV_CHECK:
            if madpack_cmd == 'install-check':
                mask = maddir_mod_sql + '/' + module + '/test/*.ic.sql_in'
            else:
                mask = maddir_mod_sql + '/' + module + '/test/*.sql_in'
        elif calling_operation == UNIT_TEST:
            mask = maddir_mod_py + '/' + module + '/test/unit_tests/test_*.py'
        else:
            error_(this, "Something is wrong, shouldn't be here.", True)

        # Loop through all SQL files for this module
        source_files = glob.glob(mask)
        # '.setup' files are fixtures, never run directly.
        source_files = [s for s in source_files if '.setup' not in s]
        # dev-check runs the full tests, so exclude the install-check subset.
        if calling_operation == INSTALL_DEV_CHECK and madpack_cmd != 'install-check':
            source_files = [s for s in source_files if '.ic' not in s]

        # Do this error check only when running install/reinstall/upgrade
        if calling_operation == DB_CREATE_OBJECTS and not source_files:
            error_(this, "No files found in: %s" % mask, True)

        # Execute all SQL/py files for the module
        for src_file in source_files:
            algoname = os.path.basename(src_file).split('.')[0]
            # run only algo specified
            if (modset and modset[module] and
                    algoname not in modset[module]):
                continue

            if calling_operation == DB_CREATE_OBJECTS:
                _execute_per_module_db_create_obj_algo(
                    args_dict['schema'],
                    maddir_mod_py,
                    module,
                    src_file,
                    algoname,
                    cur_tmpdir,
                    args_dict['upgrade'],
                    args_dict['create_obj_handle'],
                    args_dict['sc'])
            elif calling_operation == INSTALL_DEV_CHECK:
                _execute_per_module_install_dev_check_algo(
                    args_dict['schema'],
                    args_dict['test_user'],
                    maddir_mod_py,
                    module,
                    src_file,
                    cur_tmpdir)
            elif calling_operation == UNIT_TEST:
                _execute_per_module_unit_test_algo(
                    module,
                    src_file,
                    cur_tmpdir)
            else:
                error_(this, "Something is wrong, shouldn't be here: %s" % src_file, True)
        # Install/reinstall/upgrade leaves no per-module logs behind.
        if calling_operation == DB_CREATE_OBJECTS:
            shutil.rmtree(cur_tmpdir)
# ------------------------------------------------------------------------------
def _execute_per_module_db_create_obj_algo(schema, maddir_mod_py, module,
                                           sqlfile, algoname, cur_tmpdir,
                                           upgrade, create_obj_handle, sc):
    """
    Per-module step of _db_create_objects: m4-process one sql file and
    append the (upgrade-cleaned, when upgrading) result to the output handle.

    @param schema target schema name
    @param maddir_mod_py module dir containing Python code
    @param module module name
    @param sqlfile .sql_in file to process
    @param algoname algorithm name (basename of sqlfile)
    @param cur_tmpdir per-module temp directory
    @param upgrade True when performing an upgrade
    @param create_obj_handle output file handle for the generated SQL
    @param sc ScriptCleaner object (used only on upgrade)
    """
    if not upgrade:
        _run_m4_and_append(schema, maddir_mod_py, module, sqlfile,
                           create_obj_handle, None)
    else:
        tmpfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.tmp'
        with open(tmpfile, 'w+') as tmphandle:
            _run_m4_and_append(schema, maddir_mod_py, module, sqlfile,
                               tmphandle, None)
        # Re-open under 'with' so the handle is closed deterministically;
        # the original bare open(tmpfile).read() leaked the file handle.
        with open(tmpfile) as tmphandle:
            processed_sql = sc.cleanup(tmphandle.read(), algoname)
        _write_to_file(create_obj_handle, processed_sql)
# ------------------------------------------------------------------------------
def _execute_per_module_unit_test_algo(module, pyfile, cur_tmpdir):
    """
    Run one Python unit-test file for a module and record the result.

    @param module module name (used in the result report)
    @param pyfile path of the test_*.py file to run as a subprocess
    @param cur_tmpdir per-module temp directory for the log file

    Raises Exception when the log file cannot be created or the subprocess
    invocation itself fails.
    """
    logfile = cur_tmpdir + '/' + os.path.basename(pyfile) + '.log'
    try:
        log = open(logfile, 'w')
    except:
        error_(this, "Cannot create log file: %s" % logfile, False)
        raise Exception
    info_(this, "> ... executing " + pyfile, verbose)
    try:
        milliseconds = 0
        run_start = datetime.datetime.now()
        # Run the python unit test file
        runcmd = ["python", pyfile]
        runenv = os.environ
        retval = subprocess.call(runcmd, env=runenv, stdout=log, stderr=log)
        # Wall-clock duration for the result report, in milliseconds.
        run_end = datetime.datetime.now()
        milliseconds = round((run_end - run_start).seconds * 1000 +
                             (run_end - run_start).microseconds / 1000)
    except:
        error_(this, "Failed executing %s" % pyfile, False)
        raise Exception
    finally:
        log.close()
    _parse_result_logfile(retval, logfile, pyfile,
                          pyfile, module, milliseconds)
# ------------------------------------------------------------------------------
def _execute_per_module_install_dev_check_algo(schema, test_user,
                                               maddir_mod_py, module,
                                               sqlfile, cur_tmpdir):
    """
    Per-module step for install-check / dev-check: run one test SQL file
    inside a throwaway schema as the test user, then drop the schema.

    @param schema MADlib schema name
    @param test_user role the test runs as
    @param maddir_mod_py module dir containing Python code
    @param module module name
    @param sqlfile test .sql_in file to run
    @param cur_tmpdir per-module temp directory for tmp/log files
    """
    try:
        # Prepare test schema
        test_schema = "madlib_installcheck_%s" % (module)
        _internal_run_query("DROP SCHEMA IF EXISTS %s CASCADE; CREATE SCHEMA %s;" %
                            (test_schema, test_schema), True)
        _internal_run_query("GRANT ALL ON SCHEMA %s TO %s;" %
                            (test_schema, test_user), True)

        # Switch to test user and prepare the search_path
        pre_sql = '-- Switch to test user:\n' \
                  'SET ROLE %s;\n' \
                  '-- Set SEARCH_PATH for install-check:\n' \
                  'SET search_path=%s,%s;\n' \
                  % (test_user, test_schema, schema)

        # Set file names
        tmpfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.tmp'
        logfile = cur_tmpdir + '/' + os.path.basename(sqlfile) + '.log'

        # If there is no problem with the SQL file
        milliseconds = 0

        # Run the SQL
        run_start = datetime.datetime.now()
        retval = _run_install_check_sql(schema, maddir_mod_py,
                                        module, sqlfile, tmpfile,
                                        logfile, pre_sql)
        # Runtime evaluation (milliseconds, for the result report)
        run_end = datetime.datetime.now()
        milliseconds = round((run_end - run_start).seconds * 1000 +
                             (run_end - run_start).microseconds / 1000)

        # Check the exit status
        # NOTE(review): 'result' is never used afterwards; the outcome is
        # printed/reported inside _parse_result_logfile itself.
        result = _parse_result_logfile(retval, logfile, tmpfile, sqlfile,
                                       module, milliseconds)
    finally:
        # Cleanup test schema for the module
        _internal_run_query("DROP SCHEMA IF EXISTS %s CASCADE;" % (test_schema), True)
# ------------------------------------------------------------------------------
def _db_create_objects(schema, create_obj_handle, upgrade=False, sc=None):
    """
    Create MADlib DB objects in the schema
    @param schema Name of the target schema
    @param create_obj_handle file handle for sql output file
    @param upgrade flag to indicate if it's an upgrade operation or not
    @param sc ScriptCleaner object
    """
    if not upgrade:
        # Create MigrationHistory table
        try:
            _write_to_file(create_obj_handle,
                           "DROP TABLE IF EXISTS %s.migrationhistory;" % schema)
            sql = """CREATE TABLE %s.migrationhistory
                   (id serial, version varchar(255),
                   applied timestamp default current_timestamp);
                   """ % schema
            _write_to_file(create_obj_handle, sql)
        except:
            error_(this, "Cannot create MigrationHistory table", False)
            raise Exception

        # Stamp the DB installation
        try:
            _write_to_file(create_obj_handle,
                           """INSERT INTO %s.migrationhistory(version)
                              VALUES('%s');
                           """ % (schema, str(new_madlib_ver)))
        except:
            error_(this, "Cannot insert data into %s.migrationhistory table" % schema, False)
            raise Exception

    # Run migration SQLs
    info_(this, "> Preparing objects for the following modules:", True)
    # We always create objects for all modules during install/reinstall/upgrade
    modset = {}
    # NOTE: _process_py_sql_files_in_modules reads named entries out of this
    # locals() dict ('schema', 'upgrade', 'create_obj_handle', 'sc') --
    # renaming any local variable here would silently break that callee.
    _process_py_sql_files_in_modules(modset, locals())
# ------------------------------------------------------------------------------
def unescape(string):
    """
    Unescape separation characters in connection strings, i.e., remove first
    backslash from "\\/", "\\@", "\\:", and "\\\\".

    @param string possibly-escaped connection-string field, or None
    @return the unescaped string, or None when the input is None
    """
    if string is None:
        return None
    # The replacement must be a raw string: '\g<char>' in a regular string
    # relies on Python passing unknown escapes through, which is deprecated
    # (and a SyntaxError/error in newer Python versions).
    return re.sub(r'\\(?P<char>[/@:\\])', r'\g<char>', string)
# ------------------------------------------------------------------------------
def parseConnectionStr(connectionStr):
    """
    @brief Parse connection strings of the form
    <tt>[username[/password]@][hostname][:port][/database]</tt>

    Separation characters (/@:) and the backslash (\\) need to be escaped.
    @returns A tuple (username, password, hostname, port, database). Fields
    not specified will be None.
    """
    # One escaped-or-plain character of a connection-string field.
    field_char = r'([^/@:\\]|\\/|\\@|\\:|\\\\)'
    pattern = (r'((?P<user>' + field_char + r'+)' +
               r'(/(?P<password>' + field_char + r'*))?@)?' +
               r'(?P<host>' + field_char + r'+)?' +
               r'(:(?P<port>[0-9]+))?' +
               r'(/(?P<database>' + field_char + r'+))?')
    m = re.search(pattern, connectionStr)
    return (
        unescape(m.group('user')),
        unescape(m.group('password')),
        unescape(m.group('host')),
        m.group('port'),
        unescape(m.group('database')))
# ------------------------------------------------------------------------------
def parse_arguments():
    """Define and parse madpack's command-line interface.

    @returns the argparse.Namespace; note that 'command', 'connstr',
             'schema' and 'platform' use nargs=1 and therefore arrive as
             one-element lists when supplied on the command line.
    """
    parser = argparse.ArgumentParser(
        prog="madpack",
        description='MADlib package manager (' + str(new_madlib_ver) + ')',
        argument_default=False,
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="""Example:
$ madpack install -s madlib -p greenplum -c gpadmin@mdw:5432/testdb
This will install MADlib objects into a Greenplum database called TESTDB
running on server MDW:5432. Installer will try to login as GPADMIN
and will prompt for password. The target schema will be MADLIB.
$ madpack dev-check
This will run dev-check on all the installed modules in MADlib. Another
similar, but light-weight check, is called install-check.
$ madpack unit-test -t convex,recursive_partitioning/decision_tree
This will run all the unit tests that are defined in the convex module, and
for decision trees in the recursive partitioning module.
The -t option runs tests only for required modules, and can be used similarly
for install-check, dev-check and unit-test.
""")
    help_msg = """One of the following options:
install : load MADlib into DB
upgrade : upgrade MADlib
uninstall : uninstall MADlib from DB
reinstall : perform uninstall and install
version : compare and print MADlib version (binaries vs database objects)
install-check : quick test of installed modules
dev-check : more detailed test of installed modules
unit-test : unit tests of installed modules
"""
    # NOTE: 'update' is accepted as an (undocumented) alias for 'upgrade';
    # both are handled identically in create_install_madlib_sqlfile().
    choice_list = ['install', 'update', 'upgrade', 'uninstall',
                   'reinstall', 'version', 'install-check',
                   'dev-check', 'unit-test']
    parser.add_argument('command', metavar='COMMAND', nargs=1,
                        choices=choice_list, help=help_msg)
    parser.add_argument(
        '-c', '--conn', metavar='CONNSTR', nargs=1, dest='connstr', default=None,
        help="""Connection string of the following syntax:
[user[/password]@][host][:port][/database]
If not provided default values will be derived for PostgreSQL and Greenplum:
- user: PGUSER or USER env variable or OS username
- pass: PGPASSWORD env variable or runtime prompt
- host: PGHOST env variable or 'localhost'
- port: PGPORT env variable or '5432'
- db: PGDATABASE env variable or OS username""")
    parser.add_argument('-s', '--schema', nargs=1, dest='schema',
                        metavar='SCHEMA', default='madlib',
                        help="Target schema for the database objects.")
    parser.add_argument('-p', '--platform', nargs=1, dest='platform',
                        metavar='PLATFORM', choices=portid_list,
                        help="Target database platform, current choices: " + str(portid_list))
    parser.add_argument('-v', '--verbose', dest='verbose',
                        action="store_true", help="Verbose mode.")
    parser.add_argument('-l', '--keeplogs', dest='keeplogs', default=False,
                        action="store_true", help="Do not remove installation log files.")
    parser.add_argument('-d', '--tmpdir', dest='tmpdir', default='/tmp/',
                        help="Temporary directory location for installation log files.")
    parser.add_argument('-t', '--testcase', dest='testcase', default="",
                        help="Module names to test, comma separated. Applies to install-check, dev-check and unit-test.")
    # Get the arguments
    return parser.parse_args()
def _is_madlib_installation_valid_for_tests(schema, db_madlib_ver, test_type):
    """Return True when the installed MADlib can be tested.

    Tests may only run when a MADlib installation exists in *schema* and its
    version matches the on-disk (binary) version exactly.
    """
    if not db_madlib_ver:
        # Nothing is installed in the target schema at all.
        info_(this, "MADlib is not installed in the schema {0}. {1} stopped.".format(schema, test_type.capitalize()), True)
        return False
    installed_rev = get_rev_num(db_madlib_ver)
    binary_rev = get_rev_num(new_madlib_ver)
    if installed_rev == binary_rev:
        return True
    # Version mismatch: report both versions and refuse to run.
    _print_vers(new_madlib_ver, db_madlib_ver, con_args, schema)
    info_(this, "Versions do not match. {0} stopped.".format(test_type.capitalize()), True)
    return False
def _get_modset_for_tests(testcase, filename_prefix=''):
# Get all module and algo names to run tests for, is specified as a comma
# separated list.
caseset = (set([test.strip() for test in testcase.split(',')])
if testcase else set())
modset = {}
for case in caseset:
if case.find('/') > -1:
[mod, algo] = case.split('/')
if mod not in modset:
modset[mod] = []
if algo not in modset[mod]:
modset[mod].append(filename_prefix+algo)
else:
modset[case] = []
return modset
def run_unit_tests(args, testcase):
    """Run the unit-test scripts for the requested (or all) modules.

    @param args: dict carrying at least 'schema' and 'db_madlib_ver'
    @param testcase: comma-separated "module[/algo]" filter; '' means all
    """
    valid = _is_madlib_installation_valid_for_tests(args['schema'],
                                                    args['db_madlib_ver'],
                                                    'unit-tests')
    if not valid:
        return
    info_(this, "> Running unit-test scripts for:", verbose)
    # Unit-test files carry a 'test_' prefix inside each module.
    modset = _get_modset_for_tests(testcase, 'test_')
    _process_py_sql_files_in_modules(modset, {'madpack_cmd': 'unit-test'})
def run_install_check(args, testcase, madpack_cmd):
    """Run install-check or dev-check SQL scripts.

    Creates a throw-away superuser, runs the check scripts of each requested
    module as that user, and always drops the user (and everything it owns)
    again, even when the checks fail.

    @param args: dict carrying at least 'schema', 'db_madlib_ver', 'c_db'
    @param testcase: comma-separated "module[/algo]" filter; '' means all
    @param madpack_cmd: 'install-check' or 'dev-check'
    """
    # NOTE: locals() is handed to _process_py_sql_files_in_modules() below,
    # so the local variable names in this function are part of its contract.
    is_install_check = (madpack_cmd == 'install-check')
    schema = args['schema']
    db_madlib_ver = args['db_madlib_ver']
    if not _is_madlib_installation_valid_for_tests(schema, db_madlib_ver, madpack_cmd):
        return
    # 1) Create the temporary install-check user.
    db_name = args["c_db"].replace('.', '').replace('-', '_')
    test_user = ('madlib_' +
                 new_madlib_ver.replace('.', '').replace('-', '_') +
                 '_installcheck_' + db_name)
    try:
        _internal_run_query("DROP USER IF EXISTS %s;" % (test_user), False)
    except Exception as e:
        # The DROP can fail while the user still owns objects: drop those
        # first, then retry the DROP USER.
        _internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), True)
        _internal_run_query("DROP USER IF EXISTS %s;" % (test_user), True)
    _internal_run_query("CREATE USER %s WITH SUPERUSER NOINHERIT;" % (test_user), True)
    _internal_run_query("GRANT USAGE ON SCHEMA %s TO %s;" % (schema, test_user), True)
    _internal_run_query("GRANT ALL PRIVILEGES ON DATABASE %s TO %s;" % (db_name, test_user), True)
    # 2) Run test SQLs
    info_(this, "> Running %s scripts for:" % madpack_cmd, verbose)
    try:
        # (was computed twice before the try block as well -- redundant
        # duplicate removed; the value is identical)
        modset = _get_modset_for_tests(testcase)
        # Execute relevant sql files in each module for IC/DC
        _process_py_sql_files_in_modules(modset, locals())
    finally:
        # 3) Drop install-check user, no matter what happened above.
        _internal_run_query("REVOKE USAGE ON SCHEMA %s FROM %s;" % (schema, test_user), True)
        try:
            _internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), show_error=False)
        except Exception as e:
            # We've intermittently noticed a "cache lookup failure" due to this
            # "DROP OWNED BY". This could be related to an error on
            # Stack Exchange: https://dba.stackexchange.com/questions/173815/redshift-internalerror-cache-lookup-failed-for-relation
            # Summary: Sometimes with too many drop statements the cache is
            # out-of-sync and takes a few seconds to resync. Repeat the same
            # command after a time gap.
            from time import sleep
            sleep(1)
            _internal_run_query("DROP OWNED BY %s CASCADE;" % (test_user), show_error=True)
        _internal_run_query("REVOKE ALL PRIVILEGES ON DATABASE %s FROM %s;" % (db_name, test_user), True)
        _internal_run_query("DROP USER %s;" % (test_user), True)
def _append_uninstall_madlib_sqlfile(schema, db_madlib_ver, is_schema_in_db,
                                     output_filehandle):
    """Append 'DROP SCHEMA ... CASCADE' to the output file, after asking.

    Interactively warns the user, listing user table columns whose types
    live in the MADlib schema (data that would be lost by the drop).

    @returns (signal, is_schema_in_db): signal is 1 when nothing was (or
             should be) dropped, 0 when the DROP was written out.
    """
    if get_rev_num(db_madlib_ver) == [0]:
        info_(this,
              "Nothing to uninstall or reinstall. "
              "No version found in schema %s."% schema, True)
        return 1, is_schema_in_db
    # Find any potential data to lose: columns of user tables whose type is
    # defined in the MADlib schema.
    affected_objects = _internal_run_query("""
        SELECT
            n1.nspname AS schema,
            relname AS relation,
            attname AS column,
            typname AS type
        FROM
            pg_attribute a,
            pg_class c,
            pg_type t,
            pg_namespace n,
            pg_namespace n1
        WHERE
            n.nspname = '%s'
            AND t.typnamespace = n.oid
            AND a.atttypid = t.oid
            AND c.oid = a.attrelid
            AND c.relnamespace = n1.oid
            AND c.relkind = 'r'
        ORDER BY
            n1.nspname, relname, attname, typname""" % schema.lower(), True)
    info_(this, "*** Uninstalling MADlib ***", True)
    info_(this, "***********************************************************************************", True)
    info_(this, "* Schema %s and all database objects depending on it will be dropped!" % schema, True)
    if affected_objects:
        info_(this, "* If you continue the following data will be lost (schema : table.column : type):", True)
        for ao in affected_objects:
            info_(this, '* - ' + ao['schema'] + ' : ' + ao['relation'] + '.' +
                  ao['column'] + ' : ' + ao['type'], True)
    info_(this, "***********************************************************************************", True)
    info_(this, "Would you like to continue? [Y/N]", True)
    # Python 2: raw_input() reads one line from stdin. Keep prompting until
    # we get an understandable answer.
    go = raw_input('>>> ').upper()
    while (go not in ('Y', 'N', 'YES', 'NO')):
        go = raw_input('Yes or No >>> ').upper()
    # 2) Do the uninstall/drop
    if go in ('N', 'NO'):
        info_(this, 'No problem. Nothing dropped.', True)
        return 1, is_schema_in_db
    elif go in ('Y', 'YES'):
        try:
            _write_to_file(output_filehandle,
                           "DROP SCHEMA %s CASCADE;" % (schema))
            is_schema_in_db = False
            return 0, is_schema_in_db
        except:
            error_(this, "Cannot drop schema %s." % schema, True)
    else:
        # Unreachable: the while loop above only exits on Y/N/YES/NO.
        return 1, is_schema_in_db
def _append_install_madlib_sqlfile(schema, db_madlib_ver, is_schema_in_db,
                                   madpack_cmd, output_filehandle):
    """Append the SQL that installs MADlib objects to the output file.

    @returns 1 when the database is already up to date (nothing written),
             0 after the installation SQL has been appended. Errors out
             when an older version is installed (user must 'upgrade').
    """
    if madpack_cmd == 'reinstall':
        # A reinstall lays down fresh objects regardless of what is
        # installed, so treat the database as empty.
        info_(this, "Setting MADlib database version to be None for reinstall", verbose)
        db_madlib_ver = None
    info_(this, "*** Installing MADlib ***", True)
    # 1) Compare OS and DB versions; no-op if OS <= DB.
    _print_vers(new_madlib_ver, db_madlib_ver, con_args, schema)
    if db_madlib_ver is not None:
        if is_rev_gte(get_rev_num(db_madlib_ver), get_rev_num(new_madlib_ver)):
            # Installed version is the same or newer: nothing to do.
            info_(this, "Current MADlib version already up to date.", True)
            return 1
        # Installed version is older: that would be an upgrade, not an
        # install, so point the user at the upgrade command and abort.
        error_(this, """Aborting installation: existing MADlib version detected in {0} schema
To upgrade the {0} schema to MADlib v{1} please run the following command:
madpack upgrade -s {0} -p {2} [-c ...]
""".format(schema, new_madlib_ver, portid), True)
    # 2) Run installation (no usable existing installation found).
    _plpy_check(py_min_ver)
    _db_install(schema, is_schema_in_db, output_filehandle)
    return 0
def create_install_madlib_sqlfile(args, madpack_cmd):
    """Write the SQL for install/reinstall/uninstall/upgrade to a file.

    Appends to args['output_filename'] according to *madpack_cmd*; a
    'reinstall' performs the uninstall step followed by the install step.

    @param args: dict with 'upgrade', 'schema', 'db_madlib_ver',
                 'is_schema_in_db', 'output_filename' (typically the
                 locals() of main())
    @returns 0 on success, 1 when nothing was done or a step failed
    """
    upgrade = args['upgrade']
    schema = args['schema']
    db_madlib_ver = args['db_madlib_ver']
    is_schema_in_db = args['is_schema_in_db']
    # Any non-zero contribution below means "nothing done" or failure.
    return_signal = 0
    with open(args['output_filename'], 'a+') as output_filehandle:
        # COMMAND: uninstall/reinstall
        if madpack_cmd in ('uninstall', 'reinstall'):
            return_signal, is_schema_in_db = _append_uninstall_madlib_sqlfile(
                schema, db_madlib_ver, is_schema_in_db, output_filehandle)
            if return_signal == 1:
                return 1
        # COMMAND: install/reinstall
        if madpack_cmd in ('install', 'reinstall'):
            return_signal += _append_install_madlib_sqlfile(schema, db_madlib_ver,
                is_schema_in_db, madpack_cmd, output_filehandle)
        # COMMAND: upgrade ('update' is an alias)
        if madpack_cmd in ('upgrade', 'update'):
            upgrade = True
            info_(this, "*** Upgrading MADlib ***", True)
            db_madlib_ver = get_db_madlib_version(con_args, schema)
            # 1) Check DB version. If None, nothing to upgrade.
            if not db_madlib_ver:
                info_(this, "MADlib is not installed in {schema} schema and there "
                      "is nothing to upgrade. Please use install "
                      "instead.".format(schema=schema),
                      True)
                return_signal += 1
            # 2) Compare OS and DB versions. Continue if OS > DB.
            else:
                _print_vers(new_madlib_ver, db_madlib_ver, con_args, schema)
                if is_rev_gte(get_rev_num(db_madlib_ver), get_rev_num(new_madlib_ver)):
                    info_(this, "Current MADlib version is already up-to-date.", True)
                    return_signal += 1
                else:
                    # 3) Run upgrade
                    _plpy_check(py_min_ver)
                    return_signal = _db_upgrade(schema, output_filehandle, db_madlib_ver)
    return 1 if return_signal > 0 else 0
def main(argv):
    """madpack entry point: parse arguments, validate the database
    connection and environment, then dispatch the requested COMMAND."""
    args = parse_arguments()
    global verbose
    verbose = args.verbose
    info_(this, "Arguments: " + str(args), verbose)
    global keeplogs
    keeplogs = args.keeplogs
    global tmpdir
    try:
        tmpdir = tempfile.mkdtemp('', 'madlib.', args.tmpdir)
    except OSError, e:
        tmpdir = e.filename
        error_(this, "cannot create temporary directory: '%s'." % tmpdir, True)
    # Parse SCHEMA
    # NOTE(review): args.schema is a one-element list when -s was passed
    # (nargs=1) but a plain string when the default applies; this length
    # check distinguishes the two cases (it would misfire for a
    # single-character schema name).
    if len(args.schema[0]) > 1:
        schema = args.schema[0].lower()
    else:
        schema = args.schema.lower()
    # Parse DB Platform (== PortID) and compare with Ports.yml
    global portid
    if args.platform:
        try:
            # Get the DB platform name == DB port id
            portid = args.platform[0].lower()
            ports[portid]
        except:
            portid = None
            error_(this, "Can not find specs for port %s" % (args.platform[0]), True)
    else:
        portid = None
    # Parse CONNSTR (only if PLATFORM and DBAPI2 are defined)
    if portid:
        connStr = "" if args.connstr is None else args.connstr[0]
        (c_user, c_pass, c_host, c_port, c_db) = parseConnectionStr(connStr)
        # Find the default values for PG and GP
        if portid in SUPPORTED_PORTS:
            if c_user is None:
                c_user = os.environ.get('PGUSER', getpass.getuser())
            if c_pass is None:
                c_pass = os.environ.get('PGPASSWORD', None)
            if c_host is None:
                c_host = os.environ.get('PGHOST', 'localhost')
            if c_port is None:
                c_port = os.environ.get('PGPORT', '5432')
            if c_db is None:
                c_db = os.environ.get('PGDATABASE', c_user)
        # Set connection variables
        global con_args
        con_args['host'] = c_host + ':' + c_port
        con_args['database'] = c_db
        con_args['user'] = c_user
        if c_pass is not None:
            con_args['password'] = c_pass
        # Try connecting to the database
        info_(this, "Testing database connection...", verbose)
        try:
            # check for password only if required
            _internal_run_query("SELECT 1", False)
        except EnvironmentError:
            # Connection failed without a password: prompt once and retry.
            con_args['password'] = getpass.getpass("Password for user %s: " % c_user)
            _internal_run_query("SELECT 1", False)
        except:
            error_(this, 'Failed to connect to database', True)
        # Get DB version
        global dbver
        dbver = get_dbver(con_args, portid)
        # update maddir to use a relative path if available
        global maddir
        maddir = _get_relative_maddir(maddir, portid)
        # Get MADlib version in DB
        db_madlib_ver = get_db_madlib_version(con_args, schema)
        # Versions we ship support files for = numeric sub-dirs of the port.
        portdir = os.path.join(maddir, "ports", portid)
        supportedVersions = [dirItem for dirItem in os.listdir(portdir)
                             if os.path.isdir(os.path.join(portdir, dirItem)) and
                             re.match("^\d+", dirItem)]
        if dbver is None:
            # Could not detect the server version: fall back to the newest
            # version we ship support files for.
            dbver = ".".join(
                map(str, max([versionStr.split('.')
                              for versionStr in supportedVersions])))
            info_(this, "Could not parse version string reported by {DBMS}. Will "
                  "default to newest supported version of {DBMS} "
                  "({version}).".format(DBMS=ports[portid]['name'],
                                        version=dbver), True)
        else:
            info_(this, "Detected %s version %s." % (ports[portid]['name'], dbver),
                  True)
            # Normalize the reported version to the support-directory name.
            dbver_split = get_rev_num(dbver)
            if portid == 'greenplum':
                if is_rev_gte(dbver_split, get_rev_num('5.0')):
                    # GPDB (starting 5.0) uses semantic versioning. Hence, only
                    # need first digit for major version.
                    dbver = str(dbver_split[0])
                elif is_rev_gte(dbver_split, get_rev_num('4.3.5')):
                    # Due to the ABI incompatibility between 4.3.4 and 4.3.5,
                    # MADlib treats 4.3.5+ as DB version 4.3ORCA which is
                    # different from 4.3. The name is suffixed with ORCA since
                    # optimizer (ORCA) is 'on' by default in 4.3.5+
                    dbver = '4.3ORCA'
                else:
                    # only need the first two digits for <= 4.3.4
                    dbver = '.'.join(map(str, dbver_split[:2]))
            elif portid == 'postgres':
                if is_rev_gte(dbver_split, get_rev_num('10.0')):
                    # Postgres starting 10.0 uses semantic versioning. Hence,
                    # only need first digit for major version.
                    dbver = str(dbver_split[0])
        if not os.path.isdir(os.path.join(portdir, dbver)):
            error_(this, "This version is not among the %s versions for which "
                   "MADlib support files have been installed (%s)." %
                   (ports[portid]['name'], ", ".join(supportedVersions)), True)
        # Validate that db platform is correct
        if not _check_db_port(portid):
            error_(this, "Invalid database platform specified.", True)
        # Adjust MADlib directories for this port (if they exist)
        global maddir_conf
        if os.path.isdir(maddir + "/ports/" + portid + "/" + dbver + "/config"):
            maddir_conf = maddir + "/ports/" + portid + "/" + dbver + "/config"
        else:
            maddir_conf = maddir + "/config"
        global maddir_lib
        if os.path.isfile(maddir + "/ports/" + portid + "/" + dbver +
                          "/lib/libmadlib.so"):
            maddir_lib = maddir + "/ports/" + portid + "/" + dbver + \
                "/lib/libmadlib.so"
        else:
            maddir_lib = maddir + "/lib/libmadlib.so"
        # Get the list of modules for this port
        global portspecs
        portspecs = configyml.get_modules(maddir_conf)
    else:
        con_args = None
        db_madlib_ver = None
    # Parse COMMAND argument and compare with Ports.yml
    # Debugging...
    # print "OS new_madlib_ver: " + str(new_madlib_ver) + " > " + str(get_rev_num(new_madlib_ver))
    # print "DB new_madlib_ver: " + str(db_madlib_ver) + " > " + str(get_rev_num(db_madlib_ver))
    # Make sure we have the necessary parameters to continue
    if args.command[0] != 'version':
        if not portid:
            error_(this, "Missing -p/--platform parameter.", True)
        if not con_args:
            error_(this, "Unknown problem with database connection string: %s" % con_args, True)
    # ---------------- Completed "Get and validate arguments" -----------------
    # COMMAND: install-check, dev-check or unit-test
    if args.command[0] in ('install-check', 'dev-check'):
        run_install_check(locals(), args.testcase, args.command[0])
    elif args.command[0] == 'unit-test':
        run_unit_tests(locals(), args.testcase)
    elif args.command[0] == 'version':
        _print_vers(new_madlib_ver, db_madlib_ver, con_args, schema)
    else:
        if args.testcase:
            error_(this,
                   "-t (testcase) option is not supported for %s" % args.command[0],
                   True)
        try:
            is_schema_in_db = _internal_run_query("SELECT schema_name FROM information_schema.schemata WHERE schema_name='%s';" % schema, True)
        except:
            error_(this, "Cannot validate if schema already exists.", True)
        # Build the full SQL script first, then execute it in one shot.
        output_filename = tmpdir + "/madlib_{0}.sql".format(args.command[0])
        upgrade = False
        return_val = create_install_madlib_sqlfile(locals(), args.command[0])
        if return_val == 0:
            op_msg = (args.command[0].capitalize() + "ing"
                      if args.command[0] != 'upgrade'
                      else 'Upgrading')
            info_(this, "%s MADlib:" % op_msg, True)
            _cleanup_comments_in_sqlfile(output_filename, upgrade)
            result = _run_sql_file(schema, output_filename)
            if result == 'FAIL':
                info_(this, "MADlib {0} unsuccessful.".format(args.command[0]), True)
                info_(this, "All changes are rolled back.", True)
            else:
                if args.command[0] != 'uninstall':
                    if args.command[0] == 'upgrade':
                        info_(this, "MADlib %s upgraded successfully in %s schema." % (str(new_madlib_ver), schema), True)
                    else:
                        info_(this, "> Created %s schema" % schema, True)
                        info_(this, "> Created %s.MigrationHistory table" % schema, True)
                        info_(this, "> Wrote version info in MigrationHistory table", True)
                        info_(this, "MADlib %s installed successfully in %s schema." % (str(new_madlib_ver), schema))
                else:
                    info_(this, "MADlib %s uninstalled successfully from %s schema." % (str(new_madlib_ver), schema))
# ------------------------------------------------------------------------------
# Start Here
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    # Run main
    main(sys.argv[1:])
    # Optional log files cleanup
    # keeplogs and tmpdir are global variables set inside main()
    if not keeplogs:
        shutil.rmtree(tmpdir)
    else:
        # Python 2 print statement (this module is Python 2 throughout).
        print "INFO: Log files saved in " + tmpdir
| {
"content_hash": "152b8c8d06991798a310cc2e7c0fe7c4",
"timestamp": "",
"source": "github",
"line_count": 1421,
"max_line_length": 143,
"avg_line_length": 41.42153413089374,
"alnum_prop": 0.5520387359836901,
"repo_name": "madlib/madlib",
"id": "74fff37395030ccdc5d956d12b2b49ec70b04d94",
"size": "59042",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/madpack/madpack.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1338300"
},
{
"name": "C++",
"bytes": "1404277"
},
{
"name": "CMake",
"bytes": "181442"
},
{
"name": "HTML",
"bytes": "458"
},
{
"name": "Makefile",
"bytes": "2615"
},
{
"name": "PLpgSQL",
"bytes": "16483"
},
{
"name": "Python",
"bytes": "453696"
},
{
"name": "Shell",
"bytes": "33672"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib import admin
from calaccess_raw import models
from .base import BaseAdmin
class AcronymsCdAdmin(BaseAdmin):
    # Lookup table of acronyms and what they stand for.
    list_display = ("acronym", "stands_for", "effect_dt", "a_desc")
    date_hierarchy = 'effect_dt'
    search_fields = ("acronym", "a_desc")


class AddressCdAdmin(BaseAdmin):
    # Uses BaseAdmin's default configuration.
    pass


class BallotMeasuresCdAdmin(BaseAdmin):
    list_display = ("measure_name", "election_date", "jurisdiction")
    list_filter = ("jurisdiction",)
    search_fields = ("measure_name",)


class EfsFilingLogCdAdmin(BaseAdmin):
    # Electronic filing system log entries.
    list_display = (
        "id",
        "filing_date",
        "filingstatus",
        "filer_id",
        "vendor",
        "form_type",
    )
class FilersCdAdmin(BaseAdmin):
    # Uses BaseAdmin's default configuration.
    pass


class FilerAcronymsCdAdmin(BaseAdmin):
    # Uses BaseAdmin's default configuration.
    pass


class FilerAddressCdAdmin(BaseAdmin):
    # Links filers to address records (adrid).
    list_display = (
        "filer_id",
        "adrid",
        "effect_dt",
        "add_type"
    )


class FilerEthicsClassCdAdmin(BaseAdmin):
    # Uses BaseAdmin's default configuration.
    pass


class FilerInterestsCdAdmin(BaseAdmin):
    # Uses BaseAdmin's default configuration.
    pass
# NOTE: registered via the decorator rather than the admin.site.register()
# calls at the bottom of this module.
@admin.register(models.FilerLinksCd)
class FilerLinksCdAdmin(BaseAdmin):
    # Relationships between pairs of filers (filer_id_a <-> filer_id_b).
    list_display = (
        "filer_id_a",
        "filer_id_b",
        "link_type",
        "active_flg",
        "effect_dt",
        "termination_dt",
    )
    list_filter = (
        "active_flg",
        "link_type"
    )
    search_fields = ("filer_id_a", "filer_id_b")
class FilerStatusTypesCdAdmin(BaseAdmin):
    # Small lookup table: status code -> description.
    list_display = (
        "status_type",
        "status_desc"
    )


class FilerToFilerTypeCdAdmin(BaseAdmin):
    # Assignments of filers to filer types over time (effect_dt).
    list_display = (
        "filer_id",
        "filer_type",
        "effect_dt",
        "active",
        "session_id",
        "race",
        "district_cd",
        "party_cd"
    )
    list_filter = (
        "active",
        "filer_type",
        "category",
        "sub_category",
        "category_type",
        "party_cd",
        "session_id"
    )
    date_hierarchy = "effect_dt"
    search_fields = (
        "filer_id",
    )


class FilerTypesCdAdmin(BaseAdmin):
    # Lookup table of filer types.
    list_display = (
        "filer_type",
        "description",
        "grp_type",
        "calc_use",
        "grace_period",
    )


class FilerXrefCdAdmin(BaseAdmin):
    # Uses BaseAdmin's default configuration.
    pass


class FilingPeriodCdAdmin(BaseAdmin):
    # Filing periods with their start/end dates.
    list_display = (
        "period_id", "start_date", "end_date", "period_desc",
    )
    search_fields = (
        "period_id",
    )
# The following admin classes accept BaseAdmin's default configuration
# (no custom list display, filters or search fields).
class GroupTypesCdAdmin(BaseAdmin):
    pass


class HeaderCdAdmin(BaseAdmin):
    pass


class HdrCdAdmin(BaseAdmin):
    pass


class ImageLinksCdAdmin(BaseAdmin):
    pass


class LegislativeSessionsCdAdmin(BaseAdmin):
    pass


class LobbyingChgLogCdAdmin(BaseAdmin):
    pass


class LobbyistContributions1CdAdmin(BaseAdmin):
    pass


class LobbyistContributions2CdAdmin(BaseAdmin):
    pass


class LobbyistContributions3CdAdmin(BaseAdmin):
    pass


class LobbyistEmployer1CdAdmin(BaseAdmin):
    pass


class LobbyistEmployer2CdAdmin(BaseAdmin):
    pass


class LobbyistEmployer3CdAdmin(BaseAdmin):
    pass


class LobbyistEmployerFirms1CdAdmin(BaseAdmin):
    pass


class LobbyistEmployerFirms2CdAdmin(BaseAdmin):
    pass


class LobbyistEmpLobbyist1CdAdmin(BaseAdmin):
    pass


class LobbyistEmpLobbyist2CdAdmin(BaseAdmin):
    pass


class LobbyistFirm1CdAdmin(BaseAdmin):
    pass


class LobbyistFirm2CdAdmin(BaseAdmin):
    pass


class LobbyistFirm3CdAdmin(BaseAdmin):
    pass


class LobbyistFirmEmployer1CdAdmin(BaseAdmin):
    pass


class LobbyistFirmEmployer2CdAdmin(BaseAdmin):
    pass


class LobbyistFirmLobbyist1CdAdmin(BaseAdmin):
    pass


class LobbyistFirmLobbyist2CdAdmin(BaseAdmin):
    pass


class LookupCodeAdmin(BaseAdmin):
    # Generic code lookup table, grouped by code_type.
    list_display = (
        "code_type",
        "code_id",
        "code_desc",
    )
    list_filter = (
        "code_type",
    )
    search_fields = (
        "code_type",
        "code_id",
        "code_desc",
    )


class NamesCdAdmin(BaseAdmin):
    pass


class ReceivedFilingsCdAdmin(BaseAdmin):
    pass


class ReportsCdAdmin(BaseAdmin):
    pass
# Register every ModelAdmin defined above with the default admin site.
# (FilerLinksCdAdmin is registered via its @admin.register decorator.)
admin.site.register(models.AcronymsCd, AcronymsCdAdmin)
admin.site.register(models.AddressCd, AddressCdAdmin)
admin.site.register(models.BallotMeasuresCd, BallotMeasuresCdAdmin)
admin.site.register(models.EfsFilingLogCd, EfsFilingLogCdAdmin)
admin.site.register(models.FilersCd, FilersCdAdmin)
admin.site.register(models.FilerAcronymsCd, FilerAcronymsCdAdmin)
admin.site.register(models.FilerAddressCd, FilerAddressCdAdmin)
admin.site.register(models.FilerEthicsClassCd, FilerEthicsClassCdAdmin)
admin.site.register(models.FilerInterestsCd, FilerInterestsCdAdmin)
admin.site.register(models.FilerStatusTypesCd, FilerStatusTypesCdAdmin)
admin.site.register(models.FilerToFilerTypeCd, FilerToFilerTypeCdAdmin)
admin.site.register(models.FilerTypesCd, FilerTypesCdAdmin)
admin.site.register(models.FilerXrefCd, FilerXrefCdAdmin)
admin.site.register(models.FilingPeriodCd, FilingPeriodCdAdmin)
admin.site.register(models.GroupTypesCd, GroupTypesCdAdmin)
admin.site.register(models.HeaderCd, HeaderCdAdmin)
admin.site.register(models.HdrCd, HdrCdAdmin)
admin.site.register(models.ImageLinksCd, ImageLinksCdAdmin)
admin.site.register(models.LegislativeSessionsCd, LegislativeSessionsCdAdmin)
admin.site.register(models.LobbyingChgLogCd, LobbyingChgLogCdAdmin)
admin.site.register(
    models.LobbyistContributions1Cd,
    LobbyistContributions1CdAdmin
)
admin.site.register(
    models.LobbyistContributions2Cd,
    LobbyistContributions2CdAdmin
)
admin.site.register(
    models.LobbyistContributions3Cd,
    LobbyistContributions3CdAdmin
)
admin.site.register(models.LobbyistEmployer1Cd, LobbyistEmployer1CdAdmin)
admin.site.register(models.LobbyistEmployer2Cd, LobbyistEmployer2CdAdmin)
admin.site.register(models.LobbyistEmployer3Cd, LobbyistEmployer3CdAdmin)
admin.site.register(
    models.LobbyistEmployerFirms1Cd,
    LobbyistEmployerFirms1CdAdmin
)
admin.site.register(
    models.LobbyistEmployerFirms2Cd,
    LobbyistEmployerFirms2CdAdmin
)
admin.site.register(
    models.LobbyistEmpLobbyist1Cd,
    LobbyistEmpLobbyist1CdAdmin
)
admin.site.register(
    models.LobbyistEmpLobbyist2Cd,
    LobbyistEmpLobbyist2CdAdmin
)
admin.site.register(models.LobbyistFirm1Cd, LobbyistFirm1CdAdmin)
admin.site.register(models.LobbyistFirm2Cd, LobbyistFirm2CdAdmin)
admin.site.register(models.LobbyistFirm3Cd, LobbyistFirm3CdAdmin)
admin.site.register(
    models.LobbyistFirmEmployer1Cd,
    LobbyistFirmEmployer1CdAdmin
)
admin.site.register(
    models.LobbyistFirmEmployer2Cd,
    LobbyistFirmEmployer2CdAdmin
)
admin.site.register(
    models.LobbyistFirmLobbyist1Cd,
    LobbyistFirmLobbyist1CdAdmin
)
admin.site.register(
    models.LobbyistFirmLobbyist2Cd,
    LobbyistFirmLobbyist2CdAdmin
)
admin.site.register(models.LookupCode, LookupCodeAdmin)
admin.site.register(models.NamesCd, NamesCdAdmin)
admin.site.register(models.ReceivedFilingsCd, ReceivedFilingsCdAdmin)
admin.site.register(models.ReportsCd, ReportsCdAdmin)
| {
"content_hash": "c553fd104453f9fa53ddc87bb5e54616",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 77,
"avg_line_length": 21.49230769230769,
"alnum_prop": 0.7216893342877595,
"repo_name": "anthonyjpesce/django-calaccess-raw-data",
"id": "dba1942dbb0c08c35a90af0465130403591eb79b",
"size": "7031",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calaccess_raw/admin/other.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "933"
},
{
"name": "Python",
"bytes": "519000"
}
],
"symlink_target": ""
} |
from chainerex.training.triggers.early_stopping_trigger import EarlyStoppingTrigger
class IterativeEarlyStoppingManager(object):
    """Coordinate repeated early stopping inside one training run.

    The wrapped :class:`EarlyStoppingTrigger` is exposed as
    ``extension_trigger``; every time it fires, ``iterate_count`` is
    incremented (via the trigger's on-condition listener) so that an
    extension can react -- e.g. switch to the next learning rate in a
    schedule -- and training continues. ``stop_trigger`` stops the trainer
    once ``max_epoch`` epochs have elapsed or early stopping has fired
    ``max_iterate_count`` times (a negative value disables either limit).

    Examples:
        from chainerex.training.triggers import IterativeEarlyStoppingManager
        iesm = IterativeEarlyStoppingManager()
        trainer = Trainer(updater, stop_trigger=iesm.stop_trigger)
        schedule_lr_list = [0.1, 0.001]
        def extension_fn(trainer):
            index = iesm.iterate_count
            optimizer.lr = schedule_lr_list[index]
        trainer.extend(extension_fn, trigger=iesm.extension_trigger)
    """

    def __init__(self, max_iterate_count=-1,
                 trigger=(1, 'epoch'), monitor='main/loss', patients=3,
                 mode='auto', verbose=False, max_epoch=100, debug=False):
        # Early-stopping trigger used by the scheduling extension; when its
        # condition fires it bumps iterate_count instead of ending training.
        self.extension_trigger = EarlyStoppingTrigger(
            trigger=trigger, monitor=monitor, patients=patients,
            mode=mode, verbose=verbose, max_epoch=max_epoch, debug=debug)
        self.extension_trigger.set_on_condition_listener(
            self.increment_iterate_count
        )
        self.verbose = verbose
        self.max_epoch = max_epoch
        # Trainer-facing stop trigger (callable taking the trainer).
        self.stop_trigger = self.stop_condition
        self.max_iterate_count = max_iterate_count
        # Number of times the early-stopping condition has fired so far.
        self.iterate_count = 0

    def increment_iterate_count(self, trainer):
        """Listener invoked each time the early-stopping condition fires."""
        self.iterate_count += 1
        if self.verbose:
            print('updating count to {}'.format(self.iterate_count))

    def stop_condition(self, trainer):
        """Return True when training should stop.

        @param trainer: chainer Trainer (only updater.epoch_detail is read)
        """
        # 1. Epoch budget exhausted? (disabled when max_epoch < 0)
        if self.max_epoch >= 0 and trainer.updater.epoch_detail >= self.max_epoch:
            return True
        # 2. Early stopping fired often enough? (disabled when
        #    max_iterate_count < 0)
        # BUG FIX: the original comparison `max_iterate_count > iterate_count`
        # was inverted, which stopped training immediately whenever a
        # positive limit was configured.
        if self.max_iterate_count >= 0 and self.iterate_count >= self.max_iterate_count:
            return True
        return False

    @property
    def iterate_index(self):
        """0-based index of the current schedule stage (iterate_count - 1)."""
        return self.iterate_count - 1
| {
"content_hash": "bd4ffd89b9edebf667202c392e76c201",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 86,
"avg_line_length": 35.660377358490564,
"alnum_prop": 0.6328042328042328,
"repo_name": "corochann/chainerex",
"id": "33d6d309151baf50509e83938ca533e8505d7c12",
"size": "1890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainerex/training/triggers/iterative_early_stopping_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124900"
}
],
"symlink_target": ""
} |
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Show full diffs when the XML comparison fails.
        self.maxDiff = None
        filename = 'chart_display_units10.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        # File produced by this test vs. the Excel-generated reference file.
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        # No files or XML elements are excluded from the comparison.
        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})
        # Pin the axis ids so the output matches the Excel reference file.
        chart.axis_ids = [56159232, 61364096]
        data = [
            [10000000, 20000000, 30000000, 20000000, 10000000],
        ]
        worksheet.write_column(0, 0, data[0])
        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        # Display units in trillions with the units label hidden.
        chart.set_y_axis({'display_units': 'trillions', 'display_units_visible': 0})
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
| {
"content_hash": "3f6f621f5a13043a58fe2cefd89cb8d0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 84,
"avg_line_length": 26.425531914893618,
"alnum_prop": 0.6111111111111112,
"repo_name": "jkyeung/XlsxWriter",
"id": "e340a578c5f8bcdd2029f904f673e634e9cc3d39",
"size": "1415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_chart_display_units10.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
} |
import time
from io import TextIOWrapper
from subprocess import Popen, PIPE
from six import PY2
import pysmt.smtlib.commands as smtcmd
from pysmt.solvers.eager import EagerModel
from pysmt.smtlib.parser import SmtLibParser
from pysmt.smtlib.script import SmtLibCommand
from pysmt.solvers.solver import Solver, SolverOptions
from pysmt.exceptions import (SolverReturnedUnknownResultError,
UnknownSolverAnswerError, PysmtValueError)
class SmtLibOptions(SolverOptions):
    """Options for the SmtLib Solver.

    * debug_interaction: True, False
      Print the communication between pySMT and the wrapped executable
    """
    def __init__(self, **base_options):
        SolverOptions.__init__(self, **base_options)
        if self.unsat_cores_mode is not None:
            raise PysmtValueError("'unsat_cores_mode' option not supported.")
        self.debug_interaction = False

        if 'debug_interaction' in self.solver_options:
            # BUG FIX: the whole solver_options dict was previously assigned
            # here instead of the flag's value, making debug_interaction
            # always truthy whenever the key was present.
            self.debug_interaction = self.solver_options['debug_interaction']
            del self.solver_options['debug_interaction']

    def __call__(self, solver):
        """Apply these options to *solver* via SMT-LIB set-option commands."""
        # These options are needed for the wrapper to work
        solver.set_option(":print-success", "true")
        solver.set_option(":diagnostic-output-channel", '"stdout"')

        if self.generate_models:
            solver.set_option(":produce-models", "true")
        else:
            solver.set_option(":produce-models", "false")
        if self.random_seed is not None:
            solver.set_option(":random-seed", str(self.random_seed))

        for k, v in self.solver_options.items():
            # BUG FIX: the guard previously compared against
            # 'diagnostic-output-channel' (missing the leading ':'), so it
            # never matched the key actually used above.
            if k in (':print-success', ':diagnostic-output-channel'):
                raise PysmtValueError("Cannot override %s." % k)
            solver.set_option(k, str(v))

# EOC SmtLibOptions
class SmtLibSolver(Solver):
    """Wrapper for using a solver via textual SMT-LIB interface.
    The solver is launched in a subprocess using args as arguments of
    the executable. Interaction with the solver occurs via pipe.
    """
    OptionsClass = SmtLibOptions

    def __init__(self, args, environment, logic, LOGICS=None, **options):
        Solver.__init__(self,
                        environment,
                        logic=logic,
                        **options)
        if LOGICS is not None: self.LOGICS = LOGICS
        self.args = args
        # Symbols already declared to the solver process; used to avoid
        # sending duplicate declare-fun commands in add_assertion().
        self.declared_vars = set()
        self.solver = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE,
                            bufsize=-1)
        # Give time to the process to start-up
        time.sleep(0.01)
        self.parser = SmtLibParser(interactive=True)
        # On Python 3 the subprocess pipes are binary; wrap them so the
        # rest of the class can work with text uniformly.
        if PY2:
            self.solver_stdin = self.solver.stdin
            self.solver_stdout = self.solver.stdout
        else:
            self.solver_stdin = TextIOWrapper(self.solver.stdin)
            self.solver_stdout = TextIOWrapper(self.solver.stdout)
        # Initialize solver
        self.options(self)
        self.set_logic(logic)

    def set_option(self, name, value):
        """Send an SMT-LIB set-option command and await acknowledgment."""
        self._send_silent_command(SmtLibCommand(smtcmd.SET_OPTION,
                                                [name, value]))

    def set_logic(self, logic):
        """Send an SMT-LIB set-logic command and await acknowledgment."""
        self._send_silent_command(SmtLibCommand(smtcmd.SET_LOGIC, [logic]))

    def _debug(self, msg, *format_args):
        # Echo the interaction only when the 'debug_interaction' option
        # was enabled in SmtLibOptions.
        if self.options.debug_interaction:
            print(msg % format_args)

    def _send_command(self, cmd):
        """Sends a command to the STDIN pipe."""
        self._debug("Sending: %s", cmd.serialize_to_string())
        cmd.serialize(self.solver_stdin, daggify=True)
        self.solver_stdin.write("\n")
        self.solver_stdin.flush()

    def _send_silent_command(self, cmd):
        """Sends a command to the STDIN pipe and awaits for acknowledgment."""
        self._send_command(cmd)
        self._check_success()

    def _get_answer(self):
        """Reads a line from STDOUT pipe"""
        res = self.solver_stdout.readline().strip()
        self._debug("Read: %s", res)
        return res

    def _get_value_answer(self):
        """Reads and parses an assignment from the STDOUT pipe"""
        lst = self.parser.get_assignment_list(self.solver_stdout)
        self._debug("Read: %s", lst)
        return lst

    def _declare_variable(self, symbol):
        # Declare the symbol to the solver process and remember it locally.
        cmd = SmtLibCommand(smtcmd.DECLARE_FUN, [symbol])
        self._send_silent_command(cmd)
        self.declared_vars.add(symbol)

    def _check_success(self):
        # ':print-success true' (set in SmtLibOptions.__call__) makes the
        # solver answer "success" after each command; anything else is an
        # error we surface to the caller.
        res = self._get_answer()
        if res != "success":
            raise UnknownSolverAnswerError("Solver returned: '%s'" % res)

    def solve(self, assumptions=None):
        """Run check-sat; True if sat, False if unsat.

        Raises SolverReturnedUnknownResultError on 'unknown' and
        UnknownSolverAnswerError on any unexpected reply. Solving under
        assumptions is not supported by this wrapper.
        """
        assert assumptions is None
        self._send_command(SmtLibCommand(smtcmd.CHECK_SAT, []))
        ans = self._get_answer()
        if ans == "sat":
            return True
        elif ans == "unsat":
            return False
        elif ans == "unknown":
            raise SolverReturnedUnknownResultError
        else:
            raise UnknownSolverAnswerError("Solver returned: " + ans)

    def reset_assertions(self):
        self._send_silent_command(SmtLibCommand(smtcmd.RESET_ASSERTIONS, []))
        return

    def add_assertion(self, formula, named=None):
        # NOTE(review): the 'named' parameter is accepted but ignored here
        # (unsat-core naming is rejected by SmtLibOptions anyway).
        # This is needed because Z3 (and possibly other solvers) incorrectly
        # recognize N * M * x as a non-linear term
        formula = formula.simplify()
        deps = formula.get_free_variables()
        for d in deps:
            if d not in self.declared_vars:
                self._declare_variable(d)
        self._send_silent_command(SmtLibCommand(smtcmd.ASSERT, [formula]))

    def push(self, levels=1):
        self._send_silent_command(SmtLibCommand(smtcmd.PUSH, [levels]))

    def pop(self, levels=1):
        self._send_silent_command(SmtLibCommand(smtcmd.POP, [levels]))

    def get_value(self, item):
        """Query the solver for the value of a single term."""
        self._send_command(SmtLibCommand(smtcmd.GET_VALUE, [item]))
        lst = self._get_value_answer()
        # get-value of one term returns exactly one (term, value) pair.
        assert len(lst) == 1
        assert len(lst[0]) == 2
        return lst[0][1]

    def print_model(self, name_filter=None):
        """Print 'name = value' for every declared variable."""
        if name_filter is not None:
            raise NotImplementedError
        for v in self.declared_vars:
            print("%s = %s" % (v, self.get_value(v)))

    def get_model(self):
        """Build an EagerModel by querying the solver for every symbol
        known to the formula manager."""
        assignment = {}
        for s in self.environment.formula_manager.get_all_symbols():
            if s.is_term():
                v = self.get_value(s)
                assignment[s] = v
        return EagerModel(assignment=assignment, environment=self.environment)

    def _exit(self):
        # Ask the solver to quit, then close all pipes and terminate the
        # subprocess.
        self._send_command(SmtLibCommand(smtcmd.EXIT, []))
        self.solver_stdin.close()
        self.solver_stdout.close()
        self.solver.stderr.close()
        self.solver.terminate()
        return
| {
"content_hash": "a9cb03fda2d2201fe57aed6a9995a05c",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 78,
"avg_line_length": 35.34375,
"alnum_prop": 0.6102269378131447,
"repo_name": "agriggio/pysmt",
"id": "3738537943c333f02349c9f52c3aa9f0f0ac9be8",
"size": "7381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysmt/smtlib/solver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "1019481"
},
{
"name": "Shell",
"bytes": "6094"
}
],
"symlink_target": ""
} |
"""Patching utilities for working with fake objects.
See :ref:`using-fudge` for common scenarios.
"""
__all__ = ['patch_object', 'with_patched_object', 'PatchHandler',
'patched_context', 'patch']
import sys
import fudge
from fudge.util import wraps
class patch(object):
    """A test decorator that patches importable names with :class:`fakes <Fake>`

    Each fake is exposed as an argument to the test:

    .. doctest::
        :hide:

        >>> import fudge

    .. doctest::

        >>> @fudge.patch('os.remove')
        ... def test(fake_remove):
        ...     fake_remove.expects_call()
        ...     # do stuff...
        ...
        >>> test()
        Traceback (most recent call last):
        ...
        AssertionError: fake:os.remove() was not called

    .. doctest::
        :hide:

        >>> fudge.clear_expectations()

    Many paths can be patched at once:

    .. doctest::

        >>> @fudge.patch('os.remove',
        ...              'shutil.rmtree')
        ... def test(fake_remove, fake_rmtree):
        ...     fake_remove.is_callable()
        ...     # do stuff...
        ...
        >>> test()

    For convenience, the patch method calls
    :func:`fudge.clear_calls`, :func:`fudge.verify`, and :func:`fudge.clear_expectations`. For that reason, you must manage all your fake objects within the test function itself.

    .. note::

        If you are using a unittest class, you cannot declare fakes
        within ``setUp()`` unless you manually clear calls and clear
        expectations. If you do that, you'll want to use the
        :func:`fudge.with_fakes` decorator instead of ``@patch``.

    """

    def __init__(self, *obj_paths):
        self.obj_paths = obj_paths

    def __call__(self, fn):
        # Wrap fn so it runs with all paths patched; the created fakes are
        # appended to the positional arguments of the wrapped function.
        @wraps(fn)
        def caller(*args, **kw):
            fakes = self.__enter__()
            # A single patched path yields a single fake, not a list.
            if not isinstance(fakes, (tuple, list)):
                fakes = [fakes]
            args += tuple(fakes)
            value = None
            try:
                value = fn(*args, **kw)
            except:
                etype, val, tb = sys.exc_info()
                # Restore all patches before re-raising; the Python 2
                # three-argument raise preserves the original traceback.
                self.__exit__(etype, val, tb)
                raise etype, val, tb
            else:
                self.__exit__(None, None, None)
            return value
        return caller

    def __enter__(self):
        # Start from a clean registry, then install one Fake per dotted path.
        fudge.clear_expectations()
        fudge.clear_calls()
        self.patches = []
        all_fakes = []
        for path in self.obj_paths:
            try:
                target, attr = path.rsplit('.', 1)
            except (TypeError, ValueError):
                raise TypeError(
                    "Need a valid target to patch. You supplied: %r"
                    % path)
            fake = fudge.Fake(path)
            all_fakes.append(fake)
            self.patches.append(patch_object(target, attr, fake))
        # Return a bare fake for the common single-path case.
        if len(all_fakes) == 1:
            return all_fakes[0]
        else:
            return all_fakes

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Verify expectations only when the block exited cleanly; patches
        # are always restored, even if verification raises.
        try:
            if not exc_type:
                fudge.verify()
        finally:
            for p in self.patches:
                p.restore()
            fudge.clear_expectations()
def with_patched_object(obj, attr_name, patched_value):
    """Decorator that replaces ``attr_name`` on ``obj`` with
    ``patched_value`` while the decorated callable runs, restoring the
    original attribute afterwards (even on exception).

    This is a thin wrapper around :func:`fudge.patcher.patch_object`.
    """
    def decorator(fn):
        @wraps(fn)
        def wrapper(*call_args, **call_kwargs):
            handle = patch_object(obj, attr_name, patched_value)
            try:
                return fn(*call_args, **call_kwargs)
            finally:
                handle.restore()
        return wrapper
    return decorator
class patched_context(object):
    """Context manager that patches ``attr_name`` on ``obj`` with
    ``patched_value`` for the duration of a ``with`` block.

    This is a wrapper around :func:`fudge.patcher.patch_object`. Note that
    the patch is applied as soon as the instance is constructed (not when
    the ``with`` block is entered) and removed on exit.
    """

    def __init__(self, obj, attr_name, patched_value):
        # An @contextmanager generator would be shorter, but restoring the
        # attribute must happen in a finally clause, which historically
        # could not wrap a yield; hence the explicit class.
        self.patched_object = patch_object(obj, attr_name, patched_value)

    def __enter__(self):
        return self.patched_object

    def __exit__(self, *exc_info):
        self.patched_object.restore()
def patch_object(obj, attr_name, patched_value):
    """Patches an object and returns an instance of :class:`fudge.patcher.PatchHandler` for later restoration.

    Note that if *obj* is not an object but a path to a module then it will be imported.

    You may want to use a more convenient wrapper :func:`with_patched_object` or :func:`patched_context`

    Example::

        >>> from fudge import patch_object
        >>> class Session:
        ...     state = 'clean'
        ...
        >>> patched_session = patch_object(Session, "state", "dirty")
        >>> Session.state
        'dirty'
        >>> patched_session.restore()
        >>> Session.state
        'clean'

    Here is another example showing how to patch multiple objects at once::

        >>> class Session:
        ...     state = 'clean'
        ...
        >>> class config:
        ...     session_strategy = 'database'
        ...
        >>> patches = [
        ...     patch_object(config, "session_strategy", "filesystem"),
        ...     patch_object(Session, "state", "dirty")
        ... ]
        >>> try:
        ...     # your app under test would run here ...
        ...     print "(while patched)"
        ...     print "config.session_strategy=%r" % config.session_strategy
        ...     print "Session.state=%r" % Session.state
        ... finally:
        ...     for p in patches:
        ...         p.restore()
        ...     print "(patches restored)"
        (while patched)
        config.session_strategy='filesystem'
        Session.state='dirty'
        (patches restored)
        >>> config.session_strategy
        'database'
        >>> Session.state
        'clean'

    """
    if isinstance(obj, (str, unicode)):
        # A dotted-path string was given: import the longest importable
        # module prefix, then walk the remaining parts as attributes below.
        obj_path = adjusted_path = obj
        done = False
        exc = None
        at_top_level = False
        while not done:
            try:
                obj = __import__(adjusted_path)
                done = True
            except ImportError:
                # Handle paths that traverse object attributes.
                # Such as: smtplib.SMTP.connect
                #          smtplib <- module to import
                adjusted_path = adjusted_path.rsplit('.', 1)[0]
                # Remember the first failure; it names the full path the
                # caller asked for and is the most useful to report.
                if not exc:
                    exc = sys.exc_info()
                if at_top_level:
                    # We're at the top level module and it doesn't exist.
                    # Raise the first exception since it will make more sense:
                    etype, val, tb = exc
                    raise etype, val, tb
                if not adjusted_path.count('.'):
                    at_top_level = True
        for part in obj_path.split('.')[1:]:
            obj = getattr(obj, part)
    handle = PatchHandler(obj, attr_name)
    handle.patch(patched_value)
    return handle
class NonExistant(object):
    """Sentinel type marking an attribute value that does not exist."""
class PatchHandler(object):
    """Low level patch handler that memorizes a patch so you can restore it later.

    You can use more convenient wrappers :func:`with_patched_object` and :func:`patched_context`
    """

    def __init__(self, orig_object, attr_name):
        self.orig_object = orig_object
        self.attr_name = attr_name
        # Set only when a builtin had to be patched through a subclass
        # proxy (see patch()); restore() uses it to pick the undo path.
        self.proxy_object = None
        self.orig_value, self.is_local = self._get_original(self.orig_object,
                                                            self.attr_name)
        # If the attribute is a descriptor (e.g. a property), remember the
        # class that actually defines it so patch/restore target that class.
        self.getter_class, self.getter = self._handle_getter(self.orig_object,
                                                             self.attr_name)

    def patch(self, patched_value):
        """Set a new value for the attribute of the object."""
        try:
            if self.getter:
                setattr(self.getter_class, self.attr_name, patched_value)
            else:
                setattr(self.orig_object, self.attr_name, patched_value)
        except TypeError:
            # Workaround for patching builtin objects:
            # builtins reject setattr, so create a subclass carrying the
            # patched attribute and rebind the module-level name to it.
            proxy_name = 'fudge_proxy_%s_%s_%s' % (
                self.orig_object.__module__,
                self.orig_object.__name__,
                patched_value.__class__.__name__
            )
            self.proxy_object = type(proxy_name, (self.orig_object,),
                                     {self.attr_name: patched_value})
            mod = sys.modules[self.orig_object.__module__]
            setattr(mod, self.orig_object.__name__, self.proxy_object)

    def restore(self):
        """Restore the saved value for the attribute of the object."""
        if self.proxy_object is None:
            if self.getter:
                setattr(self.getter_class, self.attr_name, self.getter)
            elif self.is_local:
                setattr(self.orig_object, self.attr_name, self.orig_value)
            else:
                # Was not a local, safe to delete:
                delattr(self.orig_object, self.attr_name)
        else:
            # Undo the builtin-proxy workaround: rebind the original object
            # under its name in its defining module.
            setattr(sys.modules[self.orig_object.__module__],
                    self.orig_object.__name__,
                    self.orig_object)

    def _find_class_for_attr(self, cls, attr):
        # Recursively walk the inheritance tree to find the class whose
        # __dict__ holds attr; returns NonExistant when no class does.
        if attr in cls.__dict__:
            return cls
        else:
            for base in cls.__bases__:
                if self._find_class_for_attr(base, attr) is not NonExistant:
                    return base
            return NonExistant

    def _get_original(self, orig_object, name):
        # Returns (value, is_local); is_local is True when the attribute
        # lives directly in orig_object.__dict__ rather than being
        # inherited or computed.
        try:
            value = orig_object.__dict__[name]
            is_local = True
        except (AttributeError, KeyError):
            value = getattr(orig_object, name, NonExistant)
            is_local = False
        if value is NonExistant:
            raise AttributeError(
                "%s does not have the attribute %r" % (orig_object, name))
        return value, is_local

    def _get_exact_original(self, orig_object, name):
        # Resolve the object whose __dict__ really defines `name`; this may
        # be a base class when the attribute is inherited.
        if hasattr(orig_object, '__dict__'):
            if name not in orig_object.__dict__:
                # TODO: handle class objects, not just instance objects?
                # This is only here for Class.property.__get__
                if hasattr(orig_object, '__class__'):
                    cls = orig_object.__class__
                    orig_object = self._find_class_for_attr(cls, name)
        return orig_object

    def _handle_getter(self, orig_object, name):
        # Detect descriptor attributes (anything defining __get__) so that
        # patching can replace them on the defining class.
        getter_class, getter = None, None
        exact_orig = self._get_exact_original(orig_object, name)
        try:
            ob = exact_orig.__dict__[name]
        except (AttributeError, KeyError):
            pass
        else:
            if hasattr(ob, '__get__'):
                getter_class = exact_orig
                getter = ob
        return getter_class, getter
| {
"content_hash": "ecafb5bf8335040518d4edb0e3f4682e",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 179,
"avg_line_length": 33.75476839237057,
"alnum_prop": 0.517678398450113,
"repo_name": "mozilla/betafarm",
"id": "8ca7e68a11c2c1d926401ca6af7b06cf99ba9043",
"size": "12389",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/fudge/patcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127124"
},
{
"name": "HTML",
"bytes": "222114"
},
{
"name": "JavaScript",
"bytes": "38349"
},
{
"name": "Python",
"bytes": "1829931"
},
{
"name": "Shell",
"bytes": "1213"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the pyatmlab documentation.
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.autodoc'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'pyatmlab'
copyright = '2013, Gerrit Holl'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The version string is read from the project ChangeLog at build time.
from pyatmlab.meta import get_version
# The short X.Y version.
version = get_version(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'ChangeLog'))
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyatmlabdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'pyatmlab.tex', 'pyatmlab Documentation',
     'Gerrit Holl', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyatmlab', 'pyatmlab Documentation',
     ['Gerrit Holl'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'pyatmlab', 'pyatmlab Documentation',
     'Gerrit Holl', 'pyatmlab', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "4ef40d2d5a042dbaaf8ff111798fe2c9",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 97,
"avg_line_length": 31.775100401606426,
"alnum_prop": 0.7052578361981799,
"repo_name": "olemke/pyatmlab",
"id": "cc8802c381cd71ebe833e9598e469dfb1f90d20c",
"size": "8356",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "364064"
}
],
"symlink_target": ""
} |
import re, sys, os
# Check control characters.
def check_control_char(filename):
    """Report lines of *filename* that contain a backspace control character.

    Prints the offending line numbers and a fix-it hint when any are found.
    Returns a truthy value (1) if the file contains control characters,
    falsy (0) otherwise.
    """
    bad_lines = []
    with open(filename, 'r', encoding='utf-8') as file:
        # enumerate replaces the hand-maintained line counter.
        for line_num, line in enumerate(file, start=1):
            # Inside a character class, \b matches the literal backspace
            # character (U+0008), not a word boundary.
            if re.search(r'[\b]', line):
                bad_lines.append(line_num)
    # The original kept a separate 0/1 flag in lockstep with this list;
    # the list's truthiness already carries that information.
    if bad_lines:
        print("\n" + filename + ": this file has control characters in the following lines:\n")
        for cc in bad_lines:
            print("CONTROL CHARACTERS: L" + str(cc))
        print("\nPlease delete these control characters.")
        return 1
    return 0
if __name__ == "__main__":
    # Check every readable file named on the command line and count how
    # many contain control characters.
    count = 0
    for filename in sys.argv[1:]:
        if os.path.isfile(filename):
            if check_control_char(filename):
                count += 1
    if count:
        print("\nThe above issues will cause website build failure. Please fix them.")
        # sys.exit instead of the bare exit() builtin: exit() is injected
        # by the site module and is not guaranteed to exist in all
        # environments (e.g. when run with python -S or frozen).
        sys.exit(1)
"content_hash": "5b6157e6232592121df163cadd668067",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 95,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.54004329004329,
"repo_name": "pingcap/docs",
"id": "3bf7784c3b71801d9cfbcac2efffca117676902c",
"size": "2249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/check-control-char.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "9226"
},
{
"name": "Python",
"bytes": "110332"
},
{
"name": "Shell",
"bytes": "11505"
},
{
"name": "Smarty",
"bytes": "716"
},
{
"name": "TeX",
"bytes": "7550"
}
],
"symlink_target": ""
} |
"""Full Sloth test suite"""
import logging
import os
import unittest
logger = logging.getLogger(__name__)
def suite():
    """Assemble and return the aggregated unittest suite for the package."""
    from . import test_version

    combined = unittest.TestSuite()
    combined.addTest(test_version.suite())
    return combined
def run_tests():
    """Run the complete test suite and print whether it passed."""
    runner = unittest.TextTestRunner()
    result = runner.run(suite())
    outcome = "Test suite succeeded" if result.wasSuccessful() else "Test suite failed"
    print(outcome)
| {
"content_hash": "defb8a6216c6c92c9429f7e6e14cf9e4",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 47,
"avg_line_length": 21.17391304347826,
"alnum_prop": 0.6632443531827515,
"repo_name": "maurov/xraysloth",
"id": "1806257ebc78c218a9f7d3365c48287cab98bd2c",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sloth/test/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "IDL",
"bytes": "882"
},
{
"name": "Jupyter Notebook",
"bytes": "328173"
},
{
"name": "Python",
"bytes": "791348"
},
{
"name": "Shell",
"bytes": "3536"
}
],
"symlink_target": ""
} |
import os
# Directory containing this example script.
head = os.path.dirname(__file__)
# Location of the pygtkweb checkout, three levels above this example.
pygtkweb = os.path.join(head, '..', '..', '..', 'pygtkweb')

# Source files to translate, mapped to their per-target options.
TARGETS = {
    'AutoGtk.py': dict(
        options=[
            '--library_dir',
            os.path.join(pygtkweb, 'library'),
        ],
    )
}

# Metadata describing this example for the install step below.
PACKAGE = {
    'title': 'pywebgtkbuilder',
    'desc': 'Python Web-Gtk "GtkBuilder" example',
}
def setup(targets):
    '''Setup example for translation, MUST call util.setup(targets).'''
    # `util` is imported by the bootstrap code at the bottom of this file
    # before setup() is invoked.
    util.setup(targets)
def translate():
    '''Translate example, MUST call util.translate().'''
    # Delegates entirely to the shared _examples.util helper.
    util.translate()
def install(package):
    '''Install and cleanup example module. MUST call util.install(package)'''
    # Delegates entirely to the shared _examples.util helper.
    util.install(package)
##---------------------------------------##
# --------- (-: DO NOT EDIT :-) --------- #
##---------------------------------------##
import sys
import os

# Walk up from this file's directory until a directory literally named
# 'examples' is found; it contains the shared _examples helper package.
examples = head = os.path.abspath(os.path.dirname(__file__))
while os.path.split(examples)[1].lower() != 'examples':
    parent = os.path.split(examples)[0]
    # BUGFIX: at the filesystem root os.path.split() returns the root
    # unchanged (e.g. ('/', '')), so the original `if not examples` guard
    # never fired and the loop spun forever. Detect the fixed point and
    # fail loudly instead.
    if not parent or parent == examples:
        raise ValueError("Cannot determine examples directory")
    examples = parent
sys.path.insert(0, os.path.join(examples))
from _examples import util
sys.path.pop(0)
util.init(head)
setup(TARGETS)
translate()
install(PACKAGE)
| {
"content_hash": "3efb9d4a85769a344a2ffd52a814ec8e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 77,
"avg_line_length": 21.135593220338983,
"alnum_prop": 0.5781876503608661,
"repo_name": "Hasimir/pyjs",
"id": "3bee55b7f47b5daf41e0822bcdfc36dfce26cd8b",
"size": "1294",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/deprecated/pywebgtkbuilder/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515375"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
} |
from setuptools import setup
import sys
# True when building/installing under Python 3 (enables 2to3 below).
PY3 = sys.version_info >= (3,)

VERSION = "0.5.1-dev"

# Command-line scripts shipped from fs/commands/.
# BUGFIX: 'fscp' was listed twice, which made setup(scripts=...) install
# the same script twice; each command must appear exactly once.
COMMANDS = ['fscat',
            'fscp',
            'fsinfo',
            'fsls',
            'fsmv',
            'fsrm',
            'fsserve',
            'fstree',
            'fsmkdir',
            'fsmount']

# Trove classifiers describing supported Python versions and the domain.
classifiers = [
    "Development Status :: 5 - Production/Stable",
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Topic :: System :: Filesystems',
]
# The PyPI long description is taken verbatim from the README.
with open('README.txt', 'r') as f:
    long_desc = f.read()

extra = {}
if PY3:
    # Run 2to3 over the sources at build time when installing on Python 3.
    extra["use_2to3"] = True
# Package metadata and contents; **extra adds use_2to3 on Python 3 only.
setup(install_requires=['setuptools', 'six'],
      name='fs',
      version=VERSION,
      description="Filesystem abstraction layer",
      long_description=long_desc,
      license="BSD",
      author="Will McGugan",
      author_email="will@willmcgugan.com",
      #url="http://code.google.com/p/pyfilesystem/",
      #download_url="http://code.google.com/p/pyfilesystem/downloads/list",
      url="http://pypi.python.org/pypi/fs/",
      platforms=['any'],
      packages=['fs',
                'fs.expose',
                'fs.expose.dokan',
                'fs.expose.fuse',
                'fs.expose.wsgi',
                'fs.tests',
                'fs.wrapfs',
                'fs.osfs',
                'fs.contrib',
                'fs.contrib.bigfs',
                'fs.contrib.davfs',
                'fs.contrib.tahoelafs',
                'fs.commands'],
      package_data={'fs': ['tests/data/*.txt']},
      # One installable script per command name listed in COMMANDS.
      scripts=['fs/commands/%s' % command for command in COMMANDS],
      classifiers=classifiers,
      **extra
      )
| {
"content_hash": "55c5973e5db910f64d07ae3a6d5741e6",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 27.420289855072465,
"alnum_prop": 0.516384778012685,
"repo_name": "pscottdevos/pyfilesystem",
"id": "ea8f1dc2581eee7c1349d837931ed430cee43f43",
"size": "1979",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1057381"
},
{
"name": "Shell",
"bytes": "3083"
}
],
"symlink_target": ""
} |
import os
import pytest
from mockito import mock, verify, when, unstub, ANY
from selenium import webdriver
from SeleniumLibrary.keywords import WebDriverCreator
# Log directory handed to WebDriverCreator in every test.
LOG_DIR = "/log/dir"


@pytest.fixture(scope="module")
def creator():
    # Module-scoped fixture: a single WebDriverCreator instance shared by
    # all tests in this file.
    return WebDriverCreator(LOG_DIR)
def teardown_function():
    # Remove all mockito stubs after each test so stubbed selenium
    # factory functions do not leak between tests.
    unstub()
def test_normalise_browser_name(creator):
    """Normalisation lowercases the name and drops all whitespace."""
    for raw_name in ("chrome", "ChrOmE", " Ch rO mE "):
        assert creator._normalise_browser_name(raw_name) == "chrome"
def test_get_creator_method(creator):
    """Known browsers resolve to a creator method; unknown ones raise."""
    assert creator._get_creator_method("chrome")
    assert creator._get_creator_method("firefox")
    with pytest.raises(ValueError) as error:
        creator._get_creator_method("foobar")
    assert "foobar is not a supported browser." in str(error.value)
def test_parse_capabilities(creator):
    # A "k:v,k:v" string is parsed into desired_capabilities by default.
    caps = creator._parse_capabilities("key1:value1,key2:value2")
    expected = {"desired_capabilities": {"key1": "value1", "key2": "value2"}}
    assert caps == expected
    # ie/firefox/ff/edge use the 'capabilities' keyword instead.
    caps = creator._parse_capabilities("key1:value1,key2:value2", "ie")
    expected = {"capabilities": {"key1": "value1", "key2": "value2"}}
    assert caps == expected
    caps = creator._parse_capabilities("key1:value1,key2:value2", "firefox")
    assert caps == expected
    caps = creator._parse_capabilities("key1:value1,key2:value2", "ff")
    assert caps == expected
    caps = creator._parse_capabilities("key1:value1,key2:value2", "edge")
    assert caps == expected
    # A dict input is wrapped unchanged under desired_capabilities.
    parsing_caps = expected.copy()
    caps = creator._parse_capabilities(parsing_caps)
    assert caps == {"desired_capabilities": expected}
    # Whitespace around ':' and ',' separators is stripped...
    caps = creator._parse_capabilities("key1 : value1 , key2: value2")
    expected = {"desired_capabilities": {"key1": "value1", "key2": "value2"}}
    assert caps == expected
    # ...but spaces embedded inside a key or value are preserved.
    caps = creator._parse_capabilities(" key 1 : value 1 , key2:value2")
    expected = {"desired_capabilities": {"key 1": "value 1", "key2": "value2"}}
    assert caps == expected
    # Empty/None inputs yield no capability kwargs at all.
    caps = creator._parse_capabilities("")
    assert caps == {}
    caps = creator._parse_capabilities({})
    assert caps == {}
    caps = creator._parse_capabilities(None)
    assert caps == {}
    # Browsers outside the ie/firefox/edge family (including None and
    # unknown names) get desired_capabilities.
    for browser in [None, "safari", "headlesschrome", "foobar"]:
        caps = creator._parse_capabilities(
            {"key1": "value1", "key2": "value2"}, browser
        )
        expected = {"desired_capabilities": {"key1": "value1", "key2": "value2"}}
        assert caps == expected
    for browser in ["ie", "firefox", "edge"]:
        caps = creator._parse_capabilities(
            {"key1": "value1", "key2": "value2"}, browser
        )
        expected = {"capabilities": {"key1": "value1", "key2": "value2"}}
        assert caps == expected
def test_capabilities_resolver_firefox(creator):
    # Resolved capabilities keep the user-provided values and include the
    # browserName from the defaults.
    default_capabilities = webdriver.DesiredCapabilities.FIREFOX.copy()
    expected_caps = {
        "desired_capabilities": {"version": "66.02", "browserName": "firefox"}
    }
    caps_in = {"capabilities": {"version": "66.02"}}
    resolved_caps = creator._remote_capabilities_resolver(caps_in, default_capabilities)
    assert resolved_caps == expected_caps
    # An explicit browserName matching the default produces the same result.
    caps_in = {"capabilities": {"version": "66.02", "browserName": "firefox"}}
    resolved_caps = creator._remote_capabilities_resolver(caps_in, default_capabilities)
    assert resolved_caps == expected_caps
def test_capabilities_resolver_no_set_caps(creator):
    """Without user capabilities the browser defaults are used as-is."""
    defaults = webdriver.DesiredCapabilities.FIREFOX.copy()
    resolved = creator._remote_capabilities_resolver({}, defaults)
    assert resolved == {"desired_capabilities": defaults}
def test_capabilities_resolver_chrome(creator):
    """Chrome defaults merge into user-supplied remote capabilities."""
    defaults = webdriver.DesiredCapabilities.CHROME.copy()
    expected = {
        "desired_capabilities": {"version": "73.0.3683.86", "browserName": "chrome"}
    }
    resolved = creator._remote_capabilities_resolver(
        {"capabilities": {"version": "73.0.3683.86"}}, defaults
    )
    assert resolved == expected
    caps_in = {
        "desired_capabilities": {"version": "73.0.3683.86", "browserName": "chrome"}
    }
    assert creator._remote_capabilities_resolver(caps_in, defaults) == expected
def test_chrome(creator):
    """Creating a local Chrome driver delegates to webdriver.Chrome."""
    chrome_mock = mock()
    when(webdriver).Chrome(
        options=None, service_log_path=None, executable_path="chromedriver"
    ).thenReturn(chrome_mock)
    assert creator.create_chrome({}, None) == chrome_mock
def test_chrome_with_desired_capabilities(creator):
    """desired_capabilities are forwarded to webdriver.Chrome."""
    chrome_mock = mock()
    when(webdriver).Chrome(
        desired_capabilities={"key": "value"},
        options=None,
        service_log_path=None,
        executable_path="chromedriver",
    ).thenReturn(chrome_mock)
    driver = creator.create_chrome({"desired_capabilities": {"key": "value"}}, None)
    assert driver == chrome_mock
def test_chrome_remote_no_caps(creator):
    """Remote Chrome without user capabilities uses the Chrome defaults."""
    grid_url = "http://localhost:4444/wd/hub"
    default_caps = webdriver.DesiredCapabilities.CHROME.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=default_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_chrome({}, grid_url) == remote_mock
def test_chrome_remote_caps(creator):
    """Explicit desired_capabilities are passed through to Remote."""
    grid_url = "http://localhost:4444/wd/hub"
    caps = {"browserName": "chrome"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_chrome({"desired_capabilities": caps}, grid_url) == remote_mock
def test_chrome_remote_caps_no_browser_name(creator):
    """browserName is filled in when user capabilities omit it."""
    grid_url = "http://localhost:4444/wd/hub"
    merged_caps = {"browserName": "chrome", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=merged_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_chrome({"desired_capabilities": {"key": "value"}}, grid_url)
    assert driver == remote_mock
def test_chrome_healdless(creator):
    """Headless Chrome sets options.headless before creating the driver."""
    chrome_mock = mock()
    chrome_options = mock()
    when(webdriver).ChromeOptions().thenReturn(chrome_options)
    when(webdriver).Chrome(
        options=chrome_options, service_log_path=None, executable_path="chromedriver"
    ).thenReturn(chrome_mock)
    driver = creator.create_headless_chrome({}, None)
    assert chrome_options.headless is True
    assert driver == chrome_mock
def test_chrome_healdless_with_grid(creator):
    """Headless remote Chrome passes ChromeOptions to Remote and sets headless."""
    chrome_options = mock()
    when(webdriver).ChromeOptions().thenReturn(chrome_options)
    grid_url = "localhost:4444"
    default_caps = webdriver.DesiredCapabilities.CHROME.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        options=chrome_options,
        browser_profile=None,
        desired_capabilities=default_caps,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_headless_chrome({}, grid_url)
    assert chrome_options.headless is True
    assert driver == remote_mock
def test_firefox(creator):
    """Local Firefox gets a fresh profile and the geckodriver log path."""
    ff_mock = mock()
    ff_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(ff_profile)
    when(webdriver).Firefox(
        options=None,
        firefox_profile=ff_profile,
        executable_path="geckodriver",
        service_log_path=get_geckodriver_log(),
    ).thenReturn(ff_mock)
    assert creator.create_firefox({}, None, None) == ff_mock
    verify(webdriver).FirefoxProfile()
def test_get_ff_profile_real_path(creator):
    """A filesystem path argument is wrapped in a FirefoxProfile."""
    path = "/path/to/profile"
    expected_profile = mock()
    when(webdriver).FirefoxProfile(path).thenReturn(expected_profile)
    assert creator._get_ff_profile(path) == expected_profile
def test_get_ff_profile_no_path(creator):
    """No path argument yields a default FirefoxProfile."""
    expected_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(expected_profile)
    assert creator._get_ff_profile(None) == expected_profile
def test_get_ff_profile_instance_FirefoxProfile(creator):
    """An existing FirefoxProfile instance is passed through unchanged."""
    existing = webdriver.FirefoxProfile()
    assert creator._get_ff_profile(existing) == existing
def test_firefox_remote_no_caps(creator):
    """Remote Firefox without user capabilities uses the Firefox defaults."""
    grid_url = "http://localhost:4444/wd/hub"
    ff_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(ff_profile)
    default_caps = webdriver.DesiredCapabilities.FIREFOX.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=ff_profile,
        options=None,
        desired_capabilities=default_caps,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_firefox({}, grid_url, None) == remote_mock
def test_firefox_remote_caps(creator):
    """Explicit desired_capabilities are passed through to Remote."""
    grid_url = "http://localhost:4444/wd/hub"
    ff_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(ff_profile)
    caps = {"browserName": "firefox"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=ff_profile,
        options=None,
        desired_capabilities=caps,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_firefox({"desired_capabilities": caps}, grid_url, None)
    assert driver == remote_mock
def test_firefox_remote_caps_no_browsername(creator):
    """browserName is filled in when 'capabilities' omit it."""
    grid_url = "http://localhost:4444/wd/hub"
    ff_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(ff_profile)
    merged_caps = {"browserName": "firefox", "version": "66.02"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=ff_profile,
        options=None,
        desired_capabilities=merged_caps,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_firefox(
        {"capabilities": {"version": "66.02"}}, grid_url, None
    )
    assert driver == remote_mock
def test_firefox_profile(creator):
    """An explicit profile directory is wrapped in a FirefoxProfile."""
    ff_mock = mock()
    ff_profile = mock()
    profile_dir = "/profile/dir"
    when(webdriver).FirefoxProfile(profile_dir).thenReturn(ff_profile)
    when(webdriver).Firefox(
        options=None,
        service_log_path=get_geckodriver_log(),
        executable_path="geckodriver",
        firefox_profile=ff_profile,
    ).thenReturn(ff_mock)
    assert creator.create_firefox({}, None, profile_dir) == ff_mock
def test_firefox_headless(creator):
    """Headless Firefox passes FirefoxOptions to the driver."""
    ff_mock = mock()
    ff_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(ff_profile)
    ff_options = mock()
    when(webdriver).FirefoxOptions().thenReturn(ff_options)
    when(webdriver).Firefox(
        options=ff_options,
        service_log_path=get_geckodriver_log(),
        executable_path="geckodriver",
        firefox_profile=ff_profile,
    ).thenReturn(ff_mock)
    assert creator.create_headless_firefox({}, None, None) == ff_mock
def test_firefox_headless_with_grid_caps(creator):
    """Headless remote Firefox merges user capabilities and sets headless."""
    ff_options = mock()
    when(webdriver).FirefoxOptions().thenReturn(ff_options)
    ff_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(ff_profile)
    grid_url = "localhost:4444"
    merged_caps = {"browserName": "firefox", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        options=ff_options,
        desired_capabilities=merged_caps,
        browser_profile=ff_profile,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_headless_firefox(
        {"capabilities": {"key": "value"}}, grid_url, None
    )
    assert driver == remote_mock
    assert ff_options.headless is True
def test_firefox_headless_with_grid_no_caps(creator):
    """Headless remote Firefox without user capabilities uses the defaults."""
    ff_options = mock()
    when(webdriver).FirefoxOptions().thenReturn(ff_options)
    ff_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(ff_profile)
    grid_url = "localhost:4444"
    default_caps = webdriver.DesiredCapabilities.FIREFOX.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        options=ff_options,
        desired_capabilities=default_caps,
        browser_profile=ff_profile,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_headless_firefox({}, grid_url, None)
    assert driver == remote_mock
    assert ff_options.headless is True
def test_ie(creator):
    """Local IE creation, with and without explicit capabilities."""
    ie_mock = mock()
    when(webdriver).Ie(
        options=None, service_log_path=None, executable_path="IEDriverServer.exe"
    ).thenReturn(ie_mock)
    assert creator.create_ie({}, None) == ie_mock
    when(webdriver).Ie(
        capabilities={"key": "value"},
        options=None,
        service_log_path=None,
        executable_path="IEDriverServer.exe",
    ).thenReturn(ie_mock)
    driver = creator.create_ie(
        desired_capabilities={"capabilities": {"key": "value"}},
        remote_url=None,
        options=None,
        service_log_path=None,
    )
    assert driver == ie_mock
def test_ie_remote_no_caps(creator):
    """Remote IE without user capabilities uses the IE defaults."""
    grid_url = "http://localhost:4444/wd/hub"
    default_caps = webdriver.DesiredCapabilities.INTERNETEXPLORER.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=default_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_ie({}, grid_url) == remote_mock
def test_ie_remote_caps(creator):
    """User 'capabilities' are resolved and passed to Remote."""
    grid_url = "http://localhost:4444/wd/hub"
    caps = {"browserName": "internet explorer"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_ie({"capabilities": caps}, grid_url) == remote_mock
def test_ie_no_browser_name(creator):
    """browserName is filled in when 'capabilities' omit it."""
    grid_url = "http://localhost:4444/wd/hub"
    merged_caps = {"browserName": "internet explorer", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=merged_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_ie({"capabilities": {"key": "value"}}, grid_url) == remote_mock
def test_edge(creator):
    """Local Edge uses MicrosoftWebDriver.exe; no options when unsupported."""
    edge_mock = mock()
    when(webdriver).Edge(
        service_log_path=None, executable_path="MicrosoftWebDriver.exe"
    ).thenReturn(edge_mock)
    when(creator)._has_options(ANY).thenReturn(False)
    assert creator.create_edge({}, None) == edge_mock
def test_edge_remote_no_caps(creator):
    """Remote Edge without user capabilities uses the Edge defaults."""
    grid_url = "http://localhost:4444/wd/hub"
    default_caps = webdriver.DesiredCapabilities.EDGE.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=default_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_edge({}, grid_url) == remote_mock
def test_edge_remote_caps(creator):
    """User 'capabilities' are resolved and passed to Remote."""
    grid_url = "http://localhost:4444/wd/hub"
    caps = {"browserName": "MicrosoftEdge"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_edge({"capabilities": caps}, grid_url) == remote_mock
def test_edge_no_browser_name(creator):
    """browserName is filled in when 'capabilities' omit it."""
    grid_url = "http://localhost:4444/wd/hub"
    merged_caps = {"browserName": "MicrosoftEdge", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=merged_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_edge({"capabilities": {"key": "value"}}, grid_url)
    assert driver == remote_mock
def test_opera(creator):
    """Creating a local Opera driver delegates to webdriver.Opera."""
    opera_mock = mock()
    when(webdriver).Opera(
        options=None, service_log_path=None, executable_path="operadriver"
    ).thenReturn(opera_mock)
    assert creator.create_opera({}, None) == opera_mock
def test_opera_remote_no_caps(creator):
    """Remote Opera without user capabilities uses the Opera defaults."""
    grid_url = "http://localhost:4444/wd/hub"
    default_caps = webdriver.DesiredCapabilities.OPERA.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=default_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_opera({}, grid_url) == remote_mock
def test_opera_remote_caps(creator):
    """Explicit desired_capabilities are passed through to Remote."""
    grid_url = "http://localhost:4444/wd/hub"
    caps = {"browserName": "opera"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_opera({"desired_capabilities": caps}, grid_url) == remote_mock
def test_opera_no_browser_name(creator):
    """browserName is filled in when user capabilities omit it."""
    grid_url = "http://localhost:4444/wd/hub"
    merged_caps = {"browserName": "opera", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=merged_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_opera({"desired_capabilities": {"key": "value"}}, grid_url)
    assert driver == remote_mock
def test_safari(creator):
    """Local Safari uses the system safaridriver binary."""
    safari_mock = mock()
    when(webdriver).Safari(
        executable_path="/usr/bin/safaridriver"
    ).thenReturn(safari_mock)
    assert creator.create_safari({}, None) == safari_mock
def test_safari_remote_no_caps(creator):
    """Remote Safari without user capabilities uses the Safari defaults."""
    grid_url = "http://localhost:4444/wd/hub"
    detector = mock_file_detector(creator)
    default_caps = webdriver.DesiredCapabilities.SAFARI.copy()
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=default_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_safari({}, grid_url) == remote_mock
def test_safari_remote_caps(creator):
    """Explicit desired_capabilities are passed through to Remote."""
    grid_url = "http://localhost:4444/wd/hub"
    caps = {"browserName": "safari"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_safari({"desired_capabilities": caps}, grid_url) == remote_mock
def test_safari_no_broser_name(creator):
    """browserName is filled in when user capabilities omit it."""
    detector = mock_file_detector(creator)
    grid_url = "http://localhost:4444/wd/hub"
    merged_caps = {"browserName": "safari", "key": "value"}
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=merged_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_safari({"desired_capabilities": {"key": "value"}}, grid_url)
    assert driver == remote_mock
def test_phantomjs(creator):
    """Creating a local PhantomJS driver delegates to webdriver.PhantomJS."""
    phantom_mock = mock()
    when(webdriver).PhantomJS(
        service_log_path=None, executable_path="phantomjs"
    ).thenReturn(phantom_mock)
    assert creator.create_phantomjs({}, None) == phantom_mock
def test_phantomjs_remote_no_caps(creator):
    """Remote PhantomJS without user capabilities uses the defaults."""
    grid_url = "http://localhost:4444/wd/hub"
    default_caps = webdriver.DesiredCapabilities.PHANTOMJS.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=default_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_phantomjs({}, grid_url) == remote_mock
def test_phantomjs_remote_caps(creator):
    """Explicit desired_capabilities are passed through to Remote."""
    grid_url = "http://localhost:4444/wd/hub"
    caps = {"browserName": "phantomjs"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_phantomjs({"desired_capabilities": caps}, grid_url)
    assert driver == remote_mock
def test_phantomjs_no_browser_name(creator):
    """browserName is filled in when user capabilities omit it."""
    grid_url = "http://localhost:4444/wd/hub"
    merged_caps = {"browserName": "phantomjs", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor=grid_url,
        browser_profile=None,
        desired_capabilities=merged_caps,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_phantomjs(
        {"desired_capabilities": {"key": "value"}}, grid_url
    )
    assert driver == remote_mock
def test_htmlunit_no_caps(creator):
    """HTMLUnit always goes through Remote, even with no grid URL."""
    htmlunit_caps = webdriver.DesiredCapabilities.HTMLUNIT
    detector = mock_file_detector(creator)
    remote_mock = mock()
    # The None remote URL ends up stringified as "None" by the creator.
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=htmlunit_caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_htmlunit({}, None) == remote_mock
def test_htmlunit_remote_caps(creator):
    """Explicit desired_capabilities are passed through to Remote."""
    caps = {"browserName": "htmlunit"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_htmlunit({"desired_capabilities": caps}, None) == remote_mock
def test_htmlunit_no_browser_name(creator):
    """browserName is filled in when user capabilities omit it."""
    merged_caps = {"browserName": "htmlunit", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=merged_caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_htmlunit({"desired_capabilities": {"key": "value"}}, None)
    assert driver == remote_mock
def test_htmlunit_with_js(creator):
    """HTMLUnit-with-JS uses the HTMLUNITWITHJS default capabilities."""
    default_caps = webdriver.DesiredCapabilities.HTMLUNITWITHJS.copy()
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=default_caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_htmlunit_with_js({}, None) == remote_mock
def test_htmlunit_with_js_no_browser_name(creator):
    """browserName is filled in when user capabilities omit it."""
    merged_caps = {"browserName": "htmlunit", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=merged_caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_htmlunit_with_js(
        {"desired_capabilities": {"key": "value"}}, None
    )
    assert driver == remote_mock
def test_android(creator):
    """Android drivers are created through Remote with the Android defaults."""
    default_caps = webdriver.DesiredCapabilities.ANDROID
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=default_caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_android({}, None) == remote_mock
def test_android_no_browser_name(creator):
    """browserName is filled in when user capabilities omit it."""
    merged_caps = {"browserName": "android", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=merged_caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_android({"desired_capabilities": {"key": "value"}}, None)
    assert driver == remote_mock
def test_iphone(creator):
    """iPhone drivers are created through Remote with the iPhone defaults."""
    default_caps = webdriver.DesiredCapabilities.IPHONE
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=default_caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    assert creator.create_iphone({}, None) == remote_mock
def test_iphone_no_browser_name(creator):
    """browserName is filled in when user capabilities omit it."""
    merged_caps = {"browserName": "iPhone", "key": "value"}
    detector = mock_file_detector(creator)
    remote_mock = mock()
    when(webdriver).Remote(
        command_executor="None",
        desired_capabilities=merged_caps,
        browser_profile=None,
        options=None,
        file_detector=detector,
    ).thenReturn(remote_mock)
    driver = creator.create_iphone({"desired_capabilities": {"key": "value"}}, None)
    assert driver == remote_mock
def test_create_driver_chrome(creator):
    """All Chrome aliases resolve to webdriver.Chrome."""
    chrome_mock = mock()
    when(creator)._get_executable_path(ANY).thenReturn("chromedriver")
    when(webdriver).Chrome(
        options=None, service_log_path=None, executable_path="chromedriver"
    ).thenReturn(chrome_mock)
    for alias in ("chrome", "googlechrome", "gc"):
        assert creator.create_driver(alias, None, None) == chrome_mock
def test_create_driver_firefox(creator):
    """Firefox aliases resolve to webdriver.Firefox with a fresh profile."""
    ff_mock = mock()
    ff_profile = mock()
    when(webdriver).FirefoxProfile().thenReturn(ff_profile)
    when(creator)._get_executable_path(ANY).thenReturn("geckodriver")
    when(webdriver).Firefox(
        options=None,
        service_log_path=get_geckodriver_log(),
        executable_path="geckodriver",
        firefox_profile=ff_profile,
    ).thenReturn(ff_mock)
    for alias in ("ff", "firefox"):
        assert creator.create_driver(alias, None, None, None) == ff_mock
def test_create_driver_ie(creator):
    """IE aliases resolve to webdriver.Ie."""
    ie_mock = mock()
    when(creator)._get_executable_path(ANY).thenReturn("IEDriverServer.exe")
    when(webdriver).Ie(
        options=None, service_log_path=None, executable_path="IEDriverServer.exe"
    ).thenReturn(ie_mock)
    for alias in ("ie", "Internet Explorer"):
        assert creator.create_driver(alias, None, None) == ie_mock
def get_geckodriver_log():
    """Return the geckodriver log file path inside the test log directory."""
    return os.path.join(LOG_DIR, "geckodriver-1.log")
def mock_file_detector(creator):
    """Stub the creator's `_get_sl_file_detector` and return the stub."""
    detector = mock()
    when(creator)._get_sl_file_detector().thenReturn(detector)
    return detector
| {
"content_hash": "c762951ffa44e2b97344a4e03246936f",
"timestamp": "",
"source": "github",
"line_count": 893,
"max_line_length": 88,
"avg_line_length": 34.19820828667413,
"alnum_prop": 0.6784112118929893,
"repo_name": "rtomac/robotframework-selenium2library",
"id": "27379a04e9918f8f368ae4259964d1721541b23e",
"size": "30539",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utest/test/keywords/test_webdrivercreator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1739"
},
{
"name": "HTML",
"bytes": "381713"
},
{
"name": "JavaScript",
"bytes": "9665"
},
{
"name": "Python",
"bytes": "283075"
},
{
"name": "RobotFramework",
"bytes": "100940"
}
],
"symlink_target": ""
} |
'''
filename: log_campus_net.py
function: log in to the BUPT campus network portal
'''
import urllib2,re,cookielib,urllib
import regex
class log_campuse:
    """Log in to the BUPT campus network portal at 10.3.8.211 (Python 2)."""

    # Class-level defaults; overwritten per instance in __init__.
    username = ''  # e.g. from raw_input('username:')
    pwd = ''  # e.g. from raw_input('password:')
    url = 'http://10.3.8.211/'

    def __init__(self, username, pwd):
        # Store the credentials used for the login POST.
        self.username = username
        self.pwd = pwd

    def getresp(self):
        """POST the login form to the portal and return the response body.

        Installs a cookie-carrying opener globally (urllib2.install_opener),
        pre-seeding the portal's expected cookies with the credentials.
        NOTE(review): sending the password in cookies/POST in clear text is
        inherent to this portal's protocol, not something this client adds.
        """
        cookie = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
        # The portal expects the credentials duplicated in these cookies.
        opener.addheaders.append(('Cookie', 'myusername=' + self.username + ';username=' + self.username + '; smartdot=' + str(self.pwd) + '; pwd=' + str(self.pwd)))
        urllib2.install_opener(opener)
        # Initial GET so the server sets its session cookies.
        resp = urllib2.urlopen(self.url)
        respinfo = resp.info()
        # Build the login form data; field names ('DDDDD', 'upass', '0MKKey')
        # are the portal's own form field names.
        postdata = urllib.urlencode(
            {'DDDDD': str(self.username),
             'upass': str(self.pwd),
             'savePWD': '0',
             '0MKKey': ''})
        req = urllib2.Request(self.url, postdata)
        # Spoof a desktop browser User-Agent; presumably the portal rejects
        # the default urllib2 agent -- TODO confirm.
        req.add_header('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:40.0) Gecko/20100101 Firefox/40.0')
        resp = urllib2.urlopen(req)
        info = resp.info()
        content = resp.read()
        return content

    def checkresp(self, content):
        """Inspect the login response and print the account usage summary.

        On success, fetches the portal page again and extracts time/flow/fee
        via the sibling `regex.dataproc` helper.
        """
        # These marker strings must match the portal's response verbatim
        # (including the 'Ivalid' spelling, which mirrors the server output).
        succ_confirm = 'You have successfully logged into our system'
        failed_confirm = 'Ivalid account or password'
        if(succ_confirm in content):  # logged in: fetch the usage page
            print 'logging successfully'
            req = urllib2.Request(self.url)
            resp = urllib2.urlopen(req)
            content = resp.read()
            time, flow, fee = regex.dataproc(content)
            print 'time:' + time + ' Min'
            print 'flow:' + flow + ' Kb'
            print 'fee:' + fee + ''
        if(failed_confirm in content):
            print failed_confirm
if __name__ == '__main__':
    # Prompt for credentials, attempt the portal login, and print the result.
    username = raw_input('username:')
    pwd = raw_input('password:')
    log = log_campuse(username, pwd)
    content = log.getresp()
    log.checkresp(content)
| {
"content_hash": "70415e0017f489ea23197754edc88294",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 150,
"avg_line_length": 33.78125,
"alnum_prop": 0.589731729879741,
"repo_name": "xuewindy/python-tools",
"id": "69352534ed843f9edbb10dc2ae5413b938db9799",
"size": "2162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "log_campus_net.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2737"
}
],
"symlink_target": ""
} |
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel
app = QApplication(sys.argv)

# A QMainWindow constructed without a parent becomes a top-level window.
main_window = QMainWindow()
main_window.resize(250, 150)
main_window.setWindowTitle('Hello')

# The label is parented to the window so it appears inside it.
label = QLabel("Hello!", main_window)

main_window.show()

# exec_() runs the Qt event loop (underscore because 'exec' was a Python 2
# keyword); its return value is the application's exit status.
exit_code = app.exec_()

# sys.exit() forwards the status so the environment sees a clean shutdown.
sys.exit(exit_code)
| {
"content_hash": "c399a5cff9c8a6ddc7c238235a5e3972",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 120,
"avg_line_length": 35.75,
"alnum_prop": 0.7604895104895105,
"repo_name": "jeremiedecock/snippets",
"id": "12a515b62d3687d7f79b703d89bdbb0c1ab4a9e7",
"size": "1830",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyqt/pyqt5/widget_QLabel.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "4294"
},
{
"name": "Batchfile",
"bytes": "6779"
},
{
"name": "C",
"bytes": "102107"
},
{
"name": "C++",
"bytes": "320943"
},
{
"name": "CMake",
"bytes": "11424"
},
{
"name": "CSS",
"bytes": "21121"
},
{
"name": "Cython",
"bytes": "21"
},
{
"name": "Dockerfile",
"bytes": "1818"
},
{
"name": "Fortran",
"bytes": "633"
},
{
"name": "Gnuplot",
"bytes": "39999"
},
{
"name": "Go",
"bytes": "3166"
},
{
"name": "Groovy",
"bytes": "3009"
},
{
"name": "HTML",
"bytes": "138995"
},
{
"name": "IDL",
"bytes": "43"
},
{
"name": "Java",
"bytes": "120221"
},
{
"name": "JavaScript",
"bytes": "32342"
},
{
"name": "Jinja",
"bytes": "206"
},
{
"name": "Jupyter Notebook",
"bytes": "95991"
},
{
"name": "Lua",
"bytes": "200"
},
{
"name": "M4",
"bytes": "111"
},
{
"name": "MATLAB",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "81307"
},
{
"name": "OpenSCAD",
"bytes": "14995"
},
{
"name": "PHP",
"bytes": "94"
},
{
"name": "Perl",
"bytes": "46"
},
{
"name": "Processing",
"bytes": "208"
},
{
"name": "Prolog",
"bytes": "454"
},
{
"name": "Python",
"bytes": "1685966"
},
{
"name": "R",
"bytes": "76"
},
{
"name": "Raku",
"bytes": "43"
},
{
"name": "Ruby",
"bytes": "42"
},
{
"name": "Scheme",
"bytes": "649"
},
{
"name": "Shell",
"bytes": "52865"
},
{
"name": "Smalltalk",
"bytes": "55"
},
{
"name": "TeX",
"bytes": "1189"
},
{
"name": "Vue",
"bytes": "49445"
},
{
"name": "XSLT",
"bytes": "1816"
}
],
"symlink_target": ""
} |
"""
Support for Homekit lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.homekit_controller/
"""
import logging
from homeassistant.components.homekit_controller import (
HomeKitEntity, KNOWN_ACCESSORIES)
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_HS_COLOR, ATTR_COLOR_TEMP, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR, SUPPORT_COLOR_TEMP, Light)
# Components that must be set up before this platform (Home Assistant loader).
DEPENDENCIES = ['homekit_controller']

# Module logger; not used in the visible portion of this module.
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up Homekit lighting."""
    if discovery_info is None:
        return
    accessory = hass.data[KNOWN_ACCESSORIES][discovery_info['serial']]
    add_entities([HomeKitLight(accessory, discovery_info)], True)
class HomeKitLight(HomeKitEntity, Light):
    """Representation of a Homekit light."""

    def __init__(self, *args):
        """Initialise the light with no known state or features."""
        super().__init__(*args)
        self._on = None
        self._brightness = None
        self._color_temperature = None
        self._hue = None
        self._saturation = None

    def update_characteristics(self, characteristics):
        """Synchronise light state with Home Assistant.

        Records each characteristic's instance id (iid) in ``self._chars``,
        caches its current value, and enables the matching supported feature.
        """
        # pylint: disable=import-error
        from homekit.model.characteristics import CharacteristicsTypes

        for characteristic in characteristics:
            ctype = characteristic['type']
            ctype = CharacteristicsTypes.get_short(ctype)
            if ctype == "on":
                self._chars['on'] = characteristic['iid']
                self._on = characteristic['value']
            elif ctype == 'brightness':
                self._chars['brightness'] = characteristic['iid']
                self._features |= SUPPORT_BRIGHTNESS
                self._brightness = characteristic['value']
            elif ctype == 'color-temperature':
                # Stored under the underscored key; turn_on must use the
                # same key when writing the characteristic back.
                self._chars['color_temperature'] = characteristic['iid']
                self._features |= SUPPORT_COLOR_TEMP
                self._color_temperature = characteristic['value']
            elif ctype == "hue":
                self._chars['hue'] = characteristic['iid']
                self._features |= SUPPORT_COLOR
                self._hue = characteristic['value']
            elif ctype == "saturation":
                self._chars['saturation'] = characteristic['iid']
                self._features |= SUPPORT_COLOR
                self._saturation = characteristic['value']

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._on

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255.

        HomeKit reports brightness as a 0..100 percentage.
        """
        if self._features & SUPPORT_BRIGHTNESS:
            return self._brightness * 255 / 100
        return None

    @property
    def hs_color(self):
        """Return the (hue, saturation) color, if color is supported."""
        if self._features & SUPPORT_COLOR:
            return (self._hue, self._saturation)
        return None

    @property
    def color_temp(self):
        """Return the color temperature, if supported."""
        if self._features & SUPPORT_COLOR_TEMP:
            return self._color_temperature
        return None

    @property
    def supported_features(self):
        """Flag supported features."""
        return self._features

    def turn_on(self, **kwargs):
        """Turn the specified light on, applying any requested attributes."""
        hs_color = kwargs.get(ATTR_HS_COLOR)
        temperature = kwargs.get(ATTR_COLOR_TEMP)
        brightness = kwargs.get(ATTR_BRIGHTNESS)

        characteristics = []
        if hs_color is not None:
            characteristics.append({'aid': self._aid,
                                    'iid': self._chars['hue'],
                                    'value': hs_color[0]})
            characteristics.append({'aid': self._aid,
                                    'iid': self._chars['saturation'],
                                    'value': hs_color[1]})

        if brightness is not None:
            # Convert Home Assistant's 0..255 scale to HomeKit's 0..100.
            characteristics.append({'aid': self._aid,
                                    'iid': self._chars['brightness'],
                                    'value': int(brightness * 100 / 255)})

        if temperature is not None:
            # BUG FIX: the iid is stored under 'color_temperature' in
            # update_characteristics; the previous 'color-temperature'
            # (hyphenated) key raised KeyError when setting a color temp.
            characteristics.append({'aid': self._aid,
                                    'iid': self._chars['color_temperature'],
                                    'value': int(temperature)})

        characteristics.append({'aid': self._aid,
                                'iid': self._chars['on'],
                                'value': True})
        self.put_characteristics(characteristics)

    def turn_off(self, **kwargs):
        """Turn the specified light off."""
        characteristics = [{'aid': self._aid,
                            'iid': self._chars['on'],
                            'value': False}]
        self.put_characteristics(characteristics)
| {
"content_hash": "219e1fcf71b6b0759ee9991832d93a34",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 76,
"avg_line_length": 37.80152671755725,
"alnum_prop": 0.5581583198707593,
"repo_name": "tinloaf/home-assistant",
"id": "7c8119f6e89dbe1d180c409ea66b5767e4fd904a",
"size": "4952",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/light/homekit_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
from functools import wraps
from plash import utils
from plash.eval import hint, register_macro
def cache_container_hint(cache_key_templ):
    """Decorator factory caching a container-building function via plash map.

    The wrapped function's positional args are joined with ':' and
    substituted into *cache_key_templ*; '/' becomes '%' in the key. On a
    cache miss the wrapped function runs and its result is stored; in all
    cases an ``image`` hint for the container id is returned.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args):
            key = cache_key_templ.format(":".join(args)).replace("/", "%")
            cached = utils.plash_call("map", key)
            if cached:
                return hint("image", cached)
            built = func(*args)
            utils.plash_call("map", key, built)
            return hint("image", built)
        return wrapper
    return decorator
@register_macro()
@cache_container_hint("docker:{}")
def from_docker(image):
    "use image from local docker"
    imported = utils.plash_call("import-docker", image)
    return imported
@register_macro()
@cache_container_hint("lxc:{}")
def from_lxc(image):
    "use images from images.linuxcontainers.org"
    imported = utils.plash_call("import-lxc", image)
    return imported
@register_macro()
@cache_container_hint("url:{}")
def from_url(url):
    "import image from an url"
    imported = utils.plash_call("import-url", url)
    return imported
@register_macro()
def from_id(image):
    "specify the image from an image id"
    # The id is already resolved; just emit the hint directly.
    return hint("image", image)
class MapDoesNotExist(Exception):
    """Raised when a requested plash map key has no stored value."""
@register_macro()
def from_map(map_key):
    "use resolved map as image"
    image_id = utils.plash_call("map", map_key)
    if image_id:
        return hint("image", image_id)
    # Empty result means the key was never stored.
    raise MapDoesNotExist("map {} not found".format(repr(map_key)))
@register_macro("from")
def from_(image):
    "guess from where to take the image"
    # Purely numeric arguments are treated as image ids, anything else as
    # an lxc image name.
    if not image.isdigit():
        return from_lxc(image)
    return from_id(image)
@register_macro()
@cache_container_hint("github:{}")
def from_github(user_repo_pair, file="plashfile"):
    "build and use a file (default 'plashfile') from a github repo"
    build_args = ("build", "--eval-github", user_repo_pair, file)
    return utils.plash_call(*build_args)
| {
"content_hash": "2bf8a1166b11f797a24a907d4f01a11d",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 80,
"avg_line_length": 25.223684210526315,
"alnum_prop": 0.647887323943662,
"repo_name": "ihucos/plash",
"id": "8d479f1f35f1d0cc463607c22a89dde44b23af72",
"size": "1917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python/plash/macros/froms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "30841"
},
{
"name": "HTML",
"bytes": "4493"
},
{
"name": "Makefile",
"bytes": "628"
},
{
"name": "Python",
"bytes": "49927"
},
{
"name": "Shell",
"bytes": "33408"
}
],
"symlink_target": ""
} |
"""
Django settings for nospammail project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k-+klg*(w#xyhx6(i%h%x)c%7!hek^2bm!@t3!79!ab%)=-bwi'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'settings_console.apps.SettingsConsoleConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'nospammail.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'nospammail.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'nospammail',
'USER': 'nospammail',
'PASSWORD': os.environ.get('NOSPAMMAIL_PW', False),
'HOST': os.environ.get('NOSPAMMAIL_HOST', False),
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
USER_HOME_URL = '/'
| {
"content_hash": "1430b74874a075b6350a4f3e7b2eed3c",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 91,
"avg_line_length": 26.184615384615384,
"alnum_prop": 0.6780258519388954,
"repo_name": "jnyborg/nospammail",
"id": "05786ef9822ef3aae8d1a65a6c0e27f73ccf5648",
"size": "3404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/nospammail/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4646"
},
{
"name": "Python",
"bytes": "41044"
}
],
"symlink_target": ""
} |
from django.conf import settings
import importlib
# Merge two lots of mesobox-compatible context additions
def merge_context_additions(additions):
    """Merge a sequence of mesobox-style addition dicts.

    Each element of *additions* is expected to look like
    ``{"context": dict_or_None, "boxes": dict_or_None}``.  Missing or
    None entries are tolerated (the try/except blocks below skip them).
    Returns a single dict of the same shape.
    """
    context = {}
    boxes = {}
    for c in additions:
        try:
            context.update(c.get("context"))
        except TypeError:
            # c.get("context") was None; dict.update(None) raises TypeError.
            pass
        try:
            for k, v in c.get("boxes").items():
                if k in boxes:
                    # NOTE(review): asymmetric merge — the first occurrence
                    # stores v itself, later occurrences append v into it.
                    # This assumes the first v is a list and mutates the
                    # caller's object in place; confirm callers expect
                    # append (not extend) semantics here.
                    boxes[k].append(v)
                else:
                    boxes[k] = v
        except TypeError:
            # c.get("boxes") was None.
            pass
        except AttributeError:
            # The value under "boxes" had no .items() (not a mapping).
            pass
    return {"context": context, "boxes": boxes}
def context_processor(request):
    """Django context processor assembling box content from installed apps.

    Imports ``<app>.boxes`` for every installed app, calls each function
    named in that module's ``BOX_INCLUDES`` with *request*, and merges the
    results with merge_context_additions.  The merged box dict is exposed
    to templates under the ``boxes`` key of the returned context.
    """
    # Start from an empty-but-well-formed additions dict so the final
    # lookup below cannot KeyError when no app provides boxes.
    additions = {"context": {}, "boxes": {}}
    # Get the boxes and accompanying context additions from all the installed apps.
    for app in settings.INSTALLED_APPS:
        try:
            module = importlib.import_module(app + ".boxes")
        except ImportError:
            # App has no boxes module; nothing to contribute.
            continue
        # Run each declared box function now.
        for b in module.BOX_INCLUDES:
            # BUG FIX: getattr() without a default raises AttributeError,
            # so the "not implemented" check below was dead code.  Pass a
            # None default so the intended, descriptive error is raised.
            b_func = getattr(module, b, None)
            if not b_func:
                raise Exception("Method %s not implemented in module %s" % (b, app))
            additions = merge_context_additions([additions, b_func(request)])
    # Merge boxes down to being part of the context additions dict now they
    # have all been assembled.
    result = additions['context']
    result['boxes'] = additions['boxes']
    return result
| {
"content_hash": "4ad5adef0242d41ef80f9e73e1835547",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 99,
"avg_line_length": 29.979166666666668,
"alnum_prop": 0.5726198749131342,
"repo_name": "grundleborg/mesosphere",
"id": "7e40178d5608ae8a4241fd14519c704411701759",
"size": "1439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesobox/boxes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "123543"
},
{
"name": "Python",
"bytes": "26753"
}
],
"symlink_target": ""
} |
"""Light curve-related functions."""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
from .base import mp_root, create_gti_mask, cross_gtis, mkdir_p
from .base import contiguous_regions, calc_countrate, gti_len
from .base import _assign_value_if_none, _look_for_array_in_array
from .io import load_events, load_lcurve, save_lcurve
from .io import MP_FILE_EXTENSION, high_precision_keyword_read
import os
import logging
import warnings
def lcurve(event_list,
           bin_time,
           start_time=None,
           stop_time=None,
           centertime=True):
    """From a list of event times, extract a lightcurve.

    Parameters
    ----------
    event_list : array-like
        Times of arrival of events
    bin_time : float
        Binning time of the light curve

    Returns
    -------
    time : array-like
        The time bins of the light curve
    lc : array-like
        The light curve

    Other Parameters
    ----------------
    start_time : float
        Initial time of the light curve
    stop_time : float
        Stop time of the light curve
    centertime: bool
        If False, time is the start of the bin. Otherwise, the center
    """
    # Default limits: floor of the first/last event times.
    start_time = _assign_value_if_none(start_time, np.floor(event_list[0]))
    stop_time = _assign_value_if_none(stop_time, np.floor(event_list[-1]))
    logging.debug("lcurve: Time limits: %g -- %g" %
                  (start_time, stop_time))
    # Keep only events inside the requested interval.
    new_event_list = event_list[event_list >= start_time]
    new_event_list = new_event_list[new_event_list <= stop_time]
    # To compute the histogram, the times array must specify the bin edges.
    # therefore, if nbin is the length of the lightcurve, times will have
    # nbin + 1 elements
    new_event_list = ((new_event_list - start_time) / bin_time).astype(int)
    times = np.arange(start_time, stop_time, bin_time)
    lc = np.bincount(new_event_list, minlength=len(times))
    logging.debug("lcurve: Length of the lightcurve: %g" % len(times))
    logging.debug("Times, kind: %s, %s" % (repr(times), type(times[0])))
    logging.debug("Lc, kind: %s, %s" % (repr(lc), type(lc[0])))
    logging.debug("bin_time, kind: %s, %s" % (repr(bin_time), type(bin_time)))
    if centertime:
        times = times + bin_time / 2.
    # BUG FIX: `np.float` was a deprecated alias of the builtin `float`
    # (removed in NumPy 1.24); use the builtin directly — same dtype
    # (float64), works on every NumPy version.
    return times, lc.astype(float)
def join_lightcurves(lcfilelist, outfile='out_lc' + MP_FILE_EXTENSION):
    """Join light curves from different files.

    Light curves from different instruments are put in different channels.

    Parameters
    ----------
    lcfilelist : list of str
        File names of the light curves to join (MaLTPyNT format)
    outfile : str or None
        Output file name; if None, nothing is saved to disk

    Returns
    -------
    outlcs : dict
        One joined light-curve dict per instrument, keyed by instrument name

    See Also
    --------
    scrunch_lightcurves : Create a single light curve from input light
        curves.
    """
    lcdatas = []
    for lfc in lcfilelist:
        logging.info("Loading file %s..." % lfc)
        lcdata = load_lcurve(lfc)
        logging.info("Done.")
        lcdatas.append(lcdata)
        del lcdata
    # --------------- Check consistency of data --------------
    lcdts = [lcdata['dt'] for lcdata in lcdatas]
    # Find unique elements. If multiple bin times are used, throw an exception
    lcdts = list(set(lcdts))
    assert len(lcdts) == 1, 'Light curves must have same dt for scrunching'
    instrs = [lcdata['Instr'] for lcdata in lcdatas]
    # Find unique elements. A lightcurve will be produced for each instrument
    instrs = list(set(instrs))
    outlcs = {}
    times = {}
    lcs = {}
    gtis = {}
    for instr in instrs:
        # Tstart/Tstop start at extreme sentinel values so any real value
        # from the data replaces them in the loop below.
        outlcs[instr] = {'dt': lcdts[0], 'Tstart': 1e32, 'Tstop': -11,
                         'MJDref': lcdatas[0]['MJDref'], 'source_ctrate': 0,
                         'total_ctrate': 0}
        times[instr] = []
        lcs[instr] = []
        gtis[instr] = []
    # -------------------------------------------------------
    for lcdata in lcdatas:
        instr = lcdata['Instr']
        tstart = lcdata['Tstart']
        tstop = lcdata['Tstop']
        if tstart < outlcs[instr]['Tstart']:
            outlcs[instr]['Tstart'] = tstart
        if tstop > outlcs[instr]['Tstop']:
            outlcs[instr]['Tstop'] = tstop
        time = lcdata['time']
        lc = lcdata['lc']
        gti = lcdata['GTI']
        times[instr].extend(time)
        lcs[instr].extend(lc)
        gtis[instr].extend(gti)
        # Count rates are accumulated weighted by each curve's good time,
        # then normalised by the total good time after the loop.
        goodlen = gti_len(gti)
        outlcs[instr]['source_ctrate'] += lcdata['source_ctrate'] * goodlen
        outlcs[instr]['total_ctrate'] += lcdata['total_ctrate'] * goodlen
    for instr in instrs:
        gti = np.array(gtis[instr])
        outlcs[instr]['time'] = np.array(times[instr])
        outlcs[instr]['lc'] = np.array(lcs[instr])
        outlcs[instr]['GTI'] = gti
        outlcs[instr]['Instr'] = instr
        goodlen = gti_len(gti)
        outlcs[instr]['source_ctrate'] /= goodlen
        outlcs[instr]['total_ctrate'] /= goodlen
    if outfile is not None:
        for instr in instrs:
            # With a single instrument the output keeps the plain name;
            # otherwise the instrument name is prepended to the file name.
            if len(instrs) == 1:
                tag = ""
            else:
                tag = instr
            logging.info('Saving joined light curve to %s' % outfile)
            dname, fname = os.path.split(outfile)
            save_lcurve(outlcs[instr], os.path.join(dname, tag + fname))
    return outlcs
def scrunch_lightcurves(lcfilelist, outfile='out_scrlc'+MP_FILE_EXTENSION,
                        save_joint=False):
    """Create a single light curve from input light curves.

    Light curves are appended when they cover different times, and summed when
    they fall in the same time range. This is done regardless of the channel
    or the instrument.

    Parameters
    ----------
    lcfilelist : list of str
        The list of light curve files to scrunch

    Returns
    -------
    time : array-like
        The time array
    lc :
        The new light curve
    gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        Good Time Intervals

    Other Parameters
    ----------------
    outfile : str
        The output file name
    save_joint : bool
        If True, save the per-channel joint light curves

    See Also
    --------
    join_lightcurves : Join light curves from different files
    """
    # First merge the input files per instrument (optionally saving them).
    if save_joint:
        lcdata = join_lightcurves(lcfilelist)
    else:
        lcdata = join_lightcurves(lcfilelist, outfile=None)
    instrs = list(lcdata.keys())
    gti_lists = [lcdata[inst]['GTI'] for inst in instrs]
    # Keep only the time intervals that are good for *all* instruments.
    gti = cross_gtis(gti_lists)
    # Determine limits
    time0 = lcdata[instrs[0]]['time']
    mask = create_gti_mask(time0, gti)
    time0 = time0[mask]
    lc0 = lcdata[instrs[0]]['lc']
    lc0 = lc0[mask]
    # Sum the remaining instruments bin by bin; the assert guards against
    # misaligned time grids after GTI filtering.
    for inst in instrs[1:]:
        time1 = lcdata[inst]['time']
        mask = create_gti_mask(time1, gti)
        time1 = time1[mask]
        assert np.all(time0 == time1), \
            'Something is not right with gti filtering'
        lc = lcdata[inst]['lc']
        lc0 += lc[mask]
    out = lcdata[instrs[0]].copy()
    out['lc'] = lc0
    out['time'] = time0
    out['dt'] = lcdata[instrs[0]]['dt']
    out['GTI'] = gti
    out['Instr'] = ",".join(instrs)
    # Count rates add up when instruments observe simultaneously.
    out['source_ctrate'] = np.sum([lcdata[i]['source_ctrate'] for i in instrs])
    out['total_ctrate'] = np.sum([lcdata[i]['total_ctrate'] for i in instrs])
    logging.info('Saving scrunched light curve to %s' % outfile)
    save_lcurve(out, outfile)
    return time0, lc0, gti
def filter_lc_gtis(time, lc, gti, safe_interval=None, delete=False,
                   min_length=0, return_borders=False):
    """Filter a light curve for GTIs.

    Parameters
    ----------
    time : array-like
        The time bins of the light curve
    lc : array-like
        The light curve
    gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        Good Time Intervals

    Returns
    -------
    time : array-like
        The time bins of the light curve
    lc : array-like
        The output light curve
    newgtis : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        The output Good Time Intervals
    borders : [[i0_0, i0_1], [i1_0, i1_1], ...], optional
        The indexes of the light curve corresponding to the borders of the
        GTIs. Returned if return_borders is set to True

    Other Parameters
    ----------------
    safe_interval : float or [float, float]
        Seconds to filter out at the start and end of each GTI. If single
        float, these safe windows are equal, otherwise the two numbers refer
        to the start and end of the GTI respectively
    delete : bool
        If delete is True, the intervals outside of GTIs are filtered out from
        the light curve. Otherwise, they are set to zero.
    min_length : float
        Minimum length of GTI. GTIs below this length will be removed.
    return_borders : bool
        If True, return also the indexes of the light curve corresponding to
        the borders of the GTIs
    """
    mask, newgtis = create_gti_mask(time, gti, return_new_gtis=True,
                                    safe_interval=safe_interval,
                                    min_length=min_length)
    nomask = np.logical_not(mask)
    if delete:
        # Drop the bad bins entirely.
        time = time[mask]
        lc = lc[mask]
    else:
        # NOTE: this zeroes the bad bins of the *caller's* array in place.
        lc[nomask] = 0
    if return_borders:
        # Recompute the mask against the filtered GTIs to find contiguous
        # index ranges of good data.
        mask = create_gti_mask(time, newgtis)
        borders = contiguous_regions(mask)
        return time, lc, newgtis, borders
    else:
        return time, lc, newgtis
def lcurve_from_events(f, safe_interval=0,
                       pi_interval=None,
                       e_interval=None,
                       min_length=0,
                       gti_split=False,
                       ignore_gtis=False,
                       bintime=1.,
                       outdir=None,
                       outfile=None,
                       noclobber=False):
    """Bin an event list in a light curve.

    Parameters
    ----------
    f : str
        Input event file name
    bintime : float
        The bin time of the output light curve

    Returns
    -------
    outfiles : list
        List of output light curves

    Other Parameters
    ----------------
    safe_interval : float or [float, float]
        Seconds to filter out at the start and end of each GTI. If single
        float, these safe windows are equal, otherwise the two numbers refer
        to the start and end of the GTI respectively
    pi_interval : [int, int]
        PI channel interval to select. Default None, meaning that all PI
        channels are used
    e_interval : [float, float]
        Energy interval to select (only works if event list is calibrated with
        `calibrate`). Default None
    min_length : float
        GTIs below this length will be filtered out
    gti_split : bool
        If True, create one light curve for each good time interval
    ignore_gtis : bool
        Ignore good time intervals, and get a single light curve that includes
        possible gaps
    outdir : str
        Output directory
    outfile : str
        Output file
    noclobber : bool
        If True, do not overwrite existing files
    """
    logging.info("Loading file %s..." % f)
    evdata = load_events(f)
    logging.info("Done.")
    # A negative bin time is interpreted as a power of two (e.g. -6 -> 1/64).
    if bintime < 0:
        bintime = 2 ** (bintime)
    bintime = np.longdouble(bintime)
    tag = ''
    out = {}
    tstart = evdata['Tstart']
    tstop = evdata['Tstop']
    events = evdata['time']
    instr = evdata['Instr']
    mjdref = evdata['MJDref']
    if instr == 'PCA':
        pcus = evdata['PCU']
    gtis = evdata['GTI']
    if ignore_gtis:
        gtis = np.array([[tstart, tstop]])
    out['MJDref'] = mjdref
    # make tstart and tstop multiples of bin times since MJDref
    tstart = np.ceil(tstart / bintime, dtype=np.longdouble) * bintime
    tstop = np.floor(tstop / bintime, dtype=np.longdouble) * bintime
    # First of all, calculate total count rate (no filtering applied)
    tot_time, tot_lc = lcurve(events, bintime, start_time=tstart,
                              stop_time=tstop)
    tot_time, tot_lc, newgtis, tot_borders = \
        filter_lc_gtis(tot_time, tot_lc, gtis,
                       safe_interval=safe_interval,
                       delete=False,
                       min_length=min_length,
                       return_borders=True)
    out['total_ctrate'] = calc_countrate(tot_time, tot_lc, bintime=bintime)
    # Then, apply filters
    if pi_interval is not None and np.all(np.array(pi_interval) > 0):
        pis = evdata['PI']
        good = np.logical_and(pis > pi_interval[0],
                              pis <= pi_interval[1])
        events = events[good]
        tag = '_PI%g-%g' % (pi_interval[0], pi_interval[1])
        # BUG FIX: these previously read e_interval[0], which is None in
        # this branch (the elif below guarantees it) and crashed with a
        # TypeError; PImax additionally used index 0 instead of 1.
        out['PImin'] = pi_interval[0]
        out['PImax'] = pi_interval[1]
    elif e_interval is not None and np.all(np.array(e_interval) > 0):
        try:
            es = evdata['E']
        except Exception:
            raise \
                ValueError("No energy information is present in the file." +
                           " Did you run MPcalibrate?")
        good = np.logical_and(es > e_interval[0],
                              es <= e_interval[1])
        events = events[good]
        tag = '_E%g-%g' % (e_interval[0], e_interval[1])
        out['Emin'] = e_interval[0]
        out['Emax'] = e_interval[1]
    # Assign default value if None
    outfile = _assign_value_if_none(outfile, mp_root(f) + tag + '_lc')
    # Take out extension from name, if present, then give extension. This
    # avoids multiple extensions
    outfile = outfile.replace(MP_FILE_EXTENSION, '') + MP_FILE_EXTENSION
    outdir = _assign_value_if_none(
        outdir, os.path.dirname(os.path.abspath(f)))
    _, outfile = os.path.split(outfile)
    mkdir_p(outdir)
    outfile = os.path.join(outdir, outfile)
    if noclobber and os.path.exists(outfile):
        warnings.warn('File exists, and noclobber option used. Skipping')
        return [outfile]
    time, lc = lcurve(events, bintime, start_time=tstart,
                      stop_time=tstop)
    time, lc, newgtis, borders = \
        filter_lc_gtis(time, lc, gtis,
                       safe_interval=safe_interval,
                       delete=False,
                       min_length=min_length,
                       return_borders=True)
    if len(newgtis) == 0:
        warnings.warn(
            "No GTIs above min_length ({0}s) found.".format(min_length))
        return
    assert np.all(borders == tot_borders), \
        'Borders do not coincide: {0} {1}'.format(borders, tot_borders)
    out['source_ctrate'] = calc_countrate(time, lc, gtis=newgtis,
                                          bintime=bintime)
    if gti_split:
        # One output light curve per GTI.
        outfiles = []
        logging.debug(borders)
        for ib, b in enumerate(borders):
            local_tag = tag + '_gti%d' % ib
            outf = mp_root(outfile) + local_tag + '_lc' + MP_FILE_EXTENSION
            if noclobber and os.path.exists(outf):
                warnings.warn(
                    'File exists, and noclobber option used. Skipping')
                outfiles.append(outf)
                # BUG FIX: without this `continue`, the existing file was
                # overwritten anyway and its name appended twice.
                continue
            logging.debug(b)
            local_out = out.copy()
            local_out['lc'] = lc[b[0]:b[1]]
            local_out['time'] = time[b[0]:b[1]]
            local_out['dt'] = bintime
            local_gti = np.array([[time[b[0]], time[b[1]-1]]])
            local_out['GTI'] = local_gti
            local_out['Tstart'] = time[b[0]]
            local_out['Tstop'] = time[b[1]-1]
            local_out['Instr'] = instr
            local_out['source_ctrate'] = calc_countrate(time[b[0]:b[1]],
                                                        lc[b[0]:b[1]],
                                                        bintime=bintime)
            local_out['total_ctrate'] = calc_countrate(tot_time[b[0]:b[1]],
                                                       tot_lc[b[0]:b[1]],
                                                       bintime=bintime)
            if instr == 'PCA':
                local_out['nPCUs'] = len(set(pcus))
            logging.info('Saving light curve to %s' % outf)
            save_lcurve(local_out, outf)
            outfiles.append(outf)
    else:
        out['lc'] = lc
        out['time'] = time
        out['dt'] = bintime
        out['GTI'] = newgtis
        out['Tstart'] = tstart
        out['Tstop'] = tstop
        out['Instr'] = instr
        if instr == 'PCA':
            out['nPCUs'] = len(set(pcus))
        logging.info('Saving light curve to %s' % outfile)
        save_lcurve(out, outfile)
        outfiles = [outfile]
    # For consistency in return value
    return outfiles
def lcurve_from_fits(fits_file, gtistring='GTI',
                     timecolumn='TIME', ratecolumn=None, ratehdu=1,
                     fracexp_limit=0.9, outfile=None,
                     noclobber=False, outdir=None):
    """
    Load a lightcurve from a fits file and save it in MaLTPyNT format.

    .. note ::
        FITS light curve handling is still under testing.
        Absolute times might be incorrect depending on the light curve format.

    Parameters
    ----------
    fits_file : str
        File name of the input light curve in FITS format

    Returns
    -------
    outfile : [str]
        Returned as a list with a single element for consistency with
        `lcurve_from_events`

    Other Parameters
    ----------------
    gtistring : str
        Name of the GTI extension in the FITS file
    timecolumn : str
        Name of the column containing times in the FITS file
    ratecolumn : str
        Name of the column containing rates in the FITS file
    ratehdu : str or int
        Name or index of the FITS extension containing the light curve
    fracexp_limit : float
        Minimum exposure fraction allowed
    outfile : str
        Output file name
    noclobber : bool
        If True, do not overwrite existing files
    """
    logging.warning(
        """WARNING! FITS light curve handling is still under testing.
        Absolute times might be incorrect.""")
    # TODO:
    # treat consistently TDB, UTC, TAI, etc. This requires some documentation
    # reading. For now, we assume TDB
    from astropy.io import fits as pf
    from astropy.time import Time
    import numpy as np
    from .base import create_gti_from_condition
    # Build the output path (strip any pre-existing extension first).
    outfile = _assign_value_if_none(outfile, mp_root(fits_file) + '_lc')
    outfile = outfile.replace(MP_FILE_EXTENSION, '') + MP_FILE_EXTENSION
    outdir = _assign_value_if_none(
        outdir, os.path.dirname(os.path.abspath(fits_file)))
    _, outfile = os.path.split(outfile)
    mkdir_p(outdir)
    outfile = os.path.join(outdir, outfile)
    if noclobber and os.path.exists(outfile):
        warnings.warn('File exists, and noclobber option used. Skipping')
        return [outfile]
    lchdulist = pf.open(fits_file)
    lctable = lchdulist[ratehdu].data
    # Units of header keywords
    tunit = lchdulist[ratehdu].header['TIMEUNIT']
    # MJDREF is optional; when present it is promoted to an astropy Time.
    try:
        mjdref = high_precision_keyword_read(lchdulist[ratehdu].header,
                                             'MJDREF')
        mjdref = Time(mjdref, scale='tdb', format='mjd')
    except:
        mjdref = None
    try:
        instr = lchdulist[ratehdu].header['INSTRUME']
    except:
        instr = 'EXTERN'
    # ----------------------------------------------------------------
    # Trying to comply with all different formats of fits light curves.
    # It's a madness...
    try:
        tstart = high_precision_keyword_read(lchdulist[ratehdu].header,
                                             'TSTART')
        tstop = high_precision_keyword_read(lchdulist[ratehdu].header,
                                            'TSTOP')
    except:
        raise(Exception('TSTART and TSTOP need to be specified'))
    # For nulccorr lcs this whould work
    try:
        timezero = high_precision_keyword_read(lchdulist[ratehdu].header,
                                               'TIMEZERO')
        # Sometimes timezero is "from tstart", sometimes it's an absolute time.
        # This tries to detect which case is this, and always consider it
        # referred to tstart
    except:
        timezero = 0
    # for lcurve light curves this should instead work
    if tunit == 'd':
        # TODO:
        # Check this. For now, I assume TD (JD - 2440000.5).
        # This is likely wrong
        timezero = Time(2440000.5 + timezero, scale='tdb', format='jd')
        tstart = Time(2440000.5 + tstart, scale='tdb', format='jd')
        tstop = Time(2440000.5 + tstop, scale='tdb', format='jd')
        # if None, use NuSTAR defaulf MJDREF
        mjdref = _assign_value_if_none(
            mjdref, Time(np.longdouble('55197.00076601852'), scale='tdb',
                         format='mjd'))
        # Convert everything to seconds since MJDREF.
        timezero = (timezero - mjdref).to('s').value
        tstart = (tstart - mjdref).to('s').value
        tstop = (tstop - mjdref).to('s').value
    # Heuristic: a timezero larger than tstart is treated as absolute.
    if timezero > tstart:
        timezero -= tstart
    time = np.array(lctable.field(timecolumn), dtype=np.longdouble)
    # Heuristic: if the times look relative (end before tstart), shift
    # them to absolute; otherwise only apply timezero.
    if time[-1] < tstart:
        time += timezero + tstart
    else:
        time += timezero
    try:
        dt = high_precision_keyword_read(lchdulist[ratehdu].header,
                                         'TIMEDEL')
        if tunit == 'd':
            dt *= 86400
    except:
        warnings.warn('Assuming that TIMEDEL is the difference between the'
                      ' first two times of the light curve')
        dt = time[1] - time[0]
    # ----------------------------------------------------------------
    ratecolumn = _assign_value_if_none(
        ratecolumn,
        _look_for_array_in_array(['RATE', 'RATE1', 'COUNTS'], lctable.names))
    # NOTE(review): np.float is a deprecated alias removed in NumPy 1.24;
    # this line will fail on modern NumPy and should become plain `float`.
    rate = np.array(lctable.field(ratecolumn), dtype=np.float)
    try:
        rate_e = np.array(lctable.field('ERROR'), dtype=np.longdouble)
    except:
        rate_e = np.zeros_like(rate)
    # RATE columns are per-second; convert to counts per bin.
    if 'RATE' in ratecolumn:
        rate *= dt
        rate_e *= dt
    try:
        fracexp = np.array(lctable.field('FRACEXP'), dtype=np.longdouble)
    except:
        fracexp = np.ones_like(rate)
    # The second assignment supersedes the first, additionally rejecting
    # NaNs via the (rate == rate) self-comparison.
    good_intervals = np.logical_and(fracexp >= fracexp_limit, fracexp <= 1)
    good_intervals = (rate == rate) * (fracexp >= fracexp_limit) * \
        (fracexp <= 1)
    # Correct good bins for their exposure fraction; zero out the rest.
    rate[good_intervals] /= fracexp[good_intervals]
    rate_e[good_intervals] /= fracexp[good_intervals]
    rate[np.logical_not(good_intervals)] = 0
    try:
        gtitable = lchdulist[gtistring].data
        gti_list = np.array([[a, b]
                             for a, b in zip(gtitable.field('START'),
                                             gtitable.field('STOP'))],
                            dtype=np.longdouble)
    except:
        # No GTI extension: derive GTIs from the good-exposure mask.
        gti_list = create_gti_from_condition(time, good_intervals)
    lchdulist.close()
    out = {}
    out['lc'] = rate
    out['elc'] = rate_e
    out['time'] = time
    out['dt'] = dt
    out['GTI'] = gti_list
    out['Tstart'] = tstart
    out['Tstop'] = tstop
    out['Instr'] = instr
    out['MJDref'] = mjdref.value
    out['total_ctrate'] = calc_countrate(time, rate, gtis=gti_list,
                                         bintime=dt)
    out['source_ctrate'] = calc_countrate(time, rate, gtis=gti_list,
                                          bintime=dt)
    logging.info('Saving light curve to %s' % outfile)
    save_lcurve(out, outfile)
    return [outfile]
def lcurve_from_txt(txt_file, outfile=None,
                    noclobber=False, outdir=None):
    """
    Load a lightcurve from a text file.

    Parameters
    ----------
    txt_file : str
        File name of the input light curve in text format. Assumes two columns:
        time, counts. Times are seconds from MJDREF 55197.00076601852 (NuSTAR).

    Returns
    -------
    outfile : [str]
        Returned as a list with a single element for consistency with
        `lcurve_from_events`

    Other Parameters
    ----------------
    outfile : str
        Output file name
    noclobber : bool
        If True, do not overwrite existing files
    """
    import numpy as np
    # Build the output path (strip any pre-existing extension first).
    outfile = _assign_value_if_none(outfile, mp_root(txt_file) + '_lc')
    outfile = outfile.replace(MP_FILE_EXTENSION, '') + MP_FILE_EXTENSION
    outdir = _assign_value_if_none(
        outdir, os.path.dirname(os.path.abspath(txt_file)))
    _, outfile = os.path.split(outfile)
    mkdir_p(outdir)
    outfile = os.path.join(outdir, outfile)
    if noclobber and os.path.exists(outfile):
        warnings.warn('File exists, and noclobber option used. Skipping')
        return [outfile]
    time, lc = np.genfromtxt(txt_file, delimiter=' ', unpack=True)
    time = np.array(time, dtype=np.longdouble)
    # BUG FIX: `np.float` was a deprecated alias of the builtin `float`
    # (removed in NumPy 1.24); use the builtin directly.
    lc = np.array(lc, dtype=float)
    # The bin time is taken from the first two samples.
    dt = time[1] - time[0]
    out = {}
    out['lc'] = lc
    out['time'] = time
    out['dt'] = dt
    # A single GTI covering the whole curve, padded by half a bin.
    gtis = np.array([[time[0] - dt / 2, time[-1] + dt / 2]])
    out['GTI'] = gtis
    out['Tstart'] = time[0] - dt / 2
    out['Tstop'] = time[-1] + dt / 2
    out['Instr'] = 'EXTERN'
    out['MJDref'] = np.longdouble('55197.00076601852')
    out['total_ctrate'] = calc_countrate(time, lc, gtis=gtis,
                                         bintime=dt)
    out['source_ctrate'] = calc_countrate(time, lc, gtis=gtis,
                                          bintime=dt)
    logging.info('Saving light curve to %s' % outfile)
    save_lcurve(out, outfile)
    return [outfile]
def _wrap_lc(args):
    """Multiprocessing adapter: unpack (file, kwargs) and run lcurve_from_events.

    Any failure is reported as a warning and swallowed so one bad file
    does not abort the whole pool; an empty list is returned instead.
    """
    fname, kwargs = args
    try:
        return lcurve_from_events(fname, **kwargs)
    except Exception as e:
        warnings.warn("MPlcurve exception: {0}".format(str(e)))
        return []
def _wrap_txt(args):
    """Multiprocessing adapter: unpack (file, kwargs) and run lcurve_from_txt.

    Failures are downgraded to warnings so the pool keeps going; an empty
    list is returned on error.
    """
    fname, kwargs = args
    try:
        return lcurve_from_txt(fname, **kwargs)
    except Exception as e:
        warnings.warn("MPlcurve exception: {0}".format(str(e)))
        return []
def _wrap_fits(args):
    """Multiprocessing adapter: unpack (file, kwargs) and run lcurve_from_fits.

    Failures are downgraded to warnings so the pool keeps going; an empty
    list is returned on error.
    """
    f, kwargs = args
    try:
        return lcurve_from_fits(f, **kwargs)
    except Exception as e:
        warnings.warn("MPlcurve exception: {0}".format(str(e)))
        # Consistency fix: _wrap_lc and _wrap_txt return [] on failure,
        # but this one implicitly returned None, breaking callers that
        # concatenate the per-file result lists.
        return []
def main(args=None):
    """Main function called by the `MPlcurve` command line script.

    Parses command-line options, picks the appropriate light-curve builder
    (events / FITS / txt input), runs it over all input files — optionally
    in parallel — and finally scrunches and/or joins the resulting curves.

    Parameters
    ----------
    args : list of str or None
        Command-line arguments; None means sys.argv (argparse default).
    """
    import argparse
    from multiprocessing import Pool
    description = ('Create lightcurves starting from event files. It is '
                   'possible to specify energy or channel filtering options')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("files", help="List of files", nargs='+')
    parser.add_argument("-b", "--bintime", type=float, default=1,
                        help="Bin time; if negative, negative power of 2")
    parser.add_argument("--safe-interval", nargs=2, type=float,
                        default=[0, 0],
                        help="Interval at start and stop of GTIs used" +
                        " for filtering")
    parser.add_argument("--pi-interval", type=int, default=[-1, -1],
                        nargs=2,
                        help="PI interval used for filtering")
    parser.add_argument('-e', "--e-interval", type=float, default=[-1, -1],
                        nargs=2,
                        help="Energy interval used for filtering")
    parser.add_argument("-s", "--scrunch",
                        help="Create scrunched light curve (single channel)",
                        default=False,
                        action="store_true")
    parser.add_argument("-j", "--join",
                        help="Create joint light curve (multiple channels)",
                        default=False,
                        action="store_true")
    parser.add_argument("-g", "--gti-split",
                        help="Split light curve by GTI",
                        default=False,
                        action="store_true")
    parser.add_argument("--minlen",
                        help="Minimum length of acceptable GTIs (default:4)",
                        default=4, type=float)
    parser.add_argument("--ignore-gtis",
                        help="Ignore GTIs",
                        default=False,
                        action="store_true")
    parser.add_argument("-d", "--outdir", type=str, default=None,
                        help='Output directory')
    parser.add_argument("-o", "--outfile", type=str, default=None,
                        help='Output file name')
    parser.add_argument("--loglevel",
                        help=("use given logging level (one between INFO, "
                              "WARNING, ERROR, CRITICAL, DEBUG; "
                              "default:WARNING)"),
                        default='WARNING',
                        type=str)
    parser.add_argument("--nproc",
                        help=("Number of processors to use"),
                        default=1,
                        type=int)
    parser.add_argument("--debug", help="use DEBUG logging level",
                        default=False, action='store_true')
    parser.add_argument("--noclobber", help="Do not overwrite existing files",
                        default=False, action='store_true')
    parser.add_argument("--fits-input",
                        help="Input files are light curves in FITS format",
                        default=False, action='store_true')
    parser.add_argument("--txt-input",
                        help="Input files are light curves in txt format",
                        default=False, action='store_true')
    args = parser.parse_args(args)
    if args.debug:
        # --debug overrides whatever --loglevel said.
        args.loglevel = 'DEBUG'
    bintime = args.bintime
    numeric_level = getattr(logging, args.loglevel.upper(), None)
    logging.basicConfig(filename='MPlcurve.log', level=numeric_level,
                        filemode='w')
    safe_interval = args.safe_interval
    pi_interval = np.array(args.pi_interval)
    e_interval = np.array(args.e_interval)
    # ------ Use functools.partial to wrap lcurve* with relevant keywords---
    # Select the worker wrapper matching the input-file format; the
    # filtering options only apply to event-file input.
    if args.fits_input:
        wrap_fun = _wrap_fits
        argdict = {"noclobber": args.noclobber}
    elif args.txt_input:
        wrap_fun = _wrap_txt
        argdict = {"noclobber": args.noclobber}
    else:
        wrap_fun = _wrap_lc
        argdict = {"noclobber": args.noclobber, "safe_interval": safe_interval,
                   "pi_interval": pi_interval,
                   "e_interval": e_interval,
                   "min_length": args.minlen,
                   "gti_split": args.gti_split,
                   "ignore_gtis": args.ignore_gtis,
                   "bintime": bintime, "outdir": args.outdir}
    # One (file, kwargs) pair per input file; copy() so per-file 'outfile'
    # entries below do not leak between files.
    arglist = [[f, argdict.copy()] for f in args.files]
    na = len(arglist)
    outfile = args.outfile
    if outfile is not None:
        outname = os.path.splitext(outfile)[0]
        for i in range(na):
            if na > 1:
                # NOTE(review): for multiple files the index suffix is
                # appended to the raw outfile (extension included), not to
                # the splitext'd outname above — confirm this is intended.
                outname = outfile + "_{0}".format(i)
            arglist[i][1]['outfile'] = outname
    # -------------------------------------------------------------------------
    outfiles = []
    if os.name == 'nt' or args.nproc == 1:
        # Serial path: Windows, or a single processor requested.
        for a in arglist:
            outfiles.append(wrap_fun(a))
    else:
        pool = Pool(processes=args.nproc)
        for i in pool.imap_unordered(wrap_fun, arglist):
            outfiles.append(i)
        pool.close()
    logging.debug(outfiles)
    if args.scrunch:
        scrunch_lightcurves(outfiles)
    if args.join:
        join_lightcurves(outfiles)
def scrunch_main(args=None):
    """Main function called by the `MPscrunchlc` command line script.

    Parses command-line options and sums the given light curves into a
    single output file.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Sum lightcurves from different instruments'
                    ' or energy ranges')
    parser.add_argument("files", help="List of files", nargs='+')
    parser.add_argument("-o", "--out", type=str,
                        default="out_scrlc" + MP_FILE_EXTENSION,
                        help='Output file')
    parser.add_argument("--loglevel",
                        type=str,
                        default='WARNING',
                        help=("use given logging level (one between INFO, "
                              "WARNING, ERROR, CRITICAL, DEBUG; "
                              "default:WARNING)"))
    parser.add_argument("--debug", help="use DEBUG logging level",
                        default=False, action='store_true')
    parsed = parser.parse_args(args)
    input_files = parsed.files
    if parsed.debug:
        parsed.loglevel = 'DEBUG'
    level = getattr(logging, parsed.loglevel.upper(), None)
    logging.basicConfig(filename='MPscrunchlc.log',
                        level=level,
                        filemode='w')
    scrunch_lightcurves(input_files, parsed.out)
| {
"content_hash": "4a97e23f40902628064101d725243b6c",
"timestamp": "",
"source": "github",
"line_count": 956,
"max_line_length": 79,
"avg_line_length": 34.12029288702929,
"alnum_prop": 0.5538183267420829,
"repo_name": "matteobachetti/MaLTPyNT",
"id": "a929968d62564863669e69b0bc0c23608a865054",
"size": "32683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maltpynt/lcurve.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "286130"
}
],
"symlink_target": ""
} |
def extractLazyladytranslationsBlogspotCom(item):
    '''
    Parser for 'lazyladytranslations.blogspot.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with no chapter/volume info, and preview posts.
    if "preview" in item['title'].lower() or not (chp or vol):
        return None
    release_map = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, tl_type in release_map:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "24081c7e4cc34d9d84cc33d4972d9dee",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27.476190476190474,
"alnum_prop": 0.6464471403812825,
"repo_name": "fake-name/ReadableWebProxy",
"id": "29e6e1eb1cd673d69445a3e6ff6d266989c26c83",
"size": "578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractLazyladytranslationsBlogspotCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from openstack import exceptions
from openstack.tests import fakes
from openstack.tests.unit import base
class TestMicroversions(base.TestCase):
    """Tests for compute microversion handling in `get_server`.

    Exercises both ways of pinning a microversion
    (``compute_api_version`` implies one; ``compute_default_microversion``
    sets one directly), checks that unsupported values raise
    ``ConfigException`` early, that a valid value is sent as the
    ``OpenStack-API-Version`` header, and that conflicting settings fail
    before authentication.

    The original file repeated the same body in six tests; the shared
    logic is factored into private helpers with the test names unchanged.
    """

    def setUp(self):
        super(TestMicroversions, self).setUp()
        self.use_compute_discovery()

    def _assert_bad_microversion(self, config_key, version):
        # Helper: setting |config_key| to an unsupported |version| must make
        # get_server raise ConfigException (and register no stray calls).
        self.cloud.config.config[config_key] = version
        self.assertRaises(
            exceptions.ConfigException,
            self.cloud.get_server, 'doesNotExist',
        )
        self.assert_calls()

    def test_get_bad_inferred_max_microversion(self):
        self._assert_bad_microversion('compute_api_version', '2.61')

    def test_get_bad_default_max_microversion(self):
        self._assert_bad_microversion('compute_default_microversion', '2.61')

    def test_get_bad_inferred_min_microversion(self):
        self._assert_bad_microversion('compute_api_version', '2.7')

    def test_get_bad_default_min_microversion(self):
        self._assert_bad_microversion('compute_default_microversion', '2.7')

    def _assert_microversion_header_sent(self, config_key):
        # Helper: with |config_key| set to 2.42, the servers request must
        # carry the matching OpenStack-API-Version header and succeed.
        self.cloud.config.config[config_key] = '2.42'
        server1 = fakes.make_fake_server('123', 'mickey')
        server2 = fakes.make_fake_server('345', 'mouse')
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers', 'detail']),
                 request_headers={'OpenStack-API-Version': 'compute 2.42'},
                 json={'servers': [server1, server2]}),
        ])
        r = self.cloud.get_server('mickey', bare=True)
        self.assertIsNotNone(r)
        self.assertEqual(server1['name'], r['name'])
        self.assert_calls()

    def test_inferred_default_microversion(self):
        self._assert_microversion_header_sent('compute_api_version')

    def test_default_microversion(self):
        self._assert_microversion_header_sent('compute_default_microversion')

    def test_conflicting_implied_and_direct(self):
        self.cloud.config.config['compute_default_microversion'] = '2.7'
        self.cloud.config.config['compute_api_version'] = '2.13'
        self.assertRaises(exceptions.ConfigException, self.cloud.get_server)
        # We should fail before we even authenticate
        self.assertEqual(0, len(self.adapter.request_history))
| {
"content_hash": "7e0db3b83888ee90daf749d4acc04188",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 76,
"avg_line_length": 30.27358490566038,
"alnum_prop": 0.6001869741352446,
"repo_name": "openstack/python-openstacksdk",
"id": "67dc79d1a78adf7244c4c994472af02f2b4e3de8",
"size": "3755",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/test_microversions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3804005"
},
{
"name": "Shell",
"bytes": "9027"
}
],
"symlink_target": ""
} |
import imp
import os
import io
import sys
from mock import patch
from gp_unittest import GpTestCase
class GpSshTestCase(GpTestCase):
    """Unit tests for the gpssh management utility."""

    def setUp(self):
        # gpssh ships without a .py extension, so a plain "import gpssh"
        # cannot work; imp.load_source yields the same module object that
        # "import gpssh" would have produced.
        gpssh_path = os.path.abspath(
            os.path.dirname(__file__) + "/../../../gpssh")
        self.subject = imp.load_source('gpssh', gpssh_path)
        sys.argv = []

    @patch('sys.exit')
    def test_when_run_without_args_prints_help_text(self, sys_exit_mock):
        sys_exit_mock.side_effect = Exception("on purpose")
        # GOOD_MOCK_EXAMPLE of stdout
        # Capture stdout while main() runs so the emitted help text can be
        # inspected afterwards.
        with patch('sys.stdout', new=io.BytesIO()) as captured_stdout:
            with self.assertRaisesRegexp(Exception, "on purpose"):
                self.subject.main()
            self.assertIn('gpssh -- ssh access to multiple hosts at once',
                          captured_stdout.getvalue())

    @patch('sys.exit')
    def test_happy_ssh_to_localhost_succeeds(self, exit_mock):
        sys.argv = ['', '-h', 'localhost', 'uptime']
        self.subject.main()
        exit_mock.assert_called_with(0)
| {
"content_hash": "f62a8e9291c1d062f047e521e11ff27c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 94,
"avg_line_length": 33.138888888888886,
"alnum_prop": 0.6236378876781223,
"repo_name": "kaknikhil/gpdb",
"id": "776a45eca23268387659dcb2f6f12e1225be4e3f",
"size": "1193",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/gppylib/test/unit/test_unit_gpssh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35311132"
},
{
"name": "C++",
"bytes": "3781313"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "734463"
},
{
"name": "HTML",
"bytes": "191406"
},
{
"name": "Java",
"bytes": "268244"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "104559"
},
{
"name": "Makefile",
"bytes": "434729"
},
{
"name": "PLSQL",
"bytes": "261269"
},
{
"name": "PLpgSQL",
"bytes": "5487022"
},
{
"name": "Perl",
"bytes": "3893346"
},
{
"name": "Perl 6",
"bytes": "14377"
},
{
"name": "Python",
"bytes": "8690818"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3824391"
},
{
"name": "Shell",
"bytes": "544188"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488932"
}
],
"symlink_target": ""
} |
"""
:mod:`zsl_jwt.configuration`
-------------------------
The configuration of the authentication.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import * # NOQA
class AuthConfiguration(object):
    """
    Auth module configuration.

    Wraps the single setting of the auth module: the string naming the
    class that implements
    :class:`zsl_jwt.auth.service.AuthenticationService`.
    """

    def __init__(self, authentication_service_class):
        # type: (str)->None
        self._auth_service_class = authentication_service_class

    @property
    def authentication_service_class(self):
        # type:()->str
        """
        String identifying the
        :class:`zsl_jwt.auth.service.AuthenticationService`
        implementation used by the login controller.
        """
        return self._auth_service_class
| {
"content_hash": "b5d5ada70f4cb15126254ca8c58d0cca",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 26.705882352941178,
"alnum_prop": 0.6585903083700441,
"repo_name": "AtteqCom/zsl_jwt",
"id": "31b899ebd927f52a496ce736582a174b6fbf752f",
"size": "908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zsl_jwt/auth/configuration.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "29141"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.devcenter import DevCenterMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-devcenter
# USAGE
python list_usages.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Sample: list DevCenter usage quotas for the westus location.

    Authenticates through DefaultAzureCredential and prints each usage
    record returned by the management API.
    """
    mgmt_client = DevCenterMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="0ac520ee-14c0-480f-b6c9-0a90c58ffff",
    )
    usage_pages = mgmt_client.usages.list_by_location(
        location="westus",
    )
    for usage in usage_pages:
        print(usage)
# x-ms-original-file: specification/devcenter/resource-manager/Microsoft.DevCenter/preview/2022-11-11-preview/examples/Usages_ListByLocation.json
# Run the sample only when executed directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "558b005989f3f126c1eee56abba732da",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 145,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.7291471415182755,
"repo_name": "Azure/azure-sdk-for-python",
"id": "bf60f206c42edc0f0782a1ab0b9bb3ebc6667f41",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/devcenter/azure-mgmt-devcenter/generated_samples/list_usages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from panda3d.core import Datagram
import CatalogItem
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from direct.interval.IntervalGlobal import *
from toontown.estate import GardenGlobals
from direct.actor import Actor
class CatalogGardenItem(CatalogItem.CatalogItem):
    """Catalog entry for a gardening special (selected by an index into
    GardenGlobals.Specials) that a toon can buy and add to its garden.

    NOTE(review): the original class defined getPurchaseLimit and compareTo
    twice each; Python keeps only the later definition, so the shadowed
    earlier copies were dead code and have been removed without changing
    behavior.
    """
    sequenceNumber = 0

    def makeNewItem(self, itemIndex = 0, count = 3, tagCode = 1):
        # itemIndex selects the entry in GardenGlobals.Specials,
        # count is the number of items granted on purchase.
        self.gardenIndex = itemIndex
        self.numItems = count
        self.giftCode = tagCode
        CatalogItem.CatalogItem.makeNewItem(self)

    def getAcceptItemErrorText(self, retcode):
        if retcode == ToontownGlobals.P_ItemAvailable:
            return TTLocalizer.CatalogAcceptGarden
        return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)

    def saveHistory(self):
        return 1

    def getTypeName(self):
        return TTLocalizer.GardenTypeName

    def getName(self):
        return GardenGlobals.Specials[self.gardenIndex]['photoName']

    def recordPurchase(self, avatar, optional):
        if avatar:
            avatar.addGardenItem(self.gardenIndex, self.numItems)
        return ToontownGlobals.P_ItemAvailable

    def getPicture(self, avatar):
        """Build the catalog photo for this special.

        Returns (frame, interval) for animated models, (frame, None) for
        static ones.
        """
        special = GardenGlobals.Specials[self.gardenIndex]
        photoModel = special['photoModel']
        if 'photoAnimation' in special:
            modelPath = photoModel + special['photoAnimation'][0]
            animationName = special['photoAnimation'][1]
            animationPath = photoModel + animationName
            self.model = Actor.Actor()
            self.model.loadModel(modelPath)
            self.model.loadAnims(dict([[animationName, animationPath]]))
            if settings['smoothAnimations']:
                self.model.setBlend(frameBlend=True)
            frame, ival = self.makeFrameModel(self.model, 0)
            # The interval returned by makeFrameModel is deliberately
            # replaced with a 2-second ActorInterval of the photo animation.
            ival = ActorInterval(self.model, animationName, 2.0)
            frame.setPos(special['photoPos'])
            self.model.setScale(special['photoScale'])
            self.hasPicture = True
            return (frame, ival)
        else:
            self.model = loader.loadModel(photoModel)
            frame = self.makeFrame()
            self.model.reparentTo(frame)
            self.model.setPos(*special['photoPos'])
            self.model.setScale(special['photoScale'])
            self.hasPicture = True
            return (frame, None)

    def cleanupPicture(self):
        CatalogItem.CatalogItem.cleanupPicture(self)
        if hasattr(self, 'model') and self.model:
            self.model.detachNode()
            self.model = None

    def output(self, store = -1):
        return 'CatalogGardenItem(%s%s)' % (self.gardenIndex, self.formatOptionalData(store))

    def getHashContents(self):
        return self.gardenIndex

    def getBasePrice(self):
        # Price in jellybeans, taken from the Specials table.
        beanCost = GardenGlobals.Specials[self.gardenIndex]['beanCost']
        return beanCost

    def decodeDatagram(self, di, versionNumber, store):
        CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
        self.gardenIndex = di.getUint8()
        self.numItems = di.getUint8()

    def encodeDatagram(self, dg, store):
        CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
        dg.addUint8(self.gardenIndex)
        dg.addUint8(self.numItems)

    def getRequestPurchaseErrorText(self, retcode):
        retval = CatalogItem.CatalogItem.getRequestPurchaseErrorText(self, retcode)
        origText = retval
        if retval == TTLocalizer.CatalogPurchaseItemAvailable or retval == TTLocalizer.CatalogPurchaseItemOnOrder:
            recipeKey = GardenGlobals.getRecipeKeyUsingSpecial(self.gardenIndex)
            if not recipeKey == -1:
                # Plantable specials get a "plant it with..." hint appended.
                retval += GardenGlobals.getPlantItWithString(self.gardenIndex)
            if self.gardenIndex == GardenGlobals.GardenAcceleratorSpecial:
                if GardenGlobals.ACCELERATOR_USED_FROM_SHTIKER_BOOK:
                    # Drop the recipe hint and tell the player to use the
                    # accelerator from the specials tab instead.
                    retval = origText
                    retval += TTLocalizer.UseFromSpecialsTab
                retval += TTLocalizer.MakeSureWatered
        return retval

    def getRequestPurchaseErrorTextTimeout(self):
        return 20

    def getDeliveryTime(self):
        # Accelerators take a day (in minutes) to deliver; everything else
        # is instant.
        if self.gardenIndex == GardenGlobals.GardenAcceleratorSpecial:
            return 1440
        else:
            return 0

    def getPurchaseLimit(self):
        if self.gardenIndex == GardenGlobals.GardenAcceleratorSpecial:
            return 1
        else:
            return 0

    def compareTo(self, other):
        # Original had a redundant equal/unequal branch returning the same
        # expression; collapsed to the single equivalent return.
        return self.gardenIndex - other.gardenIndex

    def reachedPurchaseLimit(self, avatar):
        if avatar.onOrder.count(self) != 0:
            return 1
        if avatar.mailboxContents.count(self) != 0:
            return 1
        for specials in avatar.getGardenSpecials():
            if specials[0] == self.gardenIndex:
                # Only the accelerator is limited to one owned at a time.
                if self.gardenIndex == GardenGlobals.GardenAcceleratorSpecial:
                    return 1
        return 0

    def isSkillTooLow(self, avatar):
        """Return True if the avatar cannot use this special yet (too few
        bean-box slots for the recipe, or shovel skill below the special's
        minimum)."""
        recipeKey = GardenGlobals.getRecipeKeyUsingSpecial(self.gardenIndex)
        recipe = GardenGlobals.Recipes[recipeKey]
        numBeansRequired = len(recipe['beans'])
        canPlant = avatar.getBoxCapability()
        result = False
        if canPlant < numBeansRequired:
            result = True
        if not result and self.gardenIndex in GardenGlobals.Specials and 'minSkill' in GardenGlobals.Specials[self.gardenIndex]:
            minSkill = GardenGlobals.Specials[self.gardenIndex]['minSkill']
            if avatar.shovelSkill < minSkill:
                result = True
            else:
                result = False
        return result

    def noGarden(self, avatar):
        return not avatar.getGardenStarted()

    def isGift(self):
        return 0
"content_hash": "7d2a1afce6d99ea361fd55883f0ff389",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 128,
"avg_line_length": 39.32748538011696,
"alnum_prop": 0.6370260223048327,
"repo_name": "DedMemez/ODS-August-2017",
"id": "07c38af41364f1b5e0e775f8f578b4ac86044bce",
"size": "6820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalog/CatalogGardenItem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
} |
def extractWwwBeanylandCom(item):
    '''
    Parser for 'www.beanyland.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with no chapter/volume info, and preview posts.
    if "preview" in item['title'].lower() or not (chp or vol):
        return None
    release_map = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, tl_type in release_map:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "6866e53957accab9ca7648b1052747fe",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 25.952380952380953,
"alnum_prop": 0.6256880733944954,
"repo_name": "fake-name/ReadableWebProxy",
"id": "714280b8c3d07d6b02fbf116d5f812c8543b0566",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractWwwBeanylandCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""Replays web pages under simulated network conditions.
Must be run as administrator (sudo).
To record web pages:
1. Start the program in record mode.
$ sudo ./replay.py --record archive.wpr
2. Load the web pages you want to record in a web browser. It is important to
clear browser caches before this so that all subresources are requested
from the network.
3. Kill the process to stop recording.
To replay web pages:
1. Start the program in replay mode with a previously recorded archive.
$ sudo ./replay.py archive.wpr
2. Load recorded pages in a web browser. A 404 will be served for any pages or
resources not in the recorded archive.
Network simulation examples:
# 128KByte/s uplink bandwidth, 4Mbps/s downlink bandwidth with 100ms RTT time
$ sudo ./replay.py --up 128KByte/s --down 4Mbit/s --delay_ms=100 archive.wpr
# 1% packet loss rate
$ sudo ./replay.py --packet_loss_rate=0.01 archive.wpr
"""
import json
import logging
import optparse
import os
import sys
import traceback
import cachemissarchive
import customhandlers
import dnsproxy
import httparchive
import httpclient
import httpproxy
import platformsettings
import replayspdyserver
import servermanager
import trafficshaper
# Abort immediately on interpreters older than 2.6 (Python 2 syntax).
# NOTE(review): sys.version comparison is lexicographic ('2.10' < '2.6');
# acceptable for the 2.x era this script targets, but worth confirming.
if sys.version < '2.6':
    print 'Need Python 2.6 or greater.'
    sys.exit(1)
def configure_logging(log_level_name, log_file_name=None):
    """Configure logging level and format.

    Args:
      log_level_name: 'debug', 'info', 'warning', 'error', or 'critical'.
      log_file_name: a file name
    """
    # basicConfig is a no-op once handlers exist, so warn if someone logged
    # before configuration.
    if logging.root.handlers:
        logging.critical('A logging method (e.g. "logging.warn(...)")'
                         ' was called before logging was configured.')
    level = getattr(logging, log_level_name.upper())
    fmt = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(level=level, format=fmt)
    root_logger = logging.getLogger()
    if log_file_name:
        file_handler = logging.FileHandler(log_file_name)
        file_handler.setLevel(level)
        file_handler.setFormatter(logging.Formatter(fmt))
        root_logger.addHandler(file_handler)
    system_handler = platformsettings.get_system_logging_handler()
    if system_handler:
        root_logger.addHandler(system_handler)
def AddDnsForward(server_manager, host):
    """Forward DNS traffic.

    Registers the nameserver change with the server manager so it runs as
    part of the managed server lifecycle.
    """
    server_manager.Append(
        platformsettings.set_temporary_primary_nameserver, host)
def AddDnsProxy(server_manager, options, host, real_dns_lookup, http_archive):
    """Register a DNS proxy server, with optional filters, on server_manager."""
    lookup_filters = []
    if options.dns_private_passthrough:
        passthrough_filter = dnsproxy.PrivateIpFilter(
            real_dns_lookup, http_archive)
        lookup_filters.append(passthrough_filter)
        server_manager.AppendRecordCallback(
            passthrough_filter.InitializeArchiveHosts)
        server_manager.AppendReplayCallback(
            passthrough_filter.InitializeArchiveHosts)
    if options.shaping_dns:
        latency_filter = dnsproxy.DelayFilter(
            options.record, **options.shaping_dns)
        lookup_filters.append(latency_filter)
        server_manager.AppendRecordCallback(latency_filter.SetRecordMode)
        server_manager.AppendReplayCallback(latency_filter.SetReplayMode)
    server_manager.Append(
        dnsproxy.DnsProxyServer, host,
        dns_lookup=dnsproxy.ReplayDnsLookup(host, lookup_filters))
def AddWebProxy(server_manager, options, host, real_dns_lookup, http_archive,
                cache_misses):
    """Register the web proxy server(s) on server_manager.

    In SPDY mode a replay-only SPDY server is added; otherwise an HTTP
    proxy (and, with --ssl, an HTTPS proxy) wired to a record/replay
    archive fetcher is added.
    """
    inject_script = httpclient.GetInjectScript(options.inject_scripts)
    custom_handlers = customhandlers.CustomHandlers(options, http_archive)
    if options.spdy:
        # SPDY supports replay only, never record.
        assert not options.record, 'spdy cannot be used with --record.'
        archive_fetch = httpclient.ReplayHttpArchiveFetch(
            http_archive,
            inject_script,
            options.diff_unknown_requests,
            cache_misses=cache_misses,
            use_closest_match=options.use_closest_match)
        server_manager.Append(
            replayspdyserver.ReplaySpdyServer, archive_fetch,
            custom_handlers, host=host, port=options.port,
            certfile=options.certfile)
    else:
        custom_handlers.add_server_manager_handler(server_manager)
        # Controllable fetch can switch between record and replay modes.
        archive_fetch = httpclient.ControllableHttpArchiveFetch(
            http_archive, real_dns_lookup,
            inject_script,
            options.diff_unknown_requests, options.record,
            cache_misses=cache_misses,
            use_closest_match=options.use_closest_match)
        server_manager.AppendRecordCallback(archive_fetch.SetRecordMode)
        server_manager.AppendReplayCallback(archive_fetch.SetReplayMode)
        server_manager.Append(
            httpproxy.HttpProxyServer,
            archive_fetch, custom_handlers,
            host=host, port=options.port, **options.shaping_http)
        if options.ssl:
            server_manager.Append(
                httpproxy.HttpsProxyServer,
                archive_fetch, custom_handlers, options.certfile,
                host=host, port=options.ssl_port, **options.shaping_http)
def AddTrafficShaper(server_manager, options, host):
    """Register a dummynet traffic shaper on server_manager, if configured."""
    if not options.shaping_dummynet:
        return
    shaper_kwargs = dict(
        host=host,
        port=options.shaping_port,
        ssl_port=options.ssl_shaping_port if options.ssl else None,
        use_loopback=not options.server_mode,
        **options.shaping_dummynet)
    if not options.dns_forwarding:
        shaper_kwargs['dns_port'] = None
    server_manager.Append(trafficshaper.TrafficShaper, **shaper_kwargs)
class OptionsWrapper(object):
    """Add checks, updates, and methods to option values.

    Example:
      options, args = option_parser.parse_args()
      options = OptionsWrapper(options, option_parser)  # run checks and updates
      if options.record and options.HasTrafficShaping():
         [...]
    """
    # Option names that, when set, imply traffic shaping.
    _TRAFFICSHAPING_OPTIONS = set(
        ['down', 'up', 'delay_ms', 'packet_loss_rate', 'init_cwnd', 'net'])
    # (option, options that may not be combined with it) pairs.
    _CONFLICTING_OPTIONS = (
        ('record', ('down', 'up', 'delay_ms', 'packet_loss_rate', 'net',
                    'spdy', 'use_server_delay')),
        ('append', ('down', 'up', 'delay_ms', 'packet_loss_rate', 'net',
                    'spdy', 'use_server_delay')),  # same as --record
        ('net', ('down', 'up', 'delay_ms')),
        ('server', ('server_mode',)),
    )
    # The --net values come from http://www.webpagetest.org/.
    # https://sites.google.com/a/webpagetest.org/docs/other-resources/2011-fcc-broadband-data
    _NET_CONFIGS = (
        # key       --down        --up         --delay_ms
        ('dsl',   ('1536Kbit/s', '384Kbit/s', '50')),
        ('cable', ('5Mbit/s',    '1Mbit/s',   '28')),
        ('fios',  ('20Mbit/s',   '5Mbit/s',   '4')),
    )
    NET_CHOICES = [key for key, values in _NET_CONFIGS]

    def __init__(self, options, parser):
        self._options = options
        self._parser = parser
        # Names of options the user explicitly set (value differs from the
        # parser default).
        self._nondefaults = set([
            name for name, value in parser.defaults.items()
            if getattr(options, name) != value])
        self._CheckConflicts()
        self._MassageValues()

    def _CheckConflicts(self):
        """Give an error if mutually exclusive options are used."""
        for option, bad_options in self._CONFLICTING_OPTIONS:
            if option in self._nondefaults:
                for bad_option in bad_options:
                    if bad_option in self._nondefaults:
                        self._parser.error('Option --%s cannot be used with --%s.' %
                                           (bad_option, option))

    def _ShapingKeywordArgs(self, shaping_key):
        """Return the shaping keyword args for |shaping_key|.

        Args:
          shaping_key: one of 'dummynet', 'dns', 'http'.
        Returns:
          {}  # if shaping_key does not apply, or options have default values.
          {k: v, ...}
        """
        kwargs = {}
        # Add a keyword only if the user explicitly set the option.
        def AddItemIfSet(d, kw_key, opt_key=None):
            opt_key = opt_key or kw_key
            if opt_key in self._nondefaults:
                d[kw_key] = getattr(self, opt_key)
        if ((self.shaping_type == 'proxy' and shaping_key in ('dns', 'http')) or
                self.shaping_type == shaping_key):
            AddItemIfSet(kwargs, 'delay_ms')
            if shaping_key in ('dummynet', 'http'):
                AddItemIfSet(kwargs, 'down_bandwidth', opt_key='down')
                AddItemIfSet(kwargs, 'up_bandwidth', opt_key='up')
                if shaping_key == 'dummynet':
                    AddItemIfSet(kwargs, 'packet_loss_rate')
                    AddItemIfSet(kwargs, 'init_cwnd')
                elif self.shaping_type != 'none':
                    # Only dummynet can shape these; warn that they are ignored.
                    if 'packet_loss_rate' in self._nondefaults:
                        logging.warn('Shaping type, %s, ignores --packet_loss_rate=%s',
                                     self.shaping_type, self.packet_loss_rate)
                    if 'init_cwnd' in self._nondefaults:
                        logging.warn('Shaping type, %s, ignores --init_cwnd=%s',
                                     self.shaping_type, self.init_cwnd)
        return kwargs

    def _MassageValues(self):
        """Set options that depend on the values of other options."""
        if self.append and not self.record:
            self._options.record = True
        # --net presets override down/up/delay_ms.
        for net_choice, values in self._NET_CONFIGS:
            if net_choice == self.net:
                self._options.down, self._options.up, self._options.delay_ms = values
                self._nondefaults.update(['down', 'up', 'delay_ms'])
        if not self.shaping_port:
            self._options.shaping_port = self.port
        if not self.ssl_shaping_port:
            self._options.ssl_shaping_port = self.ssl_port
        if not self.ssl:
            self._options.certfile = None
        self.shaping_dns = self._ShapingKeywordArgs('dns')
        self.shaping_http = self._ShapingKeywordArgs('http')
        self.shaping_dummynet = self._ShapingKeywordArgs('dummynet')

    def __getattr__(self, name):
        """Make the original option values available."""
        return getattr(self._options, name)

    def __repr__(self):
        """Return a json representation of the original options dictionary."""
        return json.dumps(self._options.__dict__)

    def IsRootRequired(self):
        """Returns True iff the options require root access."""
        return (self.shaping_dummynet or
                self.dns_forwarding or
                self.port < 1024 or
                self.ssl_port < 1024) and self.admin_check
def replay(options, replay_filename):
  """Run the record/replay server until interrupted.

  Args:
    options: an OptionsWrapper with massaged command-line options.
    replay_filename: path of the archive to record to or replay from, or
        None when --server points at a remote replay host.

  Returns:
    0 on clean shutdown, 1 for known server errors, 2 for unexpected errors.
  """
  if options.IsRootRequired():
    platformsettings.rerun_as_administrator()
  configure_logging(options.log_level, options.log_file)
  server_manager = servermanager.ServerManager(options.record)
  cache_misses = None
  if options.cache_miss_file:
    if os.path.exists(options.cache_miss_file):
      # Append to an existing cache-miss archive instead of clobbering it.
      logging.warning('Cache Miss Archive file %s already exists; '
                      'replay will load and append entries to archive file',
                      options.cache_miss_file)
      cache_misses = cachemissarchive.CacheMissArchive.Load(
          options.cache_miss_file)
    else:
      cache_misses = cachemissarchive.CacheMissArchive(
          options.cache_miss_file)
  if options.server:
    # Client mode: only repoint DNS at the remote replay server.
    AddDnsForward(server_manager, options.server)
  else:
    host = platformsettings.get_server_ip_address(options.server_mode)
    real_dns_lookup = dnsproxy.RealDnsLookup(
        name_servers=[platformsettings.get_original_primary_nameserver()])
    if options.record:
      httparchive.HttpArchive.AssertWritable(replay_filename)
      if options.append and os.path.exists(replay_filename):
        http_archive = httparchive.HttpArchive.Load(replay_filename)
        logging.info('Appending to %s (loaded %d existing responses)',
                     replay_filename, len(http_archive))
      else:
        http_archive = httparchive.HttpArchive()
    else:
      http_archive = httparchive.HttpArchive.Load(replay_filename)
      logging.info('Loaded %d responses from %s',
                   len(http_archive), replay_filename)
    server_manager.AppendRecordCallback(real_dns_lookup.ClearCache)
    server_manager.AppendRecordCallback(http_archive.clear)
    if options.dns_forwarding:
      if not options.server_mode:
        AddDnsForward(server_manager, host)
      AddDnsProxy(server_manager, options, host, real_dns_lookup, http_archive)
    if options.ssl and options.certfile is None:
      # Default to the certificate shipped next to this script.
      options.certfile = os.path.join(os.path.dirname(__file__), 'wpr_cert.pem')
    http_proxy_address = platformsettings.get_httpproxy_ip_address(
        options.server_mode)
    AddWebProxy(server_manager, options, http_proxy_address, real_dns_lookup,
                http_archive, cache_misses)
    AddTrafficShaper(server_manager, options, host)
  exit_status = 0
  try:
    server_manager.Run()
  except KeyboardInterrupt:
    logging.info('Shutting down.')
  except (dnsproxy.DnsProxyException,
          trafficshaper.TrafficShaperException,
          platformsettings.NotAdministratorError,
          platformsettings.DnsUpdateError) as e:
    logging.critical('%s: %s', e.__class__.__name__, e)
    exit_status = 1
  except:
    logging.critical(traceback.format_exc())
    exit_status = 2
  # NOTE(review): when --server is given, http_archive is never bound, so this
  # block assumes --server and --record are not combined -- confirm.
  if options.record:
    http_archive.Persist(replay_filename)
    logging.info('Saved %d responses to %s', len(http_archive), replay_filename)
  if cache_misses:
    cache_misses.Persist()
    logging.info('Saved %d cache misses and %d requests to %s',
                 cache_misses.get_total_cache_misses(),
                 len(cache_misses.request_counts.keys()),
                 options.cache_miss_file)
  return exit_status
def GetOptionParser():
  """Build the optparse parser for the replay command line.

  Returns:
    An optparse.OptionParser with the network-simulation and replay-harness
    option groups attached.
  """
  class PlainHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that emits the description verbatim (no re-wrapping)."""
    def format_description(self, description):
      if description:
        return description + '\n'
      else:
        return ''
  option_parser = optparse.OptionParser(
      usage='%prog [options] replay_file',
      formatter=PlainHelpFormatter(),
      description=__doc__,
      epilog='http://code.google.com/p/web-page-replay/')
  option_parser.add_option('--spdy', default=False,
      action='store_true',
      help='Replay via SPDY. (Can be combined with --no-ssl).')
  option_parser.add_option('-r', '--record', default=False,
      action='store_true',
      help='Download real responses and record them to replay_file')
  option_parser.add_option('--append', default=False,
      action='store_true',
      help='Append responses to replay_file.')
  option_parser.add_option('-l', '--log_level', default='debug',
      action='store',
      type='choice',
      choices=('debug', 'info', 'warning', 'error', 'critical'),
      help='Minimum verbosity level to log')
  option_parser.add_option('-f', '--log_file', default=None,
      action='store',
      type='string',
      help='Log file to use in addition to writting logs to stderr.')
  option_parser.add_option('-e', '--cache_miss_file', default=None,
      action='store',
      dest='cache_miss_file',
      type='string',
      help='Archive file to record cache misses as pickled objects.'
           'Cache misses occur when a request cannot be served in replay mode.')
  network_group = optparse.OptionGroup(option_parser,
      'Network Simulation Options',
      'These options configure the network simulation in replay mode')
  network_group.add_option('-u', '--up', default='0',
      action='store',
      type='string',
      help='Upload Bandwidth in [K|M]{bit/s|Byte/s}. Zero means unlimited.')
  network_group.add_option('-d', '--down', default='0',
      action='store',
      type='string',
      help='Download Bandwidth in [K|M]{bit/s|Byte/s}. Zero means unlimited.')
  network_group.add_option('-m', '--delay_ms', default='0',
      action='store',
      type='string',
      help='Propagation delay (latency) in milliseconds. Zero means no delay.')
  network_group.add_option('-p', '--packet_loss_rate', default='0',
      action='store',
      type='string',
      help='Packet loss rate in range [0..1]. Zero means no loss.')
  network_group.add_option('-w', '--init_cwnd', default='0',
      action='store',
      type='string',
      help='Set initial cwnd (linux only, requires kernel patch)')
  network_group.add_option('--net', default=None,
      action='store',
      type='choice',
      choices=OptionsWrapper.NET_CHOICES,
      help='Select a set of network options: %s.' % ', '.join(
          OptionsWrapper.NET_CHOICES))
  network_group.add_option('--shaping_type', default='dummynet',
      action='store',
      choices=('dummynet', 'proxy'),
      help='When shaping is configured (i.e. --up, --down, etc.) decides '
           'whether to use |dummynet| (default), or |proxy| servers.')
  option_parser.add_option_group(network_group)
  harness_group = optparse.OptionGroup(option_parser,
      'Replay Harness Options',
      'These advanced options configure various aspects of the replay harness')
  harness_group.add_option('-S', '--server', default=None,
      action='store',
      type='string',
      help='IP address of host running "replay.py --server_mode". '
           'This only changes the primary DNS nameserver to use the given IP.')
  harness_group.add_option('-M', '--server_mode', default=False,
      action='store_true',
      help='Run replay DNS & http proxies, and trafficshaping on --port '
           'without changing the primary DNS nameserver. '
           'Other hosts may connect to this using "replay.py --server" '
           'or by pointing their DNS to this server.')
  harness_group.add_option('-i', '--inject_scripts', default='deterministic.js',
      action='store',
      dest='inject_scripts',
      help='A comma separated list of JavaScript sources to inject in all '
           'pages. By default a script is injected that eliminates sources '
           'of entropy such as Date() and Math.random() deterministic. '
           'CAUTION: Without deterministic.js, many pages will not replay.')
  harness_group.add_option('-D', '--no-diff_unknown_requests', default=True,
      action='store_false',
      dest='diff_unknown_requests',
      help='During replay, do not show a diff of unknown requests against '
           'their nearest match in the archive.')
  harness_group.add_option('-C', '--use_closest_match', default=False,
      action='store_true',
      dest='use_closest_match',
      help='During replay, if a request is not found, serve the closest match'
           'in the archive instead of giving a 404.')
  harness_group.add_option('-U', '--use_server_delay', default=False,
      action='store_true',
      dest='use_server_delay',
      help='During replay, simulate server delay by delaying response time to'
           'requests.')
  harness_group.add_option('-I', '--screenshot_dir', default=None,
      action='store',
      type='string',
      help='Save PNG images of the loaded page in the given directory.')
  harness_group.add_option('-P', '--no-dns_private_passthrough', default=True,
      action='store_false',
      dest='dns_private_passthrough',
      help='Don\'t forward DNS requests that resolve to private network '
           'addresses. CAUTION: With this option important services like '
           'Kerberos will resolve to the HTTP proxy address.')
  harness_group.add_option('-x', '--no-dns_forwarding', default=True,
      action='store_false',
      dest='dns_forwarding',
      help='Don\'t forward DNS requests to the local replay server. '
           'CAUTION: With this option an external mechanism must be used to '
           'forward traffic to the replay server.')
  harness_group.add_option('-o', '--port', default=80,
      action='store',
      type='int',
      help='Port number to listen on.')
  harness_group.add_option('--ssl_port', default=443,
      action='store',
      type='int',
      help='SSL port number to listen on.')
  harness_group.add_option('--shaping_port', default=None,
      action='store',
      type='int',
      help='Port on which to apply traffic shaping. Defaults to the '
           'listen port (--port)')
  harness_group.add_option('--ssl_shaping_port', default=None,
      action='store',
      type='int',
      help='SSL port on which to apply traffic shaping. Defaults to the '
           'SSL listen port (--ssl_port)')
  harness_group.add_option('-c', '--certfile', default=None,
      action='store',
      type='string',
      help='Certificate file to use with SSL (gets auto-generated if needed).')
  harness_group.add_option('--no-ssl', default=True,
      action='store_false',
      dest='ssl',
      help='Do not setup an SSL proxy.')
  harness_group.add_option('--no-admin-check', default=True,
      action='store_false',
      dest='admin_check',
      help='Do not check if administrator access is needed.')
  # Attach the group only after all of its options are defined (previously
  # --no-admin-check was added after add_option_group, which relied on
  # optparse's shared-container behavior and read as an ordering bug).
  option_parser.add_option_group(harness_group)
  return option_parser
def main():
  """Parse the command line and run the replay server.

  Returns:
    The exit status from replay().
  """
  parser = GetOptionParser()
  parsed_options, positional_args = parser.parse_args()
  wrapped_options = OptionsWrapper(parsed_options, parser)
  if wrapped_options.server:
    # Client mode needs no archive file.
    return replay(wrapped_options, None)
  if len(positional_args) != 1:
    parser.error('Must specify a replay_file')
  return replay(wrapped_options, positional_args[0])
# Script entry point: exit with replay()'s status code.
if __name__ == '__main__':
  sys.exit(main())
| {
"content_hash": "42336135505b29bcc6d0a4eaff07beed",
"timestamp": "",
"source": "github",
"line_count": 507,
"max_line_length": 91,
"avg_line_length": 40.625246548323474,
"alnum_prop": 0.6615041025392048,
"repo_name": "windyuuy/opera",
"id": "5eb423e849a86fb90d8906a976f2a2a013556ffb",
"size": "21217",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/src/third_party/webpagereplay/replay.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "51642"
},
{
"name": "Batchfile",
"bytes": "35942"
},
{
"name": "C",
"bytes": "4303018"
},
{
"name": "C#",
"bytes": "35203"
},
{
"name": "C++",
"bytes": "207333360"
},
{
"name": "CMake",
"bytes": "25089"
},
{
"name": "CSS",
"bytes": "681256"
},
{
"name": "Dart",
"bytes": "24294"
},
{
"name": "Emacs Lisp",
"bytes": "25534"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "10400943"
},
{
"name": "IDL",
"bytes": "836"
},
{
"name": "Java",
"bytes": "2821184"
},
{
"name": "JavaScript",
"bytes": "14563996"
},
{
"name": "Lua",
"bytes": "13749"
},
{
"name": "Makefile",
"bytes": "55521"
},
{
"name": "Objective-C",
"bytes": "1211523"
},
{
"name": "Objective-C++",
"bytes": "6221908"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "82949"
},
{
"name": "Protocol Buffer",
"bytes": "280464"
},
{
"name": "Python",
"bytes": "12627773"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "894814"
},
{
"name": "VimL",
"bytes": "4953"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
} |
"""Tests for dirs.py."""
from tests.common import testing
import time
from titan.common.lib.google.apputils import basetest
from titan.files import files
from titan.files import dirs
# Shorthand aliases for the ModifiedPath action enum values used throughout
# these tests.
PATH_WRITE_ACTION = dirs.ModifiedPath.WRITE
PATH_DELETE_ACTION = dirs.ModifiedPath.DELETE
class DirManagingFile(dirs.DirManagerMixin, files.File):
  """A files.File that also maintains directory entities via DirManagerMixin."""
  pass
class DirManagerTest(testing.BaseTestCase):
  """Tests for directory bookkeeping: end-to-end listing and affected-dir math."""

  def tearDown(self):
    # Undo the factory registered in testEndToEnd so tests stay independent.
    files.UnregisterFileFactory()
    super(DirManagerTest, self).tearDown()

  def testEndToEnd(self):
    """Write/delete files and verify Dirs.List reflects the directory tree."""
    files.RegisterFileFactory(lambda *args, **kwargs: DirManagingFile)
    # Make the ETA buffer negative so tasks are available instantly for lease.
    self.stubs.SmartSet(dirs, 'TASKQUEUE_LEASE_ETA_BUFFER',
                        -(dirs.TASKQUEUE_LEASE_ETA_BUFFER * 86400))
    # Make time.time() return a constant (to guarantee tasks are all created
    # in the same window, so they can be processed with a single call).
    now = time.time()
    self.stubs.Set(dirs.time, 'time', lambda: now)
    files.File('/a/b/foo').Write('')
    files.File('/a/b/bar').Write('')
    files.File('/a/d/foo').Write('')
    # Run the consumer (the cron job).
    dir_task_consumer = dirs.DirTaskConsumer()
    dir_task_consumer.ProcessNextWindow()
    # List root dir.
    self.assertEqual(dirs.Dirs(['/a']), dirs.Dirs.List('/'))
    # List /a.
    self.assertEqual(dirs.Dirs(['/a/b', '/a/d']), dirs.Dirs.List('/a/'))
    # List /a/b/.
    self.assertEqual(dirs.Dirs([]), dirs.Dirs.List('/a/b/'))
    # List /fake/dir.
    self.assertEqual(dirs.Dirs([]), dirs.Dirs.List('/fake/dir'))
    # Test deleting directories.
    files.File('/a/d/foo').Delete()
    dir_task_consumer = dirs.DirTaskConsumer()
    dir_task_consumer.ProcessNextWindow()
    # List /a.
    self.assertEqual(dirs.Dirs(['/a/b']), dirs.Dirs.List('/a/'))
    self.assertEqual(dirs.Dirs(['/a']), dirs.Dirs.List('/'))
    # Delete the remaining files and list again.
    files.File('/a/b/foo').Delete()
    files.File('/a/b/bar').Delete()
    dir_task_consumer = dirs.DirTaskConsumer()
    dir_task_consumer.ProcessNextWindow()
    self.assertEqual(dirs.Dirs([]), dirs.Dirs.List('/'))

  def testComputeAffectedDirs(self):
    """ComputeAffectedDirs buckets ancestor dirs by the path's final action."""
    dir_service = dirs.DirService()
    # /a/b/foo is written.
    modified_path = dirs.ModifiedPath(
        '/a/b/foo', modified=0, action=PATH_WRITE_ACTION)
    affected_dirs = dir_service.ComputeAffectedDirs([modified_path])
    expected_affected_dirs = {
        'dirs_with_adds': set(['/a', '/a/b']),
        'dirs_with_deletes': set(),
    }
    self.assertEqual(expected_affected_dirs, affected_dirs)
    # /a/b/foo is deleted.
    modified_path = dirs.ModifiedPath(
        '/a/b/foo', modified=0, action=PATH_DELETE_ACTION)
    affected_dirs = dir_service.ComputeAffectedDirs([modified_path])
    expected_affected_dirs = {
        'dirs_with_adds': set(),
        'dirs_with_deletes': set(['/a', '/a/b']),
    }
    self.assertEqual(expected_affected_dirs, affected_dirs)
    # /a/b/foo is added, then deleted -- dirs should exist in only one list.
    added_path = dirs.ModifiedPath(
        '/a/b/foo', modified=123123.1, action=PATH_WRITE_ACTION)
    deleted_path = dirs.ModifiedPath(
        '/a/b/foo', modified=123123.2, action=PATH_DELETE_ACTION)
    affected_dirs = dir_service.ComputeAffectedDirs([added_path, deleted_path])
    expected_affected_dirs = {
        'dirs_with_adds': set(),
        'dirs_with_deletes': set(['/a', '/a/b']),
    }
    self.assertEqual(expected_affected_dirs, affected_dirs)
    # Test different file paths -- dirs should exist in both lists.
    added_path = dirs.ModifiedPath(
        '/a/b/foo', modified=123123.1, action=PATH_WRITE_ACTION)
    deleted_path = dirs.ModifiedPath(
        '/a/b/c/d/bar', modified=123123.2, action=PATH_DELETE_ACTION)
    affected_dirs = dir_service.ComputeAffectedDirs([added_path, deleted_path])
    expected_affected_dirs = {
        'dirs_with_adds': set(['/a', '/a/b']),
        'dirs_with_deletes': set(['/a', '/a/b', '/a/b/c', '/a/b/c/d']),
    }
    self.assertEqual(expected_affected_dirs, affected_dirs)
    # Test chronological ordering, even with out-of-order arguments.
    path1 = dirs.ModifiedPath(
        '/a/b/foo', modified=123123.0, action=PATH_DELETE_ACTION)
    path2 = dirs.ModifiedPath(
        '/a/b/foo', modified=123123.2, action=PATH_WRITE_ACTION)
    path3 = dirs.ModifiedPath(
        '/a/b/foo', modified=123123.1, action=PATH_DELETE_ACTION)
    affected_dirs = dir_service.ComputeAffectedDirs([path1, path2, path3])
    # The latest-modified action (the write at .2) should win.
    expected_affected_dirs = {
        'dirs_with_adds': set(['/a', '/a/b']),
        'dirs_with_deletes': set(),
    }
    self.assertEqual(expected_affected_dirs, affected_dirs)
# Test entry point.
if __name__ == '__main__':
  basetest.main()
| {
"content_hash": "5753171f3f2fa70dc955da4d28449516",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 37.77777777777778,
"alnum_prop": 0.6497899159663866,
"repo_name": "paulftw/titan-files",
"id": "9d8bb377b8325a8892c71c1db864a18ab6a6f9c1",
"size": "5379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/files/dirs_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "11357"
},
{
"name": "Python",
"bytes": "684850"
}
],
"symlink_target": ""
} |
import os
import tempfile
import shutil
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.log import logger
from pip.locations import (virtualenv_no_global, distutils_scheme,
build_prefix)
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
class InstallCommand(Command):
    """
    Install packages from:

    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.

    pip also supports installing from "requirements files", which provide
    an easy way to specify a whole environment to be installed.
    """
    name = 'install'

    usage = """
      %prog [options] <requirement specifier> ...
      %prog [options] -r <requirements file> ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""

    summary = 'Install packages.'

    def __init__(self, *args, **kw):
        """Declare all of the `pip install` command-line options."""
        super(InstallCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        # Shared options defined centrally in pip.cmdoptions.
        cmd_opts.add_option(cmdoptions.editable.make())
        cmd_opts.add_option(cmdoptions.requirements.make())
        cmd_opts.add_option(cmdoptions.build_dir.make())

        cmd_opts.add_option(
            '-t', '--target',
            dest='target_dir',
            metavar='dir',
            default=None,
            help='Install packages into <dir>.')

        cmd_opts.add_option(
            '-d', '--download', '--download-dir', '--download-directory',
            dest='download_dir',
            metavar='dir',
            default=None,
            help=("Download packages into <dir> instead of installing them, "
                  "regardless of what's already installed."),
        )

        cmd_opts.add_option(cmdoptions.download_cache.make())
        cmd_opts.add_option(cmdoptions.src.make())

        cmd_opts.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all packages to the newest available version. '
                 'This process is recursive regardless of whether a dependency'
                 ' is already satisfied.'
        )

        cmd_opts.add_option(
            '--force-reinstall',
            dest='force_reinstall',
            action='store_true',
            help='When upgrading, reinstall all packages even if they are '
                 'already up-to-date.')

        cmd_opts.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages (reinstalling instead).')

        cmd_opts.add_option(cmdoptions.no_deps.make())

        cmd_opts.add_option(
            '--no-install',
            dest='no_install',
            action='store_true',
            help="DEPRECATED. Download and unpack all packages, but don't "
                 "actually install them."
        )

        cmd_opts.add_option(
            '--no-download',
            dest='no_download',
            action="store_true",
            help="DEPRECATED. Don't download any packages, just install the "
                 "ones already downloaded (completes an install run with "
                 "--no-install).")

        cmd_opts.add_option(cmdoptions.install_options.make())
        cmd_opts.add_option(cmdoptions.global_options.make())

        cmd_opts.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help='Install using the user scheme.')

        cmd_opts.add_option(
            '--egg',
            dest='as_egg',
            action='store_true',
            help="Install packages as eggs, not 'flat', like pip normally "
                 "does. This option is not about installing *from* eggs. "
                 "(WARNING: Because this option overrides pip's normal install"
                 " logic, requirements files may not behave as expected.)")

        cmd_opts.add_option(
            '--root',
            dest='root_path',
            metavar='dir',
            default=None,
            help="Install everything relative to this alternate root "
                 "directory.")

        # --compile/--no-compile share one dest; the default is to compile.
        cmd_opts.add_option(
            "--compile",
            action="store_true",
            dest="compile",
            default=True,
            help="Compile py files to pyc",
        )

        cmd_opts.add_option(
            "--no-compile",
            action="store_false",
            dest="compile",
            help="Do not compile py files to pyc",
        )

        cmd_opts.add_option(cmdoptions.use_wheel.make())
        cmd_opts.add_option(cmdoptions.no_use_wheel.make())

        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help="Include pre-release and development versions. By default, "
                 "pip only finds stable versions.")

        cmd_opts.add_option(cmdoptions.no_clean.make())

        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        # Index options first, then the install-specific options.
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this install command.
        This method is meant to be overridden by subclasses, not
        called directly.
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            use_wheel=options.use_wheel,
            allow_external=options.allow_external,
            allow_unverified=options.allow_unverified,
            allow_all_external=options.allow_all_external,
            allow_all_prereleases=options.pre,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )

    def run(self, options, args):
        """Resolve the requested requirements, then download and/or install them.

        Returns:
            The populated RequirementSet (or None when no requirements were
            given).
        """
        # Warn about options that are deprecated as of pip 1.7.
        if (
            options.no_install or
            options.no_download or
            (options.build_dir != build_prefix) or
            options.no_clean
        ):
            logger.deprecated(
                '1.7',
                'DEPRECATION: --no-install, --no-download, --build, '
                'and --no-clean are deprecated. See '
                'https://github.com/pypa/pip/issues/906.',
            )

        # --download implies download-only and ignoring what is installed.
        if options.download_dir:
            options.no_install = True
            options.ignore_installed = True

        options.build_dir = os.path.abspath(options.build_dir)
        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')

        # --target installs into a temp dir first, then moves the result
        # (see the end of this method).
        temp_target_dir = None
        if options.target_dir:
            options.ignore_installed = True
            temp_target_dir = tempfile.mkdtemp()
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir)
                    and not os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            install_options.append('--home=' + temp_target_dir)

        global_options = options.global_options or []
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
            index_urls = []

        if options.use_mirrors:
            logger.deprecated(
                "1.7",
                "--use-mirrors has been deprecated and will be removed"
                " in the future. Explicit uses of --index-url and/or "
                "--extra-index-url is suggested."
            )

        if options.mirrors:
            logger.deprecated(
                "1.7",
                "--mirrors has been deprecated and will be removed in "
                " the future. Explicit uses of --index-url and/or "
                "--extra-index-url is suggested."
            )
            index_urls += options.mirrors

        if options.download_cache:
            logger.deprecated(
                "1.8",
                "--download-cache has been deprecated and will be removed in "
                " the future. Pip now automatically uses and configures its "
                "cache."
            )

        with self._build_session(options) as session:

            finder = self._build_package_finder(options, index_urls, session)

            requirement_set = RequirementSet(
                build_dir=options.build_dir,
                src_dir=options.src_dir,
                download_dir=options.download_dir,
                upgrade=options.upgrade,
                as_egg=options.as_egg,
                ignore_installed=options.ignore_installed,
                ignore_dependencies=options.ignore_dependencies,
                force_reinstall=options.force_reinstall,
                use_user_site=options.use_user_site,
                target_dir=temp_target_dir,
                session=session,
                pycompile=options.compile,
            )

            # Collect requirements from positional args, -e, and -r files.
            for name in args:
                requirement_set.add_requirement(
                    InstallRequirement.from_line(name, None))

            for name in options.editables:
                requirement_set.add_requirement(
                    InstallRequirement.from_editable(
                        name,
                        default_vcs=options.default_vcs
                    )
                )

            for filename in options.requirements:
                for req in parse_requirements(
                        filename,
                        finder=finder, options=options, session=session):
                    requirement_set.add_requirement(req)

            if not requirement_set.has_requirements:
                opts = {'name': self.name}
                if options.find_links:
                    msg = ('You must give at least one requirement to %(name)s'
                           ' (maybe you meant "pip %(name)s %(links)s"?)' %
                           dict(opts, links=' '.join(options.find_links)))
                else:
                    msg = ('You must give at least one requirement '
                           'to %(name)s (see "pip help %(name)s")' % opts)
                logger.warn(msg)
                return

            try:
                if not options.no_download:
                    requirement_set.prepare_files(finder)
                else:
                    requirement_set.locate_files()

                if not options.no_install:
                    requirement_set.install(
                        install_options,
                        global_options,
                        root=options.root_path,
                    )
                    installed = ' '.join([
                        req.name for req in
                        requirement_set.successfully_installed
                    ])
                    if installed:
                        logger.notify('Successfully installed %s' % installed)
                else:
                    downloaded = ' '.join([
                        req.name
                        for req in requirement_set.successfully_downloaded
                    ])
                    if downloaded:
                        logger.notify(
                            'Successfully downloaded %s' % downloaded
                        )
            except PreviousBuildDirError:
                # Keep the build dir around so the user can inspect it.
                options.no_clean = True
                raise
            finally:
                # Clean up
                if ((not options.no_clean)
                        and ((not options.no_install)
                             or options.download_dir)):
                    requirement_set.cleanup_files()

            # For --target: move the staged install from the temp dir into
            # the requested directory, then discard the temp dir.
            if options.target_dir:
                if not os.path.exists(options.target_dir):
                    os.makedirs(options.target_dir)
                lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
                for item in os.listdir(lib_dir):
                    shutil.move(
                        os.path.join(lib_dir, item),
                        os.path.join(options.target_dir, item),
                    )
                shutil.rmtree(temp_target_dir)
            return requirement_set
| {
"content_hash": "14c31bdaa19bf5096306a3d6df992775",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 79,
"avg_line_length": 36.90056818181818,
"alnum_prop": 0.5219031488182309,
"repo_name": "laborautonomo/pip",
"id": "e267d0a8b8e5a25614b188646910f008fce3ada4",
"size": "12989",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pip/commands/install.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import rospy,sys
from std_msgs.msg import String
sys.path.append("/home/docker/catkin_ws/devel/lib/python2.7/dist-packages")
from mavros_msgs.msg import State,PositionTarget
from mavros_msgs.srv import SetMode,CommandBool
from geometry_msgs.msg import PoseStamped,Point
# Latest mavros State message, updated by callback(); None until first message.
state=None
def callback(data):
    """Cache the most recent mavros State message in the module-level `state`."""
    global state
    state = data
def listener():
    """Arm the vehicle in OFFBOARD mode and fly a scripted ~70 s profile.

    Timeline (dt = seconds since start): dt<20 climb/hold at z=10 m;
    dt<40 fly velocity x=-0.5 m/s with yaw -90 deg; dt<60 fly x=+0.5 m/s;
    afterwards command z=0 to bring the vehicle down.
    """
    rospy.init_node('listener', anonymous=True)
    rate=rospy.Rate(20)
    rospy.Subscriber('mavros/state', State, callback)
    local_pos_pub=rospy.Publisher('mavros/setpoint_position/local',PoseStamped,queue_size=2)
    local_posi_raw_pub=rospy.Publisher('mavros/setpoint_raw/local',PositionTarget,queue_size=2)
    #local_vel_pub=rospy.Publisher('mavros/setpoint_position/local',PoseStamped,queue_size=2)
    set_mode_cmd=rospy.ServiceProxy('mavros/set_mode',SetMode)
    arm_cmd=rospy.ServiceProxy('mavros/cmd/arming',CommandBool)
    newpos=PoseStamped()
    newpos.pose.position.z=10.0#=Point(0,0,2)
    #set_mode_cmd('POSITION CONTROL','')
    #for _ in range(10):
    #    rate.sleep()
    # Velocity setpoint: -0.5 m/s in x with a -90 degree yaw.
    # NOTE: 3.142 approximates math.pi (yaw is radians).
    newvel=PositionTarget()
    newvel.velocity.x=-0.5
    newvel.yaw=-90.0/180.0*3.142
    # Mask out acceleration and position fields so only velocity+yaw are used.
    # NOTE(review): FRAME_LOCAL_NED is a coordinate_frame constant, not a
    # type_mask bit -- OR-ing it here looks unintended; presumably
    # newvel.coordinate_frame should be set instead. Confirm against
    # mavros_msgs/PositionTarget.
    newvel.type_mask=newvel.FRAME_LOCAL_NED | newvel.IGNORE_AFX | newvel.IGNORE_AFY |newvel.IGNORE_AFZ
    newvel.type_mask=newvel.type_mask | newvel.IGNORE_PX | newvel.IGNORE_PY | newvel.IGNORE_PZ
    # Stream setpoints for ~5 s (100 ticks at 20 Hz) before requesting
    # OFFBOARD mode.
    for _ in range(100):
        rate.sleep()
        local_pos_pub.publish(newpos);
    mymode='OFFBOARD'
    last_req=rospy.get_time()
    start_time=last_req;
    #print '---',rospy.get_time(),start_time
    while rospy.get_time()-start_time<70:
        # Re-request OFFBOARD / arming at most every 5 seconds.
        if rospy.get_time()-last_req>5:
            if state.mode != mymode:
                set_mode_cmd(0,mymode)
                rospy.loginfo('setting mode...')
            elif not state.armed:
                arm_cmd(True)
                rospy.loginfo('arming...')
            last_req=rospy.get_time()
        dt=rospy.get_time()-start_time
        if dt<20:
            local_pos_pub.publish(newpos);
        elif dt<40 :
            local_posi_raw_pub.publish(newvel);
        elif dt<60 :
            newvel.velocity.x=0.5
            local_posi_raw_pub.publish(newvel);
        else:
            # Land phase: command altitude 0.
            newpos.pose.position.z=0
            local_pos_pub.publish(newpos);
        rate.sleep()
# Script entry point.
if __name__ == '__main__':
    listener()
| {
"content_hash": "b24f544e2305221dda58f62297d6e42d",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 102,
"avg_line_length": 36.492537313432834,
"alnum_prop": 0.6368098159509202,
"repo_name": "orig74/DroneSimLab",
"id": "4678f3c50f212e6cca906a60b2f13be078b1b438",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/px4_gazebo/demo_catkin_ws/src/control_test/scripts/offb2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "6141"
},
{
"name": "Dockerfile",
"bytes": "15473"
},
{
"name": "Python",
"bytes": "15489"
},
{
"name": "Shell",
"bytes": "11172"
},
{
"name": "Vim script",
"bytes": "26"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from textwrap import dedent
from mock import MagicMock
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.exceptions import TargetDefinitionException
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.context import Context
from pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase
from twitter.common.collections import OrderedSet
from pants.contrib.scrooge.tasks.scrooge_gen import ScroogeGen
# Scrooge compiler flag passed via compiler_args in the tests below.
GEN_ADAPT = '--gen-adapt'
class ScroogeGenTest(NailgunTaskTestBase):
  @classmethod
  def task_type(cls):
    """The task class exercised by this test case."""
    return ScroogeGen
  @classmethod
  def alias_groups(cls):
    """Extend the base BUILD-file aliases with the target types used here."""
    return super(ScroogeGenTest, cls).alias_groups().merge(
        BuildFileAliases(targets={'java_thrift_library': JavaThriftLibrary,
                                  'java_library': JavaLibrary,
                                  'scala_library': ScalaLibrary}))
  def test_validate_compiler_configs(self):
    """_validate_compiler_configs accepts targets that use the thrift defaults."""
    # Set synthetic defaults for the global scope.
    self.set_options_for_scope('thrift-defaults',
                               compiler='unchecked',
                               language='uniform',
                               service_deps='service_deps',
                               structs_deps='structs_deps')
    self.add_to_build_file('test_validate', dedent('''
        java_thrift_library(name='one',
          sources=[],
          dependencies=[],
        )
        '''))
    self.add_to_build_file('test_validate', dedent('''
        java_thrift_library(name='two',
          sources=[],
          dependencies=[':one'],
        )
        '''))
    target = self.target('test_validate:one')
    context = self.context(target_roots=[target])
    task = self.prepare_execute(context)
    # Neither target should raise: 'one' has no deps, 'two' depends on 'one'
    # with an identical (default) compiler config.
    task._validate_compiler_configs(self.target('test_validate:one'))
    task._validate_compiler_configs(self.target('test_validate:two'))
def test_scala(self):
sources = [os.path.join(self.test_workdir, 'org/pantsbuild/example/Example.scala')]
self._test_help('scala', ScalaLibrary, [GEN_ADAPT], sources)
def test_compiler_args(self):
sources = [os.path.join(self.test_workdir, 'org/pantsbuild/example/Example.scala')]
self._test_help('scala', ScalaLibrary, [GEN_ADAPT], sources)
def test_android(self):
sources = [os.path.join(self.test_workdir, 'org/pantsbuild/android_example/Example.java')]
self._test_help('android', JavaLibrary, [GEN_ADAPT], sources)
  def test_invalid_lang(self):
    """An unsupported `language` value must raise TargetDefinitionException."""
    with self.assertRaises(TargetDefinitionException):
      self._test_help('not-a-lang', JavaLibrary, [GEN_ADAPT], [])
def test_empty_compiler_args(self):
sources = [os.path.join(self.test_workdir, 'org/pantsbuild/example/Example.scala')]
self._test_help('scala', ScalaLibrary, [], sources)
def compiler_args_to_string(self, compiler_args):
quoted = ["'{}'".format(x) for x in compiler_args]
comma_separated = ', '.join(quoted)
return '[{}]'.format(comma_separated)
  def _test_create_build_str(self, language, compiler_args):
    """Return a BUILD-file snippet declaring the fixture java_thrift_library."""
    compiler_args_str = self.compiler_args_to_string(compiler_args)
    return dedent('''
        java_thrift_library(name='a',
          sources=['a.thrift'],
          dependencies=[],
          compiler='scrooge',
          language='{language}',
          compiler_args={compiler_args},
          strict_deps=True,
        )
        '''.format(language=language, compiler_args=compiler_args_str))
  def _test_help(self, language, library_type, compiler_args, sources):
    """Run ScroogeGen over a one-struct thrift file and check the synthetic target.

    Args:
      language: value for the fixture target's `language` field.
      library_type: expected target type of the injected codegen library.
      compiler_args: scrooge compiler args for the fixture target.
      sources: generated source paths the mocked gen step should report.
    """
    contents = dedent('''#@namespace android org.pantsbuild.android_example
      namespace java org.pantsbuild.example
      struct Example {
      1: optional i64 number
      }
      ''')
    self.create_file(relpath='test_smoke/a.thrift', contents=contents)
    build_string = self._test_create_build_str(language, compiler_args)
    self.add_to_build_file('test_smoke', build_string)
    target = self.target('test_smoke:a')
    context = self.context(target_roots=[target])
    task = self.prepare_execute(context)
    # Stub out the real scrooge invocation.
    task.gen = MagicMock()
    task.gen.return_value = {'test_smoke/a.thrift': sources}
    saved_add_new_target = Context.add_new_target
    try:
      mock = MagicMock()
      Context.add_new_target = mock
      task.execute()
      # Exactly one synthetic target is injected, derived from the thrift
      # target, with the expected type and no materialized sources.
      self.assertEqual(1, mock.call_count)
      _, call_kwargs = mock.call_args
      self.assertEqual(call_kwargs['target_type'], library_type)
      self.assertEqual(call_kwargs['dependencies'], OrderedSet())
      self.assertEqual(call_kwargs['provides'], None)
      self.assertEqual(call_kwargs['derived_from'], target)
      self.assertEqual(call_kwargs['strict_deps'], True)
      sources = call_kwargs['sources']
      self.assertEqual(sources.files, ())
    finally:
      # Always restore the patched classmethod.
      Context.add_new_target = saved_add_new_target
def test_basic_deps(self):
    # A plain struct declares neither a service nor an exception.
    contents = dedent('''#@namespace android org.pantsbuild.android_example
namespace java org.pantsbuild.example
struct Example {
1: optional i64 number
}
''')
    self._test_dependencies_help(contents, False, False)
def test_service_deps(self):
    # A thrift service declaration must be detected as declaring a service.
    contents = dedent('''#@namespace android org.pantsbuild.android_example
namespace java org.pantsbuild.example
service MultiplicationService
{
int multiply(1:int n1, 2:int n2),
}''')
    self._test_dependencies_help(contents, True, False)
def test_exception_deps(self):
    # A thrift exception declaration must be detected as declaring an exception.
    contents = dedent('''#@namespace android org.pantsbuild.android_example
namespace java org.pantsbuild.example
exception InvalidOperation {
1: i32 what,
2: string why
}''')
    self._test_dependencies_help(contents, False, True)
def _test_dependencies_help(self, contents, declares_service, declares_exception):
    """Write a thrift file and check ScroogeGen's service/exception detection."""
    path = 'test_smoke/a.thrift'
    self.create_file(relpath=path, contents=contents)
    self.assertEqual(declares_service, ScroogeGen._declares_service(path))
    self.assertEqual(declares_exception, ScroogeGen._declares_exception(path))
| {
"content_hash": "5414d7a8a2dac60a8df5a4a49285c191",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 94,
"avg_line_length": 36.51176470588236,
"alnum_prop": 0.6742387626872886,
"repo_name": "twitter/pants",
"id": "fe4b8f20b14b54dbfdaaded128d743da709e648b",
"size": "6354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
# changes to tokenize more like Posix shells by Vinay Sajip, July 2016.
import os
import re
import sys
from collections import deque
from io import StringIO
__all__ = ["shlex", "split", "quote", "join"]
class shlex:
    "A lexical analyzer class for simple shell-like syntaxes."

    def __init__(self, instream=None, infile=None, posix=False,
                 punctuation_chars=False):
        # A plain string is wrapped so the lexer always reads from a stream.
        if isinstance(instream, str):
            instream = StringIO(instream)
        if instream is not None:
            self.instream = instream
            self.infile = infile
        else:
            # No input given: lex standard input.
            self.instream = sys.stdin
            self.infile = None
        self.posix = posix
        # EOF sentinel: None in POSIX mode, '' otherwise (see __next__).
        if posix:
            self.eof = None
        else:
            self.eof = ''
        self.commenters = '#'
        # NOTE: 'f'/'e' appear transposed ('abcdfe...') in the upstream
        # source as well; the character set is nevertheless complete.
        self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
        if self.posix:
            # POSIX mode additionally treats Latin-1 letters as word chars.
            self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
                               'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
        self.whitespace = ' \t\r\n'
        self.whitespace_split = False
        self.quotes = '\'"'
        self.escape = '\\'
        # Quote characters within which escape characters are honoured.
        self.escapedquotes = '"'
        # Lexer state machine: ' ' = whitespace, 'a' = in word, 'c' = in a
        # punctuation run, a quote char = inside quotes, the escape char =
        # just after a backslash, None = input exhausted.
        self.state = ' '
        self.pushback = deque()      # tokens pushed back via push_token()
        self.lineno = 1
        self.debug = 0
        self.token = ''
        self.filestack = deque()     # saved (infile, instream, lineno) frames
        self.source = None           # keyword that triggers file inclusion
        if not punctuation_chars:
            punctuation_chars = ''
        elif punctuation_chars is True:
            punctuation_chars = '();<>|&'
        self._punctuation_chars = punctuation_chars
        if punctuation_chars:
            # _pushback_chars is a push back queue used by lookahead logic
            self._pushback_chars = deque()
            # these chars added because allowed in file names, args, wildcards
            self.wordchars += '~-./*?='
            # remove any punctuation chars from wordchars
            t = self.wordchars.maketrans(dict.fromkeys(punctuation_chars))
            self.wordchars = self.wordchars.translate(t)

    @property
    def punctuation_chars(self):
        # Read-only: fixed in __init__ because wordchars is derived from it.
        return self._punctuation_chars

    def push_token(self, tok):
        "Push a token onto the stack popped by the get_token method"
        if self.debug >= 1:
            print("shlex: pushing token " + repr(tok))
        self.pushback.appendleft(tok)

    def push_source(self, newstream, newfile=None):
        "Push an input source onto the lexer's input source stack."
        if isinstance(newstream, str):
            newstream = StringIO(newstream)
        # Save the current source so pop_source() can restore it.
        self.filestack.appendleft((self.infile, self.instream, self.lineno))
        self.infile = newfile
        self.instream = newstream
        self.lineno = 1
        if self.debug:
            if newfile is not None:
                print('shlex: pushing to file %s' % (self.infile,))
            else:
                print('shlex: pushing to stream %s' % (self.instream,))

    def pop_source(self):
        "Pop the input source stack."
        self.instream.close()
        (self.infile, self.instream, self.lineno) = self.filestack.popleft()
        if self.debug:
            print('shlex: popping to %s, line %d' \
                  % (self.instream, self.lineno))
        self.state = ' '

    def get_token(self):
        "Get a token from the input stream (or from stack if it's nonempty)"
        if self.pushback:
            tok = self.pushback.popleft()
            if self.debug >= 1:
                print("shlex: popping token " + repr(tok))
            return tok
        # No pushback.  Get a token.
        raw = self.read_token()
        # Handle inclusions
        if self.source is not None:
            while raw == self.source:
                spec = self.sourcehook(self.read_token())
                if spec:
                    (newfile, newstream) = spec
                    self.push_source(newstream, newfile)
                raw = self.get_token()
        # Maybe we got EOF instead?
        while raw == self.eof:
            if not self.filestack:
                return self.eof
            else:
                self.pop_source()
                raw = self.get_token()
        # Neither inclusion nor EOF
        if self.debug >= 1:
            if raw != self.eof:
                print("shlex: token=" + repr(raw))
            else:
                print("shlex: token=EOF")
        return raw

    def read_token(self):
        """Read one raw token from the stream (the state-machine core)."""
        quoted = False
        escapedstate = ' '
        while True:
            # Prefer characters pushed back by the punctuation lookahead.
            if self.punctuation_chars and self._pushback_chars:
                nextchar = self._pushback_chars.pop()
            else:
                nextchar = self.instream.read(1)
            if nextchar == '\n':
                self.lineno += 1
            if self.debug >= 3:
                print("shlex: in state %r I see character: %r" % (self.state,
                                                                  nextchar))
            if self.state is None:
                self.token = ''        # past end of file
                break
            elif self.state == ' ':
                if not nextchar:
                    self.state = None  # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print("shlex: I see whitespace in whitespace state")
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    # Comment: discard the rest of the line.
                    self.instream.readline()
                    self.lineno += 1
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif nextchar in self.wordchars:
                    self.token = nextchar
                    self.state = 'a'
                elif nextchar in self.punctuation_chars:
                    self.token = nextchar
                    self.state = 'c'
                elif nextchar in self.quotes:
                    # Non-POSIX mode keeps the quote characters in the token.
                    if not self.posix:
                        self.token = nextchar
                    self.state = nextchar
                elif self.whitespace_split:
                    self.token = nextchar
                    self.state = 'a'
                else:
                    self.token = nextchar
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
            elif self.state in self.quotes:
                quoted = True
                if not nextchar:       # end of file
                    if self.debug >= 2:
                        print("shlex: I see EOF in quotes state")
                    # XXX what error should be raised here?
                    raise ValueError("No closing quotation")
                if nextchar == self.state:
                    # Closing quote.
                    if not self.posix:
                        self.token += nextchar
                        self.state = ' '
                        break
                    else:
                        self.state = 'a'
                elif (self.posix and nextchar in self.escape and self.state
                      in self.escapedquotes):
                    escapedstate = self.state
                    self.state = nextchar
                else:
                    self.token += nextchar
            elif self.state in self.escape:
                if not nextchar:       # end of file
                    if self.debug >= 2:
                        print("shlex: I see EOF in escape state")
                    # XXX what error should be raised here?
                    raise ValueError("No escaped character")
                # In posix shells, only the quote itself or the escape
                # character may be escaped within quotes.
                if (escapedstate in self.quotes and
                        nextchar != self.state and nextchar != escapedstate):
                    self.token += self.state
                self.token += nextchar
                self.state = escapedstate
            elif self.state in ('a', 'c'):
                if not nextchar:
                    self.state = None   # end of file
                    break
                elif nextchar in self.whitespace:
                    if self.debug >= 2:
                        print("shlex: I see whitespace in word state")
                    self.state = ' '
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
                elif nextchar in self.commenters:
                    self.instream.readline()
                    self.lineno += 1
                    # In POSIX mode a comment also terminates the word.
                    if self.posix:
                        self.state = ' '
                        if self.token or (self.posix and quoted):
                            break   # emit current token
                        else:
                            continue
                elif self.state == 'c':
                    # Punctuation run: extend it, or push the char back so
                    # the next read_token() call sees it.
                    if nextchar in self.punctuation_chars:
                        self.token += nextchar
                    else:
                        if nextchar not in self.whitespace:
                            self._pushback_chars.append(nextchar)
                        self.state = ' '
                        break
                elif self.posix and nextchar in self.quotes:
                    self.state = nextchar
                elif self.posix and nextchar in self.escape:
                    escapedstate = 'a'
                    self.state = nextchar
                elif (nextchar in self.wordchars or nextchar in self.quotes
                      or (self.whitespace_split and
                          nextchar not in self.punctuation_chars)):
                    self.token += nextchar
                else:
                    if self.punctuation_chars:
                        self._pushback_chars.append(nextchar)
                    else:
                        self.pushback.appendleft(nextchar)
                    if self.debug >= 2:
                        print("shlex: I see punctuation in word state")
                    self.state = ' '
                    if self.token or (self.posix and quoted):
                        break   # emit current token
                    else:
                        continue
        result = self.token
        self.token = ''
        # In POSIX mode an empty unquoted token means EOF (None), while ''
        # from quotes is a legitimate empty-string token.
        if self.posix and not quoted and result == '':
            result = None
        if self.debug > 1:
            if result:
                print("shlex: raw token=" + repr(result))
            else:
                print("shlex: raw token=EOF")
        return result

    def sourcehook(self, newfile):
        "Hook called on a filename to be sourced."
        if newfile[0] == '"':
            newfile = newfile[1:-1]
        # This implements cpp-like semantics for relative-path inclusion.
        if isinstance(self.infile, str) and not os.path.isabs(newfile):
            newfile = os.path.join(os.path.dirname(self.infile), newfile)
        return (newfile, open(newfile, "r"))

    def error_leader(self, infile=None, lineno=None):
        "Emit a C-compiler-like, Emacs-friendly error-message leader."
        if infile is None:
            infile = self.infile
        if lineno is None:
            lineno = self.lineno
        return "\"%s\", line %d: " % (infile, lineno)

    def __iter__(self):
        return self

    def __next__(self):
        # Iterate tokens until the EOF sentinel is seen.
        token = self.get_token()
        if token == self.eof:
            raise StopIteration
        return token
def split(s, comments=False, posix=True):
    """Split the string *s* using shell-like syntax."""
    lexer = shlex(s, posix=posix)
    lexer.whitespace_split = True
    if not comments:
        # Disable '#' comment handling so '#' is an ordinary character.
        lexer.commenters = ''
    return list(lexer)
def join(split_command):
    """Return a shell-escaped string from *split_command*."""
    return ' '.join(map(quote, split_command))
# Any character outside this safe set forces the whole string to be quoted.
_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII).search


def quote(s):
    """Return a shell-escaped version of the string *s*."""
    if not s:
        # An empty argument must still survive word splitting.
        return "''"
    if _find_unsafe(s) is None:
        # Nothing dangerous: return the string untouched.
        return s
    # use single quotes, and put single quotes into double quotes
    # the string $'b is then quoted as '$'"'"'b'
    escaped = s.replace("'", "'\"'\"'")
    return "'" + escaped + "'"
def _print_tokens(lexer):
    """Consume and print every token from *lexer* (debug helper)."""
    while True:
        tok = lexer.get_token()
        if not tok:
            break
        print("Token: " + repr(tok))
# Simple CLI: tokenize stdin, or the file named by the first argument.
if __name__ == '__main__':
    if len(sys.argv) == 1:
        _print_tokens(shlex())
    else:
        fn = sys.argv[1]
        with open(fn) as f:
            _print_tokens(shlex(f, fn))
| {
"content_hash": "381007ca65bf1e30afb3960ed84a4a95",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 78,
"avg_line_length": 38.33236994219653,
"alnum_prop": 0.4951368468672246,
"repo_name": "xyuanmu/XX-Net",
"id": "c817274583135a3a64711cd38801276fe395a2c7",
"size": "13325",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python3.8.2/Lib/shlex.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4145"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "94951"
},
{
"name": "HTML",
"bytes": "252022"
},
{
"name": "JavaScript",
"bytes": "22405"
},
{
"name": "Python",
"bytes": "15474534"
},
{
"name": "Shell",
"bytes": "10208"
},
{
"name": "Visual Basic",
"bytes": "1795"
}
],
"symlink_target": ""
} |
__all__ = [
'PJS'
]
def escape_value(value):
    """Wrap string values in double quotes for JS emission; pass others through."""
    # Python 2 module: both byte and unicode strings are quoted.
    if type(value) in (str, unicode):
        return '"{0}"'.format(value)
    return value
class Attr(dict):
    """Dict-backed reference to a JS attribute, rendered as a dotted path."""

    def __str__(self):
        # With a namespace render 'ns.attr'; otherwise just the bare name.
        if self['namespace']:
            return u'{namespace}.{attr_name}'.format(**self)
        return self['attr_name']
class AttrAccessor(object):
    """Descriptor mapping a Python attribute to a DOM attribute on a Node.

    Reading returns an Attr reference; assigning emits a JS assignment
    statement into the owning node's context.
    """

    def __init__(self, attr_name):
        self.attr_name = attr_name

    def __get__(self, obj, owner):
        attr = Attr()
        attr['namespace'] = obj.namespace
        attr['attr_name'] = self.attr_name
        return attr

    def __set__(self, obj, value):
        # Record '<ns>.<attr> = <value>' in the root context's statement list.
        obj.root.context.nodes.append(u'{0}.{1} = {2}'.format(obj.namespace, self.attr_name, escape_value(value)))

    def __delete__(self, obj):
        # BUG FIX: the descriptor deletion hook is __delete__, not __del__.
        # The previous __del__(self, obj) signature also broke the instance
        # finalizer, which is called with self only.
        pass
class Node(object):
    """A DOM node scope that accumulates JS statements and renders as an IIFE."""

    # Descriptor-backed shortcuts for the DOM innerHTML / innerText attributes.
    html = AttrAccessor('innerHTML')
    text = AttrAccessor('innerText')

    def __init__(self, selector=None, parent=None, level=0, multi=False, var=None, namespace=''):
        self.selector = selector
        self.parent = parent
        self.level = level + 1
        self.multi = multi
        self.nodes = []   # rendered JS of child statements / child nodes
        self.var = var
        # Child namespaces repeat the parent's last character (e.g. 'w' ->
        # 'ww'); roots take the first letter of their var name.
        # NOTE(review): when namespace == '' and var is None this raises
        # TypeError — presumably selector or var is always supplied; confirm.
        self.namespace = namespace and namespace + namespace[-1] or var[0]
        self.delim = u'\n' + '\t' * self.level
        if not parent:
            # A parentless node is its own root and the initial context.
            self.root = self
            self.context = self
        else:
            self.root = parent.root

    def __enter__(self):
        # Entering a 'with' block makes this node the active emit target.
        self.root.context = self
        return self

    def __exit__(self, e, v, t):
        # Restore the parent context and fold our rendered JS into it.
        self.root.context = self.parent
        if self.parent:
            self.parent.add_node(self)

    def add_node(self, node):
        # Stores the child's rendered JS string, not the node object itself.
        self.nodes.append(node.render())

    def get_selector(self):
        """JS expression that locates this node in the parent's scope."""
        if self.selector:
            # querySelector / querySelectorAll relative to the parent.
            return u'{0}.querySelector{1}("{2}")'.format(self.parent.namespace, self.multi and 'All' or '', self.selector)
        if self.parent:
            return u'{0}.{1}'.format(self.parent.namespace, self.var)
        else:
            return self.var

    def e(self, selector):
        """Child node for a single element matching *selector*."""
        return Node(selector=selector, parent=self, level=self.level, multi=False, namespace=self.namespace)

    def el(self, selector):
        """Child node for all elements matching *selector* (querySelectorAll)."""
        return Node(selector=selector, parent=self, level=self.level, multi=True, namespace=self.namespace)

    def render(self):
        """Wrap the accumulated statements in an IIFE bound to this namespace."""
        return u'(function({0}){{{3}{2};{4}}})({1})'.format(
            self.namespace,
            self.get_selector(),
            ';{0}'.format(self.delim).join(self.nodes),
            self.delim,
            self.delim[:-1]
        )
class Window(Node):
    """Node rooted at the browser window object."""

    @property
    def document(self):
        # Expose window.document as a child node one level down.
        return Node(parent=self, level=1, var='document')
class PJSDescriptor(object):
    """Descriptor that builds a fresh node instance on every access.

    The created node is also cached on the owning object as _node so the
    owner can later render it.
    """

    def __init__(self, klass, kwargs):
        self.klass = klass
        self.kwargs = kwargs

    def __get__(self, obj, owner):
        created = self.klass(**self.kwargs)
        obj._node = created
        return created

    def __set__(self, obj, value):
        # Assignment is deliberately ignored; access always rebuilds.
        pass
class PJS(object):
    """Top-level builder: attribute access/assignment emits JS statements."""

    # Entry points: each access builds a fresh root node (see PJSDescriptor).
    window = PJSDescriptor(Window, {'var':'window'})
    document = PJSDescriptor(Node, {'var':'document'})
    _node = None   # last root node created via window/document

    def __init__(self):
        self._node = None

    def __enter__(self):
        return self

    def __exit__(self, e, v, t):
        pass

    def __getattr__(self, attr):
        # Unknown attributes resolve to bare JS identifiers (Python 2 has_key).
        if vars(self).has_key(attr):
            return vars(self)[attr]
        else:
            return Attr(attr_name=attr, namespace=None)

    def __setattr__(self, attr, value):
        # Only bookkeeping attributes live on the instance; any other
        # assignment is emitted as a global JS assignment statement.
        if attr in ['_node', 'window', 'document']:
            vars(self)[attr] = value
        else:
            self._node.context.nodes.append(u'{0} = {1}'.format(attr, escape_value(value)))

    def var(self, var):
        """Emit a JS 'var' declaration from a name, list of names, or
        name->value mapping."""
        defines = []
        if type(var) == list:
            defines.extend(var)
        elif type(var) == dict:
            defines.extend(['{0} = {1}'.format(name, escape_value(value)) for name, value in var.items()])
        else:
            defines.append(var)
        self._node.context.nodes.append(u'var {0}'.format(', '.join(defines)))

    def render(self):
        # Render the most recently created root node.
        return self._node.render()
| {
"content_hash": "874c1a83b40edd33321a871e3c32596c",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 122,
"avg_line_length": 26.125,
"alnum_prop": 0.539712918660287,
"repo_name": "veeloox/ramen",
"id": "320fc88d9f3654285e064662fddcd8ccdaca430b",
"size": "4180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ramen/pjs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "94580"
}
],
"symlink_target": ""
} |
import pika, os, uuid, logging,sys, ConfigParser
import json
import time
import threading
logging.basicConfig()
#####################################################################################################################
# CR: I'll write all my comments starting with CR: if you don't mind, so you'll understand why I made those changes #
# TODO: I'll write stuff starting with TODO if it need to be done eventually #
#####################################################################################################################
# For cli
default_configuration_file = 'rabbitcoat.conf'
def validate(value, current, default=None):
    """Pick a setting value by priority: value, then current, then default.

    An explicit non-empty value wins; otherwise the existing current value is
    kept, unless it too is empty and a default was supplied.
    """
    value_empty = (value is None) or (value == "")
    if not value_empty:
        return value
    current_empty = (current is None) or (current == "")
    if default is not None and current_empty:
        return default
    return current
class RabbitFrame(threading.Thread):
    """Base wrapper around a pika SelectConnection bound to one queue.

    Declares an exchange named after the queue, declares the queue (durable)
    and binds them; _ready flips to True once that handshake completes.
    NOTE(review): subclasses threading.Thread but never calls
    threading.Thread.__init__ here (RabbitReceiver re-initialises Thread
    itself) — confirm whether this class really needs to be a Thread.
    """

    def __init__(self, logger, config, queue, server=None, user=None, password=None, vhost=None, timeout=None):
        self.server = self.user = self.password = self.vhost = self.timeout = None
        self.logger = logger
        self.queue = queue
        # Is the frame ready to start
        self._ready = False
        self._ioloop = None
        # Later this could be changed, but code declaring the exchange will need to be added.
        self.exchange = queue
        self.closing = False
        self.config = config
        self.__loadConfiguration()
        self.__initInfrastructure(server, user, password, vhost, timeout)

    def __loadConfiguration(self):
        # Pull connection defaults from the [RABBITMQ] section of the config
        # file; vhost falls back to the user name, timeout to 5 seconds.
        parser = ConfigParser.SafeConfigParser(allow_no_value=True)
        parser.read(self.config)
        self.server = validate(parser.get('RABBITMQ', 'server'), self.server)
        self.user = validate(parser.get('RABBITMQ', 'user'), self.user)
        self.vhost = validate(parser.get('RABBITMQ', 'vhost'), self.vhost, self.user)
        self.password = validate(parser.get('RABBITMQ', 'password'), self.password)
        self.timeout = validate(parser.getint('RABBITMQ', 'timeout'), self.timeout, 5)

    def __setInfrastructure(self, server=None, user=None, password=None, vhost=None, timeout=None):
        # Explicit constructor arguments override the config-file values.
        self.server = validate(server, self.server)
        self.user = validate(user, self.user)
        self.password = validate(password, self.password)
        self.vhost = validate(vhost, self.vhost)
        self.timeout = validate(timeout, self.timeout)

    def __initInfrastructure(self, server=None, user=None, password=None, vhost=None, timeout=None):
        self.__setInfrastructure(server, user, password, vhost, timeout)  # override config file if the user wants
        ampq_url = "amqp://%s:%s@%s/%s" % (self.user, self.password, self.server, self.vhost)
        # CLOUDAMQP_URL in the environment takes precedence over the built URL.
        self.url = os.environ.get('CLOUDAMQP_URL', ampq_url)
        self._connect()
        # Run pika's ioloop on a dedicated thread so async callbacks fire.
        self._ioloop = threading.Thread(target=self.connection.ioloop.start, name='%s_ioloop' % self.queue, args=())
        self._ioloop.start()

    def _connect(self):
        params = pika.URLParameters(self.url)
        params.socket_timeout = self.timeout
        self.connection = pika.SelectConnection(params,
                                                on_open_callback=self._onOpen,
                                                on_close_callback=self._onClose,
                                                stop_ioloop_on_close=False)

    def _onOpen(self, connection):
        self.connection.channel(on_open_callback=self._onChannelOpen)  # start a channel

    def _onClose(self, connection, reply_code, reply_text):
        self.channel = None
        if self.closing:
            # Deliberate shutdown: stop the ioloop thread.
            self.connection.ioloop.stop()
        else:
            self.logger.error('Channel closed, reopening')
            if not self.closing:
                # Create a new connection
                self._connect()
                # Since this gets called by the thread, we don't need to start another thread
                self.connection.ioloop.start()

    def _onChannelOpen(self, channel):
        self.channel = channel
        self.channel.add_on_close_callback(self._onChannelClose)
        self.channel.exchange_declare(self._onExchangeOk,
                                      self.exchange)  # self.EXCHANGE_TYPE)

    def _onChannelClose(self, channel, reply_code, reply_text):
        # When a channel is closed it's because of bad usage, close the channel
        self.closing = True
        self.connection.close()

    def _onExchangeOk(self, unused_frame):
        self.channel.queue_declare(self._onQueueOk, self.queue, durable=True)

    def _onQueueOk(self, method_frame):
        self.channel.queue_bind(self._onBindOk, self.queue, self.exchange)

    def _onBindOk(self, unused_frame):
        # Exchange declared, queue declared and bound: safe to publish/consume.
        self._ready = True
class RabbitSender(RabbitFrame):
    """Publisher for one queue; Send() retries transient publish failures."""

    # Seconds to sleep after a failed publish before retrying.
    failure_sleep = 10
    # Give up after this many failed attempts.
    max_retries = 3

    def __init__(self, logger, config, queue, reply_to=None):
        RabbitFrame.__init__(self, logger, config, queue)
        self.lock = threading.Lock()
        # Default reply-to queue used when Send() gets none.
        self.reply_to = reply_to
        # Block until the exchange/queue/bind handshake has completed.
        while not self._ready:
            time.sleep(0.5)

    def Send(self, data=None, corr_id=None, reply_to_queue=None):
        """Publish data (json-encoded, persistent) and return the correlation
        id used, or None after max_retries failures."""
        if corr_id == None:
            corr_id = str(uuid.uuid4())
        message = json.dumps(data)
        reply_to_queue = validate(reply_to_queue, self.reply_to)
        # Make this thread safe just in case
        retries = 0
        while True:
            # send a message
            try:
                with self.lock:
                    self.channel.basic_publish(exchange=self.exchange,
                                               routing_key=self.queue,
                                               body=message,
                                               properties=pika.BasicProperties(
                                                   delivery_mode = 2,  # make message persistent
                                                   correlation_id = corr_id,
                                                   reply_to = reply_to_queue,
                                               ))
                self.logger.debug("Sender: produced message to queue %s with:\n\tcorrelation ID: %s\n\tbody: %s" %(self.queue, corr_id, message))
                return corr_id
            except Exception:
                retries += 1
                # Never happened more than once
                if retries >= self.max_retries:
                    self.logger.exception("Error publishing to queue %s" %(self.queue))
                    return None
                time.sleep(self.failure_sleep)
# This is what the callback function looks like
def printCallback(data, properties):
    # Debug callback: dump the decoded message body to stdout (Python 2).
    print 'Receiver: %s' %data
class RabbitReceiver(RabbitFrame, threading.Thread):
    """Thread that consumes one queue and hands decoded json to a callback.

    The callback receives (data, properties); json decoding and message
    acknowledgement are handled by the internal wrapper.
    """

    def __init__(self, logger, config, queue, callback, read_repeatedly=False):
        RabbitFrame.__init__(self, logger, config, queue)
        threading.Thread.__init__(self, name='RabbitReceiver %s' %queue)
        # NOTE(review): read_repeatedly is passed as no_ack to basic_consume
        # in run() — with no_ack=True the wrapper's explicit basic_ack is
        # redundant; confirm the intended semantics of this flag.
        self.read_repeatedly = read_repeatedly
        self.callback = callback
        # Block until the exchange/queue/bind handshake has completed.
        while not self._ready:
            time.sleep(0.5)

    def __wrapper(self, ch, method, properties, body):
        # Take care of parsing and acknowledging; errors are logged and the
        # message is left unacked so it can be redelivered.
        try:
            if (body is not None):
                data = json.loads(body)
                self.callback(data, properties)
            ch.basic_ack(delivery_tag = method.delivery_tag)
        except Exception:
            self.logger.exception('Error in rabbitcoat receiver on queue %s, %s' %(self.queue, body))

    def run(self):
        ''' Bind a callback to the queue '''
        #TODO: This should be moved to a log eventually
        self.logger.debug("Receiver: starting to consume messeges on queue %s" %self.queue)
        self.channel.basic_consume(self.__wrapper,
                                   self.queue,
                                   no_ack=self.read_repeatedly)
# A basic print response for debugging
def printResponse(body):
    """Debug responder: echo the received body back as a response string."""
    text = str(body)
    return "response: got %s" % text
class SimpleRabbitResponder(RabbitReceiver):
    '''A simple responder that responds to one queue only.

    Consumes inbound_queue, applies response_function to each decoded
    payload, and publishes the result to out_queue, preserving the original
    correlation id.
    '''

    def __init__(self, config, inbound_queue, response_function, out_queue, read_repeatedly=False, logger=None):
        # BUG FIX: the previous code referenced the undefined name 'queue',
        # omitted the logger that RabbitReceiver/RabbitSender require as
        # their first argument, and self-assigned response_function.
        # logger is a new optional keyword to stay backward-compatible.
        if logger is None:
            logger = logging.getLogger(__name__)
        RabbitReceiver.__init__(self, logger, config, inbound_queue, self.__responderCallback, read_repeatedly)
        self.sender = RabbitSender(logger, self.config, out_queue)
        self.response_function = response_function

    def __responderCallback(self, data, properties):
        '''Respond on the configured outbound queue.

        This goes through RabbitReceiver's wrapper first, so it receives
        already-decoded json data and the pika properties.
        '''
        response = self.response_function(data)
        self.sender.Send(data=response, corr_id=properties.correlation_id)
class VersatileRabbitResponder(RabbitReceiver):
    '''
    A responder that supports multiple out queues, rather than one like
    SimpleRabbitResponder.

    A RabbitSender is created lazily for each reply_to queue encountered and
    cached in self.senders (most peers reuse a small set of queues, so the
    cache avoids re-declaring senders per message).
    '''

    def __init__(self, config, inbound_queue, response_function, default_out_queue=None, read_repeatedly=False, logger=None):
        # BUG FIX: the previous code referenced the undefined name 'queue',
        # omitted the required logger argument, and self-assigned
        # response_function. logger is a new optional keyword for
        # backward compatibility.
        if logger is None:
            logger = logging.getLogger(__name__)
        RabbitReceiver.__init__(self, logger, config, inbound_queue, self.__callback, read_repeatedly)
        self.senders = {}
        # In case outbound queue isn't specified, reply_to MUST be specified
        # in every message!
        if default_out_queue != None:
            sender = RabbitSender(logger, self.config, default_out_queue)
            # Default sender, used when a message carries no reply_to.
            self.senders[None] = sender
            self.senders[default_out_queue] = sender
        self.response_function = response_function

    def __callback(self, data, properties):
        '''
        Respond on the queue named by the message's reply_to property.

        BUG FIX: this now matches the (data, properties) signature invoked by
        RabbitReceiver's wrapper (which also handles json decoding and
        acking). The old version had a raw pika signature that was never
        called that way, misspelled 'properties', and passed self as the
        positional data argument of Send.
        '''
        response = self.response_function(data)
        reply_to = properties.reply_to
        if self.senders.has_key(reply_to):
            sender = self.senders[reply_to]
        elif reply_to is None:
            # We have no idea where to send the response: log and drop it.
            self.logger.error('VersatileRabbitResponder: message has no reply_to and no default out queue is set; dropping response')
            return
        else:
            # Lazily create and cache a sender for a newly seen queue.
            sender = RabbitSender(self.logger, self.config, reply_to)
            self.senders[reply_to] = sender
        sender.Send(data=response, corr_id=properties.correlation_id)
class RabbitRequester(RabbitSender):
    '''Sender that can also wait for replies on an inbound queue.'''

    def __init__(self, config, outbound_queue, callback, inbound_queue, read_repeatedly=False, logger=None):
        # BUG FIX: RabbitSender.__init__ requires (logger, config, queue,
        # reply_to); the old call shifted config into the logger slot.
        # logger is a new optional keyword for backward compatibility.
        if logger is None:
            logger = logging.getLogger(__name__)
        RabbitSender.__init__(self, logger, config, outbound_queue, inbound_queue)
        self.callback = callback
        self.read_repeatedly = read_repeatedly

    def ask(self, send_queue=None, reply_to_queue=None, message=None, callback=None, corr_id=None):
        '''Send a request and then consume replies on reply_to_queue.

        send_queue is kept for backward compatibility but unused: this
        requester always publishes to the queue fixed at construction time.
        '''
        # BUG FIX: the old default corr_id=str(uuid.uuid4()) was evaluated
        # once at definition time, so every call shared the same id.
        if corr_id is None:
            corr_id = str(uuid.uuid4())
        # BUG FIX: Send takes (data, corr_id, reply_to_queue); the old call
        # passed send_queue as an extra positional argument (TypeError).
        self.Send(data=message, corr_id=corr_id, reply_to_queue=reply_to_queue)
        '''
        CR: Not changing this because I don't need it, but you send one message and the thread goes on forever, not a good idea
        '''
        # BUG FIX: 'config' was an undefined name here; use self.config and
        # pass the required logger.
        receiver = RabbitReceiver(self.logger, self.config, reply_to_queue, callback, False)
        receiver.run()
def main(argv):
    # Placeholder entry point; real exercising of the classes lives in cli.py.
    #rabbit = rabbitcoat()
    #rabbit.Receive()
    print "testers unimplemented *yet*- use cli.py"

if __name__ == "__main__":
    main(sys.argv[1:])
| {
"content_hash": "73bcc6d7618fd26f1496f4227913eadb",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 161,
"avg_line_length": 41.61409395973154,
"alnum_prop": 0.5781791790984598,
"repo_name": "picotera/rabbit-coat",
"id": "9e19be801a786cceabebfb0100f381ef427d7145",
"size": "12423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/rabbitcoat.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "80"
},
{
"name": "Java",
"bytes": "13579"
},
{
"name": "Python",
"bytes": "25005"
}
],
"symlink_target": ""
} |
from inspectors.extensions import ma
from .models import (
User
)
class UserSchema(ma.ModelSchema):
    """Marshmallow schema auto-derived from the User model's columns."""
    class Meta:
        model = User

# Shared module-level schema instance for serializing User objects.
user_schema = UserSchema()
| {
"content_hash": "efafd944eb2ab9c6063214dba68a5f7c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 36,
"avg_line_length": 16.636363636363637,
"alnum_prop": 0.644808743169399,
"repo_name": "codeforamerica/mdc-inspectors",
"id": "eb2a795dc7298b7db64fa31f22e32776cea0c60f",
"size": "183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inspectors/registration/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2582"
},
{
"name": "HTML",
"bytes": "36333"
},
{
"name": "JavaScript",
"bytes": "240856"
},
{
"name": "Makefile",
"bytes": "832"
},
{
"name": "Python",
"bytes": "53187"
}
],
"symlink_target": ""
} |
"""
file specification
"""
reserveChangedState = False
class FileSpec(object):
    """One row of the PanDA file table.

    Attribute access is mediated so that PandaID tracks the owning job
    object and every attribute change is recorded, enabling incremental SQL
    UPDATE statements via the *Expression/valuesMap helpers.
    """

    # attributes
    _attributes = ('row_ID','PandaID','GUID','lfn','type','dataset','status','prodDBlock',
                   'prodDBlockToken','dispatchDBlock','dispatchDBlockToken','destinationDBlock',
                   'destinationDBlockToken','destinationSE','fsize','md5sum','checksum','scope',
                   'jediTaskID','datasetID','fileID','attemptNr')
    # slots
    __slots__ = _attributes+('_owner','_changedAttrs','_oldPandaID')
    # attributes which have 0 by default
    _zeroAttrs = ('fsize',)
    # mapping between sequence and attr
    _seqAttrMap = {'row_ID':'ATLAS_PANDA.FILESTABLE4_ROW_ID_SEQ.nextval'}

    # constructor
    def __init__(self):
        # install attributes
        # object.__setattr__ is used throughout to bypass the change-tracking
        # in the overridden __setattr__.
        for attr in self._attributes:
            object.__setattr__(self, attr, None)
        # set owner to synchronize PandaID
        object.__setattr__(self, '_owner', None)
        # map of changed attributes
        object.__setattr__(self, '_changedAttrs', {})
        # old PandaID
        object.__setattr__(self, '_oldPandaID', 'NULL')

    # override __getattribute__ for SQL and PandaID
    def __getattribute__(self, name):
        # PandaID
        if name == 'PandaID':
            # PandaID is delegated to the owning job when one is attached.
            if self._owner == None:
                return 'NULL'
            return self._owner.PandaID
        # others
        ret = object.__getattribute__(self, name)
        # Unset attributes read back as the literal string 'NULL' so they can
        # be embedded directly into SQL text.
        if ret == None:
            return "NULL"
        return ret

    # override __setattr__ to collecte the changed attributes
    def __setattr__(self, name, value):
        oldVal = getattr(self, name)
        object.__setattr__(self, name, value)
        newVal = getattr(self, name)
        # collect changed attributes
        if oldVal != newVal:
            self._changedAttrs[name] = value

    # set owner
    def setOwner(self, owner):
        """Attach the owning job; PandaID reads are delegated to it."""
        self._owner = owner
        self._oldPandaID = self.PandaID

    # reset changed attribute list
    def resetChangedList(self):
        self._oldPandaID = self.PandaID
        object.__setattr__(self, '_changedAttrs', {})

    # return a tuple of values
    def values(self):
        ret = []
        for attr in self._attributes:
            val = getattr(self, attr)
            ret.append(val)
        return tuple(ret)

    # return map of values
    def valuesMap(self, useSeq=False, onlyChanged=False):
        """Bind-variable map keyed ':attr'; skips sequence-generated columns
        when useSeq, and limits to modified attributes when onlyChanged."""
        ret = {}
        for attr in self._attributes:
            if useSeq and self._seqAttrMap.has_key(attr):
                continue
            if onlyChanged:
                if attr == 'PandaID':
                    # PandaID is change-tracked against the snapshot taken in
                    # setOwner/resetChangedList, not via _changedAttrs.
                    if self.PandaID == self._oldPandaID:
                        continue
                elif not self._changedAttrs.has_key(attr):
                    continue
            val = getattr(self, attr)
            # Translate the in-memory 'NULL' marker back to a DB value.
            if val == 'NULL':
                if attr in self._zeroAttrs:
                    val = 0
                else:
                    val = None
            ret[':%s' % attr] = val
        return ret

    # pack tuple into FileSpec
    def pack(self, values):
        """Fill attributes from a DB row tuple, in _attributes order."""
        for i in range(len(self._attributes)):
            attr = self._attributes[i]
            val = values[i]
            object.__setattr__(self, attr, val)

    # return state values to be pickled
    def __getstate__(self):
        state = []
        for attr in self._attributes:
            val = getattr(self, attr)
            state.append(val)
        if reserveChangedState:
            state.append(self._changedAttrs)
        # append owner info
        state.append(self._owner)
        return state

    # restore state from the unpickled state values
    def __setstate__(self, state):
        pandaID = 'NULL'
        for i in range(len(self._attributes)):
            # Tolerate states pickled with fewer attributes (older versions):
            # missing trailing attributes default to 'NULL'.
            if i+1 < len(state):
                object.__setattr__(self, self._attributes[i], state[i])
            else:
                object.__setattr__(self, self._attributes[i], 'NULL')
            if self._attributes[i] == 'PandaID':
                pandaID = state[i]
        object.__setattr__(self, '_owner', state[-1])
        object.__setattr__(self, '_oldPandaID', pandaID)
        if reserveChangedState:
            object.__setattr__(self, '_changedAttrs', state[-2])
        else:
            object.__setattr__(self, '_changedAttrs', {})

    # return column names for INSERT
    def columnNames(cls, withMod=False):
        ret = ""
        for attr in cls._attributes:
            if ret != "":
                ret += ','
            ret += attr
        # add modificationTime
        if withMod:
            ret += ",modificationTime"
        return ret
    columnNames = classmethod(columnNames)

    # return expression of values for INSERT
    def valuesExpression(cls):
        # Positional '%s' placeholders, one per attribute.
        ret = "VALUES("
        for attr in cls._attributes:
            ret += "%s"
            if attr != cls._attributes[len(cls._attributes)-1]:
                ret += ","
        ret += ")"
        return ret
    valuesExpression = classmethod(valuesExpression)

    # return expression of bind variables for INSERT
    def bindValuesExpression(cls, useSeq=False, withMod=False):
        from config import panda_config
        ret = "VALUES("
        for attr in cls._attributes:
            if useSeq and cls._seqAttrMap.has_key(attr):
                #if panda_config.backend == 'mysql':
                #    # mysql
                #    ret += "%s," % "NULL"
                #else:
                #    # oracle
                #    ret += "%s," % cls._seqAttrMap[attr]
                #Back to the sequence for MySQL
                ret += "%s," % cls._seqAttrMap[attr]
            else:
                ret += ":%s," % attr
        ret = ret[:-1]
        # add modificationTime
        if withMod:
            ret += ",:modificationTime"
        ret += ")"
        return ret
    bindValuesExpression = classmethod(bindValuesExpression)

    # return an expression for UPDATE
    def updateExpression(cls):
        ret = ""
        for attr in cls._attributes:
            ret = ret + attr + "=%s"
            if attr != cls._attributes[len(cls._attributes)-1]:
                ret += ","
        return ret
    updateExpression = classmethod(updateExpression)

    # return an expression of bind variables for UPDATE
    def bindUpdateExpression(cls):
        ret = ""
        for attr in cls._attributes:
            ret += '%s=:%s,' % (attr, attr)
        ret = ret[:-1]
        ret += ' '
        return ret
    bindUpdateExpression = classmethod(bindUpdateExpression)

    # return an expression of bind variables for UPDATE to update only changed attributes
    def bindUpdateChangesExpression(self):
        ret = ""
        for attr in self._attributes:
            if self._changedAttrs.has_key(attr) or \
                    (attr == 'PandaID' and self.PandaID != self._oldPandaID):
                ret += '%s=:%s,' % (attr, attr)
        ret = ret[:-1]
        ret += ' '
        return ret

    # check if unmerged input
    def isUnMergedInput(self):
        if self.type == 'input' and self.dispatchDBlockToken == 'TOMERGE':
            return True
        return False

    # check if unmerged output
    def isUnMergedOutput(self):
        if self.type in ['output','log'] and self.destinationDBlockToken == 'TOMERGE':
            return True
        return False

    # allow no output
    def allowNoOutput(self):
        """Add the 'an' (allow-no-output) flag to dispatchDBlockToken."""
        if self.dispatchDBlockToken in ['NULL', None, '']:
            items = []
        else:
            items = self.dispatchDBlockToken.split(',')
        if not 'an' in items:
            items.append('an')
        self.dispatchDBlockToken = ','.join(items)

    # check if no output is allowed
    def isAllowedNoOutput(self):
        try:
            if 'an' in self.dispatchDBlockToken.split(','):
                return True
        except:
            # Token may be None/'NULL' (no split method); treat as not allowed.
            pass
        return False
| {
"content_hash": "78b7c7e9a3abec5c5e8a93bf85ff5793",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 96,
"avg_line_length": 30.45977011494253,
"alnum_prop": 0.5343396226415095,
"repo_name": "RRCKI/panda-server",
"id": "318c3b5c6421d363aeedbb27b7c9e07f9e5fecf8",
"size": "7950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandaserver/taskbuffer/FileSpec.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "PLSQL",
"bytes": "23194"
},
{
"name": "Python",
"bytes": "2670522"
},
{
"name": "Shell",
"bytes": "16124"
}
],
"symlink_target": ""
} |
import os
import sys
# NOTE(review): ImproperlyConfigured is imported but never used here --
# presumably kept so a missing/broken Django install fails fast at import
# time; confirm before removing.
from django.core.exceptions import ImproperlyConfigured
if __name__ == "__main__":
    # Fall back to the local settings module unless the environment
    # already specifies one.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sfchat.settings.local")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "964f5e17c9e46d9275328be27a920336",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 24.25,
"alnum_prop": 0.7422680412371134,
"repo_name": "MySmile/sfchat",
"id": "3d1e742dd116ac5197577f165a23b25f10c0c855",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5563"
},
{
"name": "HTML",
"bytes": "40427"
},
{
"name": "JavaScript",
"bytes": "117323"
},
{
"name": "Makefile",
"bytes": "2488"
},
{
"name": "Python",
"bytes": "79515"
},
{
"name": "Shell",
"bytes": "1902"
}
],
"symlink_target": ""
} |
"""Job Args"""
from argparse import ArgumentParser
class Args:
    """Job Args"""
    def __init__(self, parser: ArgumentParser):
        """Initialize class properties.

        NOTE(review): ``parser`` is accepted but not stored, and no CLI
        arguments are registered here -- presumably app-specific args are
        meant to be added in this hook; confirm against the app template.
        """
| {
"content_hash": "3312974b2cbd00f95b03192a3fbba01f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 47,
"avg_line_length": 19.555555555555557,
"alnum_prop": 0.6306818181818182,
"repo_name": "kstilwell/tcex",
"id": "829c4fce96c1228c96acfcb2fec1d0554bd5b8b6",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_init/job/args.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "241378"
}
],
"symlink_target": ""
} |
import sys
import os.path
import subprocess
import argparse
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)
parser = argparse.ArgumentParser(description="Sort and Index BAM files")
parser.add_argument('--output', type=str)
parser.add_argument('input_file')
results, leftovers = parser.parse_known_args()
print results.input_file
#print results.output
if results.output is not None:
outFilenamePrefix, file_extension = os.path.splitext(results.output)
outFilenamePrefix = os.path.dirname(results.input_file)+ "/" +outFilenamePrefix;
else:
outFilenamePrefix, file_extension = os.path.splitext(results.input_file)
outFilenamePrefix += "_sorted"
#print "output file: " + outFilenamePrefix + ".bam"
inputFile = results.input_file
inFilenamePrefix, inFileExtension = os.path.splitext(results.input_file)
if inFileExtension == ".sam":
args = ("samtools", "view", "-Sb", results.input_file)
print "converting to BAM..."
#print args
f = open(inFilenamePrefix + ".bam", "w")
popen = subprocess.Popen(args, stdout=f)
popen.wait()
f.close()
if popen.returncode != 0:
print "error"
sys.exit()
print inFilenamePrefix + ".bam created"
inputFile = inFilenamePrefix + ".bam"
args = ("samtools", "sort", results.input_file, outFilenamePrefix)
print "sorting..."
#print args
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
print output
if popen.returncode != 0:
print "error"
sys.exit()
print outFilenamePrefix + ".bam created"
print "indexing..."
args = ("samtools", "index", outFilenamePrefix+".bam")
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
print output
if popen.returncode != 0:
print "error"
sys.exit()
print outFilenamePrefix+".bai created" | {
"content_hash": "66f92394b2313d398386d7d6e86f8356",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 81,
"avg_line_length": 28.761904761904763,
"alnum_prop": 0.7356512141280354,
"repo_name": "charite/Q",
"id": "f4fde2f5a90e8de72e7682e7d58e6a57b60f42ee",
"size": "1835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/bam_indexer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "131230"
},
{
"name": "C++",
"bytes": "19443598"
},
{
"name": "CMake",
"bytes": "11693"
},
{
"name": "Makefile",
"bytes": "40030"
},
{
"name": "Objective-C",
"bytes": "423432"
},
{
"name": "Perl",
"bytes": "2036"
},
{
"name": "Python",
"bytes": "16235"
},
{
"name": "R",
"bytes": "2384"
},
{
"name": "Shell",
"bytes": "46703"
}
],
"symlink_target": ""
} |
import logging
import sys
import os.path
from nepugia.formats import PACFormat
from nepugia.util.file_io import chunked_copy, FileInFile
from nepugia.compression.huffmanc import HuffmanCoding
logger = logging.getLogger(__name__)
def main():
    """CLI entry point: extract_pac <pac_file> <target_dir>."""
    src, dest = sys.argv[1], sys.argv[2]
    extract_pac_file(src, dest)
def extract_pac_file(src_file, dest_dir):
    """Extract every entry of the PAC archive `src_file` under `dest_dir`.

    Entries flagged with compression_flag are decompressed chunk by chunk
    with Huffman coding; all other entries are copied through unchanged.
    """
    # PAC entry names use backslash separators; map them onto the local tree
    get_target_path = lambda p: os.path.join(dest_dir, p.replace('\\', '/'))
    logger.info('Opening PAC file: %s', src_file)
    with open(src_file) as pac_handle:
        pac_data = PACFormat.parse_stream(pac_handle)
        hc = HuffmanCoding()
        logger.info('Parsed %03d entries', len(pac_data.entries))
        for entry in pac_data.entries:
            target_path = get_target_path(entry.name)
            logger.debug('Found entry: id=%03d offset=0x%08X compressed=%d',
                entry.id, entry.offset, entry.compression_flag)
            logger.info('Unpacking entry "%s" @ %06d bytes to "%s" @ %06d bytes',
                entry.name, entry.stored_size, target_path, entry.real_size)
            try:
                os.makedirs(os.path.dirname(target_path))
            except OSError:
                # target directory (presumably) already exists
                pass
            with open(target_path, 'w') as target_file:
                with entry.vf_open(pac_handle) as entry_handle:
                    if entry.compression_flag:
                        chunk_set = entry.chunk_set.value
                        logger.info('Parsed %03d chunks of %08d bytes @ offset=0x%08X',
                            chunk_set.header.chunk_count, chunk_set.header.chunk_size, chunk_set.header.header_size)
                        for i, chunk in enumerate(chunk_set.chunks):
                            logger.info('Decompressing chunk #%03d @ %06d -> %06d bytes',
                                i, chunk.stored_size, chunk.real_size)
                            with chunk.vf_open(entry_handle) as chunk_handle:
                                try:
                                    hc.decompress_stream(
                                        chunk_handle, target_file, chunk.real_size)
                                except Exception as err:
                                    # NOTE(review): Python 2 print; a failed
                                    # chunk is reported and extraction continues
                                    print err
                    else:
                        # uncompressed entry: straight chunked copy
                        chunked_copy(entry_handle.read, target_file.write)
# Allow running as a script as well as importing extract_pac_file().
if __name__ == '__main__':
    main()
| {
"content_hash": "2a42491e07260130666484d0435336a4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 116,
"avg_line_length": 39.26229508196721,
"alnum_prop": 0.5427974947807933,
"repo_name": "aheadley/python-nepugia",
"id": "a3ee2911ad33d857d2610660ca41e8fd83089f87",
"size": "3586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nepugia/scripts/extract_pac.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42035"
}
],
"symlink_target": ""
} |
"""
Handles all requests relating to transferring ownership of volumes.
"""
import hashlib
import hmac
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.policies import volume_transfer as policy
from cinder import quota
from cinder import quota_utils
from cinder.volume import api as volume_api
from cinder.volume import utils as volume_utils
# Tunable lengths for the transfer salt and the auto-generated auth key.
volume_transfer_opts = [
    cfg.IntOpt('volume_transfer_salt_length', default=8,
               help='The number of characters in the salt.'),
    cfg.IntOpt('volume_transfer_key_length', default=16,
               help='The number of characters in the '
                    'autogenerated auth key.'), ]
CONF = cfg.CONF
CONF.register_opts(volume_transfer_opts)
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
class API(base.Base):
    """API for interacting volume transfers."""
    def __init__(self, db_driver=None):
        # volume-level operations (e.g. accept_transfer) are delegated here
        self.volume_api = volume_api.API()
        super(API, self).__init__(db_driver)
    def get(self, context, transfer_id):
        """Return the transfer record with the given id as a plain dict."""
        context.authorize(policy.GET_POLICY)
        rv = self.db.transfer_get(context, transfer_id)
        return dict(rv)
    def delete(self, context, transfer_id):
        """Make the RPC call to delete a volume transfer."""
        transfer = self.db.transfer_get(context, transfer_id)
        volume_ref = self.db.volume_get(context, transfer.volume_id)
        context.authorize(policy.DELETE_POLICY, target_obj=volume_ref)
        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.delete.start")
        if volume_ref['status'] != 'awaiting-transfer':
            # the transfer record is destroyed regardless; this only logs
            # the unexpected volume status for the operator
            LOG.error("Volume in unexpected state")
        self.db.transfer_destroy(context, transfer_id)
        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.delete.end")
    def get_all(self, context, filters=None):
        """List transfers: all tenants for an admin passing 'all_tenants',
        otherwise only the caller's project."""
        filters = filters or {}
        context.authorize(policy.GET_ALL_POLICY)
        if context.is_admin and 'all_tenants' in filters:
            transfers = self.db.transfer_get_all(context)
        else:
            transfers = self.db.transfer_get_all_by_project(context,
                                                           context.project_id)
        return transfers
    def _get_random_string(self, length):
        """Get a random hex string of the specified length."""
        rndstr = ""
        # Note that the string returned by this function must contain only
        # characters that the recipient can enter on their keyboard. The
        # function sha224().hexdigest() achieves this by generating a hash
        # which will only contain hexadecimal digits.
        while len(rndstr) < length:
            rndstr += hashlib.sha224(os.urandom(255)).hexdigest()
        return rndstr[0:length]
    def _get_crypt_hash(self, salt, auth_key):
        """Generate a random hash based on the salt and the auth key."""
        # hmac requires bytes: coerce whatever was supplied to utf-8 bytes
        if not isinstance(salt, (six.binary_type, six.text_type)):
            salt = str(salt)
        if isinstance(salt, six.text_type):
            salt = salt.encode('utf-8')
        if not isinstance(auth_key, (six.binary_type, six.text_type)):
            auth_key = str(auth_key)
        if isinstance(auth_key, six.text_type):
            auth_key = auth_key.encode('utf-8')
        return hmac.new(salt, auth_key, hashlib.sha1).hexdigest()
    def create(self, context, volume_id, display_name):
        """Creates an entry in the transfers table."""
        LOG.info("Generating transfer record for volume %s", volume_id)
        volume_ref = self.db.volume_get(context, volume_id)
        context.authorize(policy.CREATE_POLICY, target_obj=volume_ref)
        # only an idle, unencrypted volume may be offered for transfer
        if volume_ref['status'] != "available":
            raise exception.InvalidVolume(reason=_("status must be available"))
        if volume_ref['encryption_key_id'] is not None:
            raise exception.InvalidVolume(
                reason=_("transferring encrypted volume is not supported"))
        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.create.start")
        # The salt is just a short random string.
        salt = self._get_random_string(CONF.volume_transfer_salt_length)
        auth_key = self._get_random_string(CONF.volume_transfer_key_length)
        crypt_hash = self._get_crypt_hash(salt, auth_key)
        # TODO(ollie): Transfer expiry needs to be implemented.
        transfer_rec = {'volume_id': volume_id,
                        'display_name': display_name,
                        'salt': salt,
                        'crypt_hash': crypt_hash,
                        'expires_at': None}
        try:
            transfer = self.db.transfer_create(context, transfer_rec)
        except Exception:
            LOG.error("Failed to create transfer record for %s", volume_id)
            raise
        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.create.end")
        # auth_key is only returned here; the DB stores just its hash
        return {'id': transfer['id'],
                'volume_id': transfer['volume_id'],
                'display_name': transfer['display_name'],
                'auth_key': auth_key,
                'created_at': transfer['created_at']}
    def accept(self, context, transfer_id, auth_key):
        """Accept a volume that has been offered for transfer."""
        # We must use an elevated context to see the volume that is still
        # owned by the donor.
        context.authorize(policy.ACCEPT_POLICY)
        transfer = self.db.transfer_get(context.elevated(), transfer_id)
        crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key)
        if crypt_hash != transfer['crypt_hash']:
            msg = (_("Attempt to transfer %s with invalid auth key.") %
                   transfer_id)
            LOG.error(msg)
            raise exception.InvalidAuthKey(reason=msg)
        volume_id = transfer['volume_id']
        vol_ref = objects.Volume.get_by_id(context.elevated(), volume_id)
        if vol_ref['consistencygroup_id']:
            msg = _("Volume %s must not be part of a consistency "
                    "group.") % vol_ref['id']
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        # enforce the receiving project's per-volume size limit first
        try:
            values = {'per_volume_gigabytes': vol_ref.size}
            QUOTAS.limit_check(context, project_id=context.project_id,
                               **values)
        except exception.OverQuota as e:
            quotas = e.kwargs['quotas']
            raise exception.VolumeSizeExceedsLimit(
                size=vol_ref.size, limit=quotas['per_volume_gigabytes'])
        # reserve quota (volumes + gigabytes) in the receiving project
        try:
            reserve_opts = {'volumes': 1, 'gigabytes': vol_ref.size}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        vol_ref.volume_type_id)
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context, e,
                                                   resource='volumes',
                                                   size=vol_ref.size)
        # release the corresponding quota from the donor project; failure
        # here is logged but does not abort the transfer
        try:
            donor_id = vol_ref['project_id']
            reserve_opts = {'volumes': -1, 'gigabytes': -vol_ref.size}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        vol_ref.volume_type_id)
            donor_reservations = QUOTAS.reserve(context.elevated(),
                                                project_id=donor_id,
                                                **reserve_opts)
        except Exception:
            donor_reservations = None
            LOG.exception("Failed to update quota donating volume"
                          " transfer id %s", transfer_id)
        volume_utils.notify_about_volume_usage(context, vol_ref,
                                               "transfer.accept.start")
        try:
            # Transfer ownership of the volume now, must use an elevated
            # context.
            self.volume_api.accept_transfer(context,
                                            vol_ref,
                                            context.user_id,
                                            context.project_id)
            self.db.transfer_accept(context.elevated(),
                                    transfer_id,
                                    context.user_id,
                                    context.project_id)
            QUOTAS.commit(context, reservations)
            if donor_reservations:
                QUOTAS.commit(context, donor_reservations, project_id=donor_id)
            LOG.info("Volume %s has been transferred.", volume_id)
        except Exception:
            # roll back both quota reservations before re-raising
            with excutils.save_and_reraise_exception():
                QUOTAS.rollback(context, reservations)
                if donor_reservations:
                    QUOTAS.rollback(context, donor_reservations,
                                    project_id=donor_id)
        # re-read the volume so the notification/result reflect the new owner
        vol_ref = self.db.volume_get(context, volume_id)
        volume_utils.notify_about_volume_usage(context, vol_ref,
                                               "transfer.accept.end")
        return {'id': transfer_id,
                'display_name': transfer['display_name'],
                'volume_id': vol_ref['id']}
| {
"content_hash": "1d1d44e40e252416b91696cd8f87597c",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 43.52466367713004,
"alnum_prop": 0.5595507933237173,
"repo_name": "Datera/cinder",
"id": "213fd4064719c25eb08c1769a4052db141c1b4eb",
"size": "10367",
"binary": false,
"copies": "3",
"ref": "refs/heads/datera_queens_backport",
"path": "cinder/transfer/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15242306"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
"""
Created on June 18, 2015
@author: shiruilu
Common utils for CAPE
"""
import cv2
import numpy as np
from scipy.signal import argrelextrema
import matplotlib.pyplot as plt
def safe_convert(x, new_dtype):
    """
    Cast array *x* to integer dtype *new_dtype*, clipping values outside the
    target type's representable range first.

    http://stackoverflow.com/a/23325108/2729100
    """
    bounds = np.iinfo(new_dtype)
    clipped = x.clip(bounds.min, bounds.max)
    return clipped.astype(new_dtype)
def get_smoothed_hist(I_channel, ksize=30, sigma=10):
    """
    Compute a Gaussian-smoothed intensity histogram of one channel.

    ARGs:
        I_channel -- MASKED single-channel image; zero pixels are excluded
                     from the histogram (range starts at 1).
        ksize, sigma -- Gaussian kernel size/sigma (3*sigma rule).
    RETURN:
        1-D smoothed histogram.
    """
    # raw histogram over [1, 256); calcHist returns a column vector
    raw = cv2.calcHist([I_channel], [0], None, [255], [1, 256]).T.ravel()
    # np.correlate needs 1-D inputs, hence the ravel of the kernel
    kernel = cv2.getGaussianKernel(ksize, sigma).ravel()
    return np.correlate(raw, kernel, 'same')
def detect_bimodal(H):
    """
    H: all the (smoothed) histograms of faces on the image
    RETURN:
    bimodal_Fs: True means detected
                False means undetected (i.e. not bimodal)
                None means not sure, will plot H[i] for analysis
    D, M, B: *Arrays* of detected Dark, Median, Bright intensities.
             i.e. x-index of H
    """
    # argrelextrema return (array([ 54, 132]),) (a tuple), only [0] used for 1d
    maximas_Fs = [ argrelextrema(h, np.greater, order=10)[0] for h in H ]
    # argrelextrema return (array([ 54, 132]),) (a tuple), only [0] used for 1d
    minimas_Fs = [ argrelextrema(h, np.less, order=10)[0] for h in H ]
    # # to visualize the bimodal:
    # print "maximas each face(hist): ", maximas_Fs \
    #     , "minimas each face(hist): ", minimas_Fs
    # plt.plot(H[i]); plt.xlim([1,256]); plt.show()
    bimodal_Fs = np.zeros(len(H) ,dtype=bool)
    D = np.zeros(len(H)); M = np.zeros(len(H)); B = np.zeros(len(H));
    for i in range(len(H)): # each face i
        tot_face_pix = np.sum(H[i])
        # exactly two maxima with one minimum between -> candidate bimodal
        if len(maximas_Fs[i]) ==2 and len(minimas_Fs[i]) ==1: #bimodal detected
            d = maximas_Fs[i][0]
            b = maximas_Fs[i][1]
            m = minimas_Fs[i][0]
            # print 'd,b,m: ',d,b,m
            B[i] = b; M[i] = m; D[i] = d;
            # NOTICE: Here its 0.003 not 5%(as described in CAPE)!
            # 5% should be cumulated from several cylinders around the peak
            # Here it's ONLY the highest peak
            # both peaks must be significant and the valley clearly lower
            if H[i][d] >=0.003*tot_face_pix and H[i][b] >=0.003*tot_face_pix \
               and (H[i][m] <=0.8*H[i][d] and H[i][m] <=0.8*H[i][b]):
                bimodal_Fs[i] = True
        elif len(maximas_Fs[i]) >2 or len(minimas_Fs[i]) >1:
            # ambiguous: plot for manual inspection (Python 2 print below)
            print '?? more than two maximas, or more than one minima, see the plot'
            plt.plot(H[i]); plt.xlim([1,256]); plt.show()
            bimodal_Fs[i] = None
        else:
            # fewer than two maxima: leave bimodal_Fs[i] as False (no-op)
            None
    return bimodal_Fs, D, M, B
def frange(start, stop, step):
    """Yield floats from start (inclusive) up to stop (exclusive) in
    increments of step. Float rounding follows plain accumulation."""
    current = start
    while current < stop:
        yield current
        current += step
def mask_skin(img, mask):
    """Return a copy of img where every non-skin pixel (mask False) is 0."""
    masked = img.copy()
    masked[~mask] = 0
    return masked
def mag(img, dtype='int'):
    """
    Magnify an image from [0,1] to [0,255].

    dtype selects the conversion:
        'int'   -- scale, round, clip to uint8
        'float' -- scale only, keep floats
        'trim'  -- no scaling, just round and clip to uint8
    """
    if dtype == 'int':
        return safe_convert(np.rint(img * 255), np.uint8)
    if dtype == 'float':
        return img * 255
    if dtype == 'trim':
        return safe_convert(np.rint(img), np.uint8)
    raise ValueError('no such data type')
def display(img, name='', mode='bgr'):
    """
    Show an image with matplotlib.
    ARGS:
        img: image data (BGR channel order unless mode says otherwise)
        name: string shown as the plot title
        mode: 'bgr', 'rgb', 'gray', or 'rainbow' (for [0,1]-scaled images)
    """
    renderers = {
        'bgr': lambda: plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)),
        'rgb': lambda: plt.imshow(img),
        'gray': lambda: plt.imshow(img, 'gray'),
        'rainbow': lambda: plt.imshow(img, cmap='rainbow'),
    }
    if mode not in renderers:
        raise ValueError('CAPE display: unkown mode')
    renderers[mode]()
    plt.title(name)
    plt.show()
"content_hash": "53814e2f90537776018a55beba722774",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 91,
"avg_line_length": 33.472,
"alnum_prop": 0.5750478011472275,
"repo_name": "shiruilu/CAPE",
"id": "a770bf13281d79ef59f3fcfd6abcf040bfe85b7a",
"size": "4184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cape_util/cape_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51976"
}
],
"symlink_target": ""
} |
from operator import add
from django.db.models import FileField
from django.db.models.signals import post_delete
def _get_subclasses(klass):
return (klass,) + reduce(add, map(_get_subclasses, klass.__subclasses__()), ())
def get_subclasses(model, include_abstract=False):
    """
    Returns a list of unique models that inherit from the specified model. If
    include_abstract is True, abstract inheriting models will also be returned.
    """
    found = set()
    for klass in _get_subclasses(model):
        if not hasattr(klass, '_meta'):
            continue
        if include_abstract or not klass._meta.abstract:
            found.add(klass)
    return list(found)
def _delete_files(sender, instance=None, **kwargs):
    """post_delete signal handler: remove the files referenced by every
    populated File/Image field on the deleted instance.

    save=False prevents re-saving the (already deleted) instance.
    Fix: removed a stray Python-2 debug print of the file path, which also
    broke the module under Python 3.
    """
    if instance:
        for file_field in [field.name
                           for field in instance._meta.fields
                           if isinstance(field, FileField)
                           and getattr(instance, field.name)]:
            getattr(instance, file_field).delete(save=False)
def delete_files_on_delete(model):
    """
    A convenience function to delete any files referred to by File/Image fields
    in a model when an instance or subclass of that model is deleted.

    If invoking this for extensively inherited models, this should be placed
    somehwere that executed after all models have been initialised, such as
    urls.py.

    This function is only useful in Django 1.2.5 and later. Previous versions
    have this behaviour built-in.
    """
    for klass in get_subclasses(model):
        has_file_field = any(isinstance(field, FileField)
                             for field in klass._meta.fields)
        if has_file_field:
            post_delete.connect(_delete_files, sender=klass)
| {
"content_hash": "f7893534a17dcfe3c433e595f1b01ce8",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 41.525,
"alnum_prop": 0.686935580975316,
"repo_name": "ixc/glamkit-feincmstools",
"id": "d8abe6dd4c98b55c6a0cc3d7071d1475378f1748",
"size": "1661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feincmstools/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "450"
},
{
"name": "JavaScript",
"bytes": "2301"
},
{
"name": "Python",
"bytes": "41872"
}
],
"symlink_target": ""
} |
from bambou import NURESTFetcher
class NUWirelessPortsFetcher(NURESTFetcher):
    """ Fetcher dedicated to NUWirelessPort objects.

    Notes:
        Use this fetcher to retrieve collections of NUWirelessPort
        instances from the backend.

    See:
        bambou.NURESTFetcher
    """

    @classmethod
    def managed_class(cls):
        """ Return the class this fetcher manages.

        Returns:
            .NUWirelessPort: the managed class
        """
        # imported lazily to avoid a circular import with the package root
        from .. import NUWirelessPort
        return NUWirelessPort
| {
"content_hash": "ef679ed7b151c6dfe3d0947e05b35cde",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 20.96,
"alnum_prop": 0.6145038167938931,
"repo_name": "nuagenetworks/vspk-python",
"id": "b2e508b1ee7ae3382274af679d3d0921431aa9cb",
"size": "2135",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vspk/v6/fetchers/nuwirelessports_fetcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12909327"
}
],
"symlink_target": ""
} |
from nylas.client.restful_models import RestfulModel, Message, File, Contact
from nylas.utils import HttpMethod
import re
def _add_options_to_body(body, options):
options_dict = options.__dict__
# Only append set options to body to prevent a 400 error
options_filtered = {k: v for k, v in options_dict.items() if v is not None}
return body.update(options_filtered)
class Neural(RestfulModel):
    """Entry point for the Nylas Neural (intelligence) API endpoints."""

    def __init__(self, api):
        RestfulModel.__init__(self, Neural, api)

    def sentiment_analysis_message(self, message_ids):
        """Run sentiment analysis over existing messages by id."""
        payload = {"message_id": message_ids}
        return self.api._request_neural_resource(NeuralSentimentAnalysis, payload)

    def sentiment_analysis_text(self, text):
        """Run sentiment analysis over a raw text snippet."""
        payload = {"text": text}
        return self.api._request_neural_resource(NeuralSentimentAnalysis, payload)

    def extract_signature(self, message_ids, parse_contacts=None, options=None):
        """Extract signatures (and, unless disabled, contact info) from messages."""
        payload = {"message_id": message_ids}
        # isinstance(None, bool) is False, so a single check suffices
        if isinstance(parse_contacts, bool):
            payload["parse_contacts"] = parse_contacts
        if isinstance(options, NeuralMessageOptions):
            _add_options_to_body(payload, options)
        signatures = self.api._request_neural_resource(NeuralSignatureExtraction, payload)
        if parse_contacts is not False:
            for sig in signatures:
                sig.contacts = NeuralSignatureContact.create(self.api, **sig.contacts)
        return signatures

    def ocr_request(self, file_id, pages=None):
        """Run OCR on an uploaded file, optionally restricted to given pages."""
        payload = {"file_id": file_id}
        if isinstance(pages, list):
            payload["pages"] = pages
        return self.api._request_neural_resource(NeuralOcr, payload)

    def categorize(self, message_ids):
        """Categorize messages; each result carries a parsed Categorize object."""
        payload = {"message_id": message_ids}
        messages = self.api._request_neural_resource(NeuralCategorizer, payload)
        for message in messages:
            message.categorizer = Categorize.create(self.api, **message.categorizer)
        return messages

    def clean_conversation(self, message_ids, options=None):
        """Strip extraneous content from message conversations."""
        payload = {"message_id": message_ids}
        if isinstance(options, NeuralMessageOptions):
            _add_options_to_body(payload, options)
        return self.api._request_neural_resource(NeuralCleanConversation, payload)
class NeuralMessageOptions:
    """Optional toggles for the signature-extraction and clean-conversation
    endpoints; options left as None are not sent to the API."""

    def __init__(
        self,
        ignore_links=None,
        ignore_images=None,
        ignore_tables=None,
        remove_conclusion_phrases=None,
        images_as_markdowns=None,
    ):
        for key, value in (
            ("ignore_links", ignore_links),
            ("ignore_images", ignore_images),
            ("ignore_tables", ignore_tables),
            ("remove_conclusion_phrases", remove_conclusion_phrases),
            ("images_as_markdowns", images_as_markdowns),
        ):
            setattr(self, key, value)
class NeuralSentimentAnalysis(RestfulModel):
    """Result of the sentiment-analysis endpoint."""
    # fields populated from the API response
    attrs = [
        "account_id",
        "sentiment",
        "sentiment_score",
        "processed_length",
        "text",
    ]
    collection_name = "sentiment"
    def __init__(self, api):
        """Bind this model to the given API client."""
        RestfulModel.__init__(self, NeuralSentimentAnalysis, api)
class NeuralSignatureExtraction(Message):
    """A message extended with its extracted signature and parsed contacts."""
    attrs = Message.attrs + ["signature", "model_version", "contacts"]
    collection_name = "signature"
    def __init__(self, api):
        """Bind this model to the given API client."""
        RestfulModel.__init__(self, NeuralSignatureExtraction, api)
class NeuralSignatureContact(RestfulModel):
    """Contact details parsed out of an email signature."""

    attrs = ["job_titles", "links", "phone_numbers", "emails", "names"]
    collection_name = "signature_contact"

    def __init__(self, api):
        RestfulModel.__init__(self, NeuralSignatureContact, api)

    def to_contact_object(self):
        """Convert the parsed signature fields into a Contact object.

        Only the first detected name/job title is used; every email, phone
        number, and link is carried over.
        """
        payload = {}
        if self.names is not None:
            payload["given_name"] = self.names[0]["first_name"]
            payload["surname"] = self.names[0]["last_name"]
        if self.job_titles is not None:
            payload["job_title"] = self.job_titles[0]
        if self.emails is not None:
            payload["emails"] = [
                {"type": "personal", "email": address} for address in self.emails
            ]
        if self.phone_numbers is not None:
            payload["phone_numbers"] = [
                {"type": "mobile", "number": num} for num in self.phone_numbers
            ]
        if self.links is not None:
            # a link without a description is recorded as the homepage
            payload["web_pages"] = [
                {"type": link["description"] or "homepage", "url": link["url"]}
                for link in self.links
            ]
        return Contact.create(self.api, **payload)
class NeuralCategorizer(Message):
    """A message extended with the Neural API's category prediction."""
    attrs = Message.attrs + ["categorizer"]
    collection_name = "categorize"
    def __init__(self, api):
        """Bind this model to the given API client."""
        RestfulModel.__init__(self, NeuralCategorizer, api)
    def recategorize(self, category):
        """Send category feedback for this message, then re-fetch and return
        the updated categorization."""
        data = {"message_id": self.id, "category": category}
        # POST the feedback first; its immediate response is not used
        self.api._request_neural_resource(
            NeuralCategorizer, data, "categorize/feedback", method=HttpMethod.POST
        )
        data = {"message_id": self.id}
        response = self.api._request_neural_resource(NeuralCategorizer, data)
        categorize = response[0]
        if categorize.categorizer:
            categorize.categorizer = Categorize.create(
                self.api, **categorize.categorizer
            )
        return categorize
class Categorize(RestfulModel):
    """The category payload attached to a categorized message."""
    attrs = ["category", "categorized_at", "model_version", "subcategories"]
    # categorized_at is parsed into a datetime by the base model
    datetime_attrs = {"categorized_at": "categorized_at"}
    collection_name = "category"
    def __init__(self, api):
        """Bind this model to the given API client."""
        RestfulModel.__init__(self, Categorize, api)
class NeuralCleanConversation(Message):
    """A message whose conversation text was cleaned by the Neural API."""

    attrs = Message.attrs + [
        "conversation",
        "model_version",
    ]
    collection_name = "conversation"

    def __init__(self, api):
        RestfulModel.__init__(self, NeuralCleanConversation, api)

    def extract_images(self):
        """Fetch the File objects for inline images referenced in the cleaned
        conversation via cid markers, e.g. (cid:<file_id>) or 'cid:<file_id>'.
        """
        # Fix: raw string literal; the previous plain string relied on the
        # invalid escape sequences \( and \), which raise a
        # DeprecationWarning/SyntaxWarning on modern Python.
        pattern = r"[\(']cid:(.*?)[\)']"
        file_ids = re.findall(pattern, self.conversation)
        files = []
        for match in file_ids:
            files.append(self.api.files.get(match))
        return files
class NeuralOcr(File):
    """A file extended with its OCR text and the pages that were processed."""
    attrs = File.attrs + [
        "ocr",
        "processed_pages",
    ]
    collection_name = "ocr"
    def __init__(self, api):
        """Bind this model to the given API client."""
        RestfulModel.__init__(self, NeuralOcr, api)
| {
"content_hash": "e7676b8fd910d791c7be63d293ab7a22",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 87,
"avg_line_length": 35.11413043478261,
"alnum_prop": 0.62590930196564,
"repo_name": "nylas/nylas-python",
"id": "a07106318118eed4561ea36898fbb91ad0d585c3",
"size": "6461",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "nylas/client/neural_api_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "321339"
}
],
"symlink_target": ""
} |
from api.models import City, Country
from django.contrib.gis.db.models.functions import Area
# Example GeoDjango query: annotate each country with the area of its
# web-mercator border geometry, largest first. (QuerySets are lazy; these
# statements build the queries without evaluating them.)
Country.objects.annotate(area=Area('border_webmercator')).order_by('-area')
# Example JSON-field lookup on the `properties` field.
City.objects.filter(properties__wikipedia='Seattle')
| {
"content_hash": "e128d99930214565950bc349c9ca8c2e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 75,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.7911111111111111,
"repo_name": "notthatbreezy/django-example",
"id": "3a926f1f47a9ab3d5a2ba774c014ff53939dc04f",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/opt/django_example/django_example/examples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12260"
}
],
"symlink_target": ""
} |
import sys
import time
# psutil is optional: without it splicedCheck() cannot poll the process
# list and reports spliced as found immediately.
try:
    import psutil
    hasPsutil = True
except ImportError:
    hasPsutil = False
def splicedCheck(mode):
    """Poll (roughly up to 10 seconds) for the spliced.exe process.

    ARGs:
        mode -- "start": wait for a spliced.exe process to appear.
                "stop":  wait for all spliced.exe processes to disappear.
    RETURN:
        True when spliced.exe is running at the end of the wait, False
        otherwise.  When psutil is unavailable no polling is possible and
        True is returned unconditionally (original behaviour preserved).

    Fix: the mode is now validated up front, so an invalid mode raises even
    when psutil is not installed; `== True/False` comparisons replaced with
    truthiness and the per-scan loop with any().
    """
    if mode not in ("start", "stop"):
        raise Exception("Invalid mode for splicedCheck..")
    if not hasPsutil:
        return True
    def anySpliced():
        # True when any live process is currently named spliced.exe
        return any(proc.name() == u"spliced.exe"
                   for proc in psutil.process_iter())
    if mode == "start":
        splicedFound = False
        count = 0
        while not splicedFound and count < 10:
            time.sleep(1)
            splicedFound = anySpliced()
            if not splicedFound:
                count += 1
    else:
        splicedFound = True
        count = 0
        while splicedFound and count < 10:
            time.sleep(1)
            splicedFound = anySpliced()
            count += 1
    return splicedFound
| {
"content_hash": "aeb62ca75388e3cbd05b1e2dce300535",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 62,
"avg_line_length": 25.04255319148936,
"alnum_prop": 0.44180118946474084,
"repo_name": "osrf/opensplice",
"id": "6014e1e9f22a6b3420f619355ffa9bf57cb268bf",
"size": "1177",
"binary": false,
"copies": "2",
"ref": "refs/heads/osrf-6.9.0",
"path": "build/scripts/overnight/python/splicedCheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "16400"
},
{
"name": "Batchfile",
"bytes": "192174"
},
{
"name": "C",
"bytes": "19618578"
},
{
"name": "C#",
"bytes": "2428591"
},
{
"name": "C++",
"bytes": "8036199"
},
{
"name": "CMake",
"bytes": "35186"
},
{
"name": "CSS",
"bytes": "41427"
},
{
"name": "HTML",
"bytes": "457045"
},
{
"name": "Java",
"bytes": "5184488"
},
{
"name": "JavaScript",
"bytes": "540355"
},
{
"name": "LLVM",
"bytes": "13059"
},
{
"name": "Lex",
"bytes": "51476"
},
{
"name": "Makefile",
"bytes": "513684"
},
{
"name": "Objective-C",
"bytes": "38424"
},
{
"name": "Perl",
"bytes": "164028"
},
{
"name": "Python",
"bytes": "915683"
},
{
"name": "Shell",
"bytes": "363583"
},
{
"name": "TeX",
"bytes": "8134"
},
{
"name": "Visual Basic",
"bytes": "290"
},
{
"name": "Yacc",
"bytes": "202848"
}
],
"symlink_target": ""
} |
from OpenGL.GL import *
from .geometric import *
from ..spaces.objective import *
from ..spaces.statespace import *
from ..spaces.configurationspace import *
from ..spaces.edgechecker import *
from ..spaces.metric import *
from ..planners.problem import PlanningProblem
class FlappyControlSpace(ControlSpace):
    """Control space for the Flappy point robot.

    A control is a pair (duration, thrust); integrating it propagates the
    state (x, y, vy) under constant gravity plus the selected thrust.
    """
    def __init__(self, flappy):
        self.flappy = flappy

    def configurationSpace(self):
        return self.flappy.configurationSpace()

    def controlSet(self, x):
        # Pair a time-biased duration set with the discrete thrust choices.
        duration_set = TimeBiasSet(self.flappy.time_range, self.flappy.controlSet())
        return MultiSet(duration_set, self.flappy.controlSet())

    def nextState(self, x, u):
        # Fully integrating the control gives the successor state.
        return self.eval(x, u, 1.0)

    def eval(self, x, u, amount):
        """Integrate control u from state x over fraction `amount` of its duration."""
        px, py, vy = x
        duration, thrust = u
        dt = duration * amount
        # Constant net vertical acceleration: gravity plus optional thrust.
        accel = self.flappy.gravity + thrust * self.flappy.thrust
        nx = px + self.flappy.v_x * dt
        ny = py + vy * dt + 0.5 * accel * (dt ** 2)
        nvy = vy + accel * dt
        return [nx, ny, nvy]

    def interpolator(self, x, u):
        return LambdaInterpolator(lambda s: self.eval(x, u, s), self.configurationSpace(), 10)
class Flappy:
    """Flappy-bird style planning problem: constant forward velocity,
    gravity, and an on/off thrust control, flying through slotted walls."""

    def __init__(self):
        # Workspace extents and dynamics limits.
        self.x_range = 1000
        self.y_range = 600
        self.min_altitude = 300
        self.max_velocity = 40
        # Start/goal given as (x, y, vy); the goal is a box around goal_state.
        self.start_state = [50, 250, 0]
        self.goal_state = [950, 200, 0]
        self.goal_radius = 50
        self.time_range = 10
        # Axis-aligned wall segments as (x, y, width, height).
        self.obstacles = [
            (175, 450, 50, 100), (175, 0, 50, 100), (175, 150, 50, 200),
            (375, 200, 50, 300), (375, 0, 50, 100),
            (575, 500, 50, 100), (575, 0, 50, 125), (575, 200, 50, 200),
            (775, 200, 50, 400),
        ]
        # Dynamics parameters: horizontal speed, gravity, thrust acceleration.
        self.v_x = 5
        self.gravity = -1
        self.thrust = 4

    def controlSet(self):
        # Thrust is either off ([0]) or on ([1]).
        return FiniteSet([[0], [1]])

    def controlSpace(self):
        return FlappyControlSpace(self)

    def workspace(self):
        """Planar workspace with the wall obstacles installed."""
        space = Geometric2DCSpace()
        space.box.bmin = [0, 0]
        space.box.bmax = [self.x_range, self.y_range]
        for (ox, oy, w, h) in self.obstacles:
            space.addObstacle(Box(ox, oy, ox + w, oy + h))
        return space

    def configurationSpace(self):
        # Same planar workspace, augmented with the bounded vertical velocity.
        vel_space = BoxConfigurationSpace([-self.max_velocity], [self.max_velocity])
        return MultiConfigurationSpace(self.workspace(), vel_space)

    def startState(self):
        return self.start_state

    def goalSet(self):
        r = self.goal_radius
        gx, gy = self.goal_state[0], self.goal_state[1]
        lo = [gx - r, gy - r, -self.max_velocity]
        hi = [gx + r, gy + r, self.max_velocity]
        return BoxSet(lo, hi)
class FlappyObjectiveFunction(ObjectiveFunction):
    """Path-length objective for the Flappy problem.

    The incremental cost of a control is the arc length of its interpolated
    trajectory, accumulated by sampling every `timestep` units of the
    control's duration.
    """

    def __init__(self, flappy, timestep=0.2):
        self.flappy = flappy
        self.space = flappy.controlSpace()
        self.timestep = timestep

    def incremental(self, x, u):
        path = self.space.interpolator(x, u)
        duration = u[0]  # controls are (duration, thrust)
        elapsed = 0
        cost = 0
        prev = x
        # Walk the interpolated path in timestep-sized increments, summing
        # the Euclidean distance between consecutive samples.
        while elapsed < duration:
            elapsed = min(duration, elapsed + self.timestep)
            sample = path.eval(elapsed / duration)
            cost += vectorops.distance(prev, sample)
            prev = sample
        return cost
def flappyTest():
    """Construct the Flappy planning problem with a path-length objective."""
    flappy = Flappy()
    return PlanningProblem(
        flappy.controlSpace(),
        flappy.startState(),
        flappy.goalSet(),
        objective=FlappyObjectiveFunction(flappy),
        visualizer=flappy.workspace(),
        euclidean=True,
    )
| {
"content_hash": "33f9a76bed496002b24582ca66f9a020",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 110,
"avg_line_length": 34.458333333333336,
"alnum_prop": 0.5673518742442564,
"repo_name": "krishauser/pyOptimalMotionPlanning",
"id": "63162d0faebc8c8cbf6d231c627bf136567e8783",
"size": "4135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pomp/example_problems/flappy.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "610310"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.