text stringlengths 4 1.02M | meta dict |
|---|---|
"""Helpers for the decoding phase of jobs."""
import pickle
import lingvo.compat as tf
from lingvo.core import py_utils
from lingvo.core.ops import record_pb2
def WriteKeyValuePairs(filename, key_value_pairs):
  """Pickles `key_value_pairs` into the file at `filename`.

  Args:
    filename: Path of the output file; created or overwritten.
    key_value_pairs: Any picklable object (typically a list of (key, value)
      tuples) to serialize.
  """
  out_file = open(filename, 'wb')
  try:
    pickle.dump(key_value_pairs, out_file, protocol=pickle.HIGHEST_PROTOCOL)
  finally:
    out_file.close()
def SerializeOutputs(nmap: py_utils.NestedMap) -> bytes:
  """Serializes the contents of `nmap` into a record_pb2.Record.

  Args:
    nmap: A NestedMap of data to serialize.

  Returns:
    The serialized record_pb2.Record of the contents of `nmap`, as bytes.
  """
  proto = record_pb2.Record()
  # Each flattened (key, value) pair becomes one tensor-proto field.
  for field_name, field_value in nmap.FlattenItems():
    proto.fields[field_name].CopyFrom(tf.make_tensor_proto(field_value))
  return proto.SerializeToString()
| {
"content_hash": "c9c9d3261749867f24f2e1b7ff9024e3",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 69,
"avg_line_length": 28.2,
"alnum_prop": 0.7222222222222222,
"repo_name": "tensorflow/lingvo",
"id": "c693a534c0c2863c46b119131ca9cc8c22499bf9",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lingvo/core/decoder_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
} |
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v10.services.types import batch_job_service
from google.longrunning import operations_pb2 # type: ignore
# Advertise the installed google-ads version in the user-agent metadata; fall
# back to an unversioned ClientInfo when the distribution is not installed
# (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-ads",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class BatchJobServiceTransport(abc.ABC):
    """Abstract transport class for BatchJobService."""

    # OAuth scope required by the Google Ads API.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)

    DEFAULT_HOST: str = "googleads.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            # Fall back to Application Default Credentials.
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # The hasattr() guard keeps this working with older google-auth
        # releases that lack with_always_use_jwt_access().
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(
                service_account.Credentials, "with_always_use_jwt_access"
            )
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.mutate_batch_job: gapic_v1.method.wrap_method(
                self.mutate_batch_job,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_batch_job_results: gapic_v1.method.wrap_method(
                self.list_batch_job_results,
                default_timeout=None,
                client_info=client_info,
            ),
            self.run_batch_job: gapic_v1.method.wrap_method(
                self.run_batch_job,
                default_timeout=None,
                client_info=client_info,
            ),
            self.add_batch_job_operations: gapic_v1.method.wrap_method(
                self.add_batch_job_operations,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    # Abstract RPC accessors: concrete sync transports return the response
    # type directly, async transports return an Awaitable of it.
    @property
    def mutate_batch_job(
        self,
    ) -> Callable[
        [batch_job_service.MutateBatchJobRequest],
        Union[
            batch_job_service.MutateBatchJobResponse,
            Awaitable[batch_job_service.MutateBatchJobResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def list_batch_job_results(
        self,
    ) -> Callable[
        [batch_job_service.ListBatchJobResultsRequest],
        Union[
            batch_job_service.ListBatchJobResultsResponse,
            Awaitable[batch_job_service.ListBatchJobResultsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def run_batch_job(
        self,
    ) -> Callable[
        [batch_job_service.RunBatchJobRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def add_batch_job_operations(
        self,
    ) -> Callable[
        [batch_job_service.AddBatchJobOperationsRequest],
        Union[
            batch_job_service.AddBatchJobOperationsResponse,
            Awaitable[batch_job_service.AddBatchJobOperationsResponse],
        ],
    ]:
        raise NotImplementedError()
# Public API of this module.
__all__ = ("BatchJobServiceTransport",)
| {
"content_hash": "a522278d3553632fcc1005c87fc24019",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 101,
"avg_line_length": 36.31794871794872,
"alnum_prop": 0.6107031911889297,
"repo_name": "googleads/google-ads-python",
"id": "1dc1bed3ebbf6bb0f21ac964430d85e2a439348c",
"size": "7682",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v10/services/services/batch_job_service/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
"""
tu
~~~~~~~~~~~~~~~
Text Universes module.
"""
import webapp2
from main import jinja_env
class BaseHandler(webapp2.RequestHandler):
    """Request handler with shared helpers for writing responses."""

    def prepare_response(self, template_path, **kwargs):
        """Render the Jinja template at `template_path` with `kwargs`."""
        template = jinja_env.get_template(template_path)
        rendered = template.render(kwargs)
        self.response.write(rendered)

    def prepare_json_response(self, js):
        """Write the pre-serialized JSON string `js` with a JSON content type."""
        self.response.content_type = 'application/json'
        self.response.write(js)
| {
"content_hash": "24ead71add2502673b2895a59a9c7bb7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 24.38888888888889,
"alnum_prop": 0.6514806378132119,
"repo_name": "dlebech/Text-Universes",
"id": "e4023fbccb1fde6ae48a332ba5b9789a1616f2c9",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tu/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "5741"
},
{
"name": "Python",
"bytes": "12913"
}
],
"symlink_target": ""
} |
"""Generate a dot graph from the output of several profilers."""
__author__ = "Jose Fonseca et al"
import sys
import math
import os.path
import re
import textwrap
import optparse
import xml.parsers.expat
import collections
import locale
# Python 2.x/3.x compatibility shims: uniform names for dict iteration and
# for builtins that were renamed or removed in Python 3.
PYTHON_3 = sys.version_info[0] >= 3

if PYTHON_3:
    def compat_iteritems(x):
        return x.items()  # iteritems() was removed in Python 3

    def compat_itervalues(x):
        return x.values()  # itervalues() was removed in Python 3

    def compat_keys(x):
        return list(x.keys())  # keys() returns a view in Python 3

    basestring = str  # basestring was removed in Python 3
    unichr = chr  # unichr was folded into chr in Python 3
    xrange = range  # xrange was renamed to range in Python 3
else:
    def compat_iteritems(x):
        return x.iteritems()

    def compat_itervalues(x):
        return x.itervalues()

    def compat_keys(x):
        return x.keys()
# Optionally pull in a local debugging helper; its absence is not an error,
# so the script keeps working in environments that do not ship it.
try:
    # Debugging helper module
    import debug
except ImportError:
    pass
# U+00D7 MULTIPLICATION SIGN, used when formatting event counts (e.g. "5×").
MULTIPLICATION_SIGN = unichr(0xd7)


def times(x):
    """Format the count `x` as "<x>×"."""
    return "%u" % (x,) + MULTIPLICATION_SIGN
def percentage(p):
    """Format the fraction `p` (0.0..1.0) as a percentage, e.g. "12.34%"."""
    scaled = p * 100.0
    return "%.02f%%" % (scaled,)
def add(a, b):
    """Event aggregator that sums two values."""
    total = a + b
    return total
def equal(a, b):
    """Event aggregator for values expected to match.

    Returns the common value, or None when the two values differ.
    """
    return a if a == b else None
def fail(a, b):
    """Event aggregator for events that must never be aggregated."""
    assert False
# Tolerance used to decide whether an out-of-range ratio deserves a warning
# before being clamped (single-precision machine epsilon).
tol = 2 ** -23


def ratio(numerator, denominator):
    """Return numerator/denominator clamped to [0.0, 1.0].

    Division by zero yields 1.0; values outside [0, 1] by more than `tol`
    emit a warning on stderr before being clamped.
    """
    try:
        value = float(numerator) / float(denominator)
    except ZeroDivisionError:
        # 0/0 is undefined, but 1.0 yields more useful results
        return 1.0
    if value < 0.0:
        if value < -tol:
            sys.stderr.write('warning: negative ratio (%s/%s)\n' % (numerator, denominator))
        return 0.0
    if value > 1.0:
        if value > 1.0 + tol:
            sys.stderr.write('warning: ratio greater than one (%s/%s)\n' % (numerator, denominator))
        return 1.0
    return value
class UndefinedEvent(Exception):
    """Raised when attempting to get an event which is undefined."""

    def __init__(self, event):
        super(UndefinedEvent, self).__init__()
        # The Event instance that was requested but never stored.
        self.event = event

    def __str__(self):
        return 'unspecified event %s' % (self.event.name,)
class Event(object):
    """Describe a kind of event, and its basic operations."""

    def __init__(self, name, null, aggregator, formatter = str):
        # Note: equality is by identity, so two events with the same name
        # (e.g. SAMPLES and SAMPLES2) remain distinct kinds.
        self.name = name
        self._null = null
        self._aggregator = aggregator
        self._formatter = formatter

    def __eq__(self, other):
        # Identity comparison only.
        return other is self

    def __hash__(self):
        return id(self)

    def null(self):
        """Return the neutral (initial) value for this event kind."""
        return self._null

    def aggregate(self, val1, val2):
        """Combine two event values into one."""
        assert val1 is not None
        assert val2 is not None
        return self._aggregator(val1, val2)

    def format(self, val):
        """Render an event value as a display string."""
        assert val is not None
        return self._formatter(val)
# Predefined profile events.  CALLS/SAMPLES carry integer counts; the TIME*
# events carry float totals or ratios.
CALLS = Event("Calls", 0, add, times)
SAMPLES = Event("Samples", 0, add, times)
# A second, distinct "Samples" event (Event equality is by identity, so the
# duplicated display name does not clash with SAMPLES).
SAMPLES2 = Event("Samples", 0, add, times)

# Count of samples where a given function was either executing or on the stack.
# This is used to calculate the total time ratio according to the
# straightforward method described in Mike Dunlavey's answer to
# stackoverflow.com/questions/1777556/alternatives-to-gprof, item 4 (the myth
# "that recursion is a tricky confusing issue"), last edited 2012-08-30: it's
# just the ratio of TOTAL_SAMPLES over the number of samples in the profile.
#
# Used only when totalMethod == callstacks
TOTAL_SAMPLES = Event("Samples", 0, add, times)

TIME = Event("Time", 0.0, add, lambda x: '(' + str(x) + ')')
TIME_RATIO = Event("Time ratio", 0.0, add, lambda x: '(' + percentage(x) + ')')
# TOTAL_* values must never be aggregated pairwise, hence the `fail` aggregator.
TOTAL_TIME = Event("Total time", 0.0, fail)
TOTAL_TIME_RATIO = Event("Total time ratio", 0.0, fail, percentage)

# Method for computing total time; 'callstacks' enables the
# TOTAL_SAMPLES-based computation described above.
totalMethod = 'callratios'
class Object(object):
    """Base class for all objects in profile which can store events."""

    def __init__(self, events=None):
        # Map of Event -> value; assigning None for an event removes it.
        self.events = {} if events is None else events

    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        # Identity comparison: distinct objects are never equal.
        return other is self

    def __contains__(self, event):
        return event in self.events

    def __getitem__(self, event):
        try:
            return self.events[event]
        except KeyError:
            # Translate the missing key into the profile-specific error.
            raise UndefinedEvent(event)

    def __setitem__(self, event, value):
        if value is not None:
            self.events[event] = value
        elif event in self.events:
            del self.events[event]
class Call(Object):
    """A call between functions.

    There should be at most one call object for every pair of functions.
    """

    def __init__(self, callee_id):
        Object.__init__(self)
        # Identifier of the function being called.
        self.callee_id = callee_id
        # Pruning weight; filled in later by Profile.prune().
        self.weight = None
        # Fraction of the callee's incoming events attributed to this call;
        # filled in later by Profile.call_ratios().
        self.ratio = None
class Function(Object):
    """A function node of the profile's call graph."""

    def __init__(self, id, name):
        Object.__init__(self)
        self.id = id
        self.name = name
        self.module = None
        self.process = None
        # Outgoing calls, keyed by callee function id.
        self.calls = {}
        self.called = None
        # Pruning weight; filled in later by Profile.prune().
        self.weight = None
        # Cycle this function belongs to, if any (set by Cycle.add_function).
        self.cycle = None

    def add_call(self, call):
        """Register the outgoing `call`, warning when one already exists."""
        callee_id = call.callee_id
        if callee_id in self.calls:
            sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(callee_id)))
        self.calls[callee_id] = call

    def get_call(self, callee_id):
        """Return the Call to `callee_id`, creating a zero-count one if absent."""
        if callee_id not in self.calls:
            new_call = Call(callee_id)
            new_call[SAMPLES] = 0
            new_call[SAMPLES2] = 0
            new_call[CALLS] = 0
            self.calls[callee_id] = new_call
        return self.calls[callee_id]

    _parenthesis_re = re.compile(r'\([^()]*\)')
    _angles_re = re.compile(r'<[^<>]*>')
    _const_re = re.compile(r'\s+const$')

    def stripped_name(self):
        """Remove extraneous information from C++ demangled function names."""
        # Strip function parameters by repeatedly deleting innermost
        # parenthesized groups until a pass makes no substitution.
        stripped, count = self._parenthesis_re.subn('', self.name)
        while count:
            stripped, count = self._parenthesis_re.subn('', stripped)
        # Strip a trailing const qualifier.
        stripped = self._const_re.sub('', stripped)
        # Strip template parameters by repeatedly deleting innermost
        # angle-bracketed groups until a pass makes no substitution.
        stripped, count = self._angles_re.subn('', stripped)
        while count:
            stripped, count = self._angles_re.subn('', stripped)
        return stripped

    # TODO: write utility functions

    def __repr__(self):
        return self.name
class Cycle(Object):
    """A cycle made from recursive function calls."""

    def __init__(self):
        Object.__init__(self)
        # XXX: Do cycles need an id?
        # Member functions of this cycle.
        self.functions = set()

    def add_function(self, function):
        """Add `function` to this cycle, absorbing any cycle it belonged to."""
        assert function not in self.functions
        self.functions.add(function)
        # XXX: Aggregate events?
        if function.cycle is not None:
            # Merge the members of the function's previous cycle into this
            # one.  BUG FIX: the original tested `function not in
            # self.functions`, which is always false immediately after the
            # add() above, so the merge never ran; the evident intent is to
            # test `other`.
            for other in function.cycle.functions:
                if other not in self.functions:
                    self.add_function(other)
        function.cycle = self
class Profile(Object):
    """The whole profile."""

    def __init__(self):
        Object.__init__(self)
        # Map of function id -> Function.
        self.functions = {}
        # List of Cycle objects; populated by find_cycles().
        self.cycles = []

    def add_function(self, function):
        # Register `function`, warning when its id is already taken.
        if function.id in self.functions:
            sys.stderr.write('warning: overwriting function %s (id %s)\n' % (function.name, str(function.id)))
        self.functions[function.id] = function

    def add_cycle(self, cycle):
        self.cycles.append(cycle)

    def validate(self):
        """Validate the edges."""
        for function in compat_itervalues(self.functions):
            for callee_id in compat_keys(function.calls):
                assert function.calls[callee_id].callee_id == callee_id
                # Drop edges that point at functions missing from the profile.
                if callee_id not in self.functions:
                    sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name))
                    del function.calls[callee_id]

    def find_cycles(self):
        """Find cycles using Tarjan's strongly connected components algorithm."""
        # Apply the Tarjan's algorithm successively until all functions are visited
        visited = set()
        for function in compat_itervalues(self.functions):
            if function not in visited:
                self._tarjan(function, 0, [], {}, {}, visited)
        # Collect the distinct cycles discovered via the functions' back-refs.
        cycles = []
        for function in compat_itervalues(self.functions):
            if function.cycle is not None and function.cycle not in cycles:
                cycles.append(function.cycle)
        self.cycles = cycles
        if 0:
            # Debug dump of the cycles found (disabled).
            for cycle in cycles:
                sys.stderr.write("Cycle:\n")
                for member in cycle.functions:
                    sys.stderr.write("\tFunction %s\n" % member.name)

    def prune_root(self, root):
        # Keep only the functions reachable (downward) from `root`.
        visited = set()
        frontier = set([root])
        while len(frontier) > 0:
            node = frontier.pop()
            visited.add(node)
            f = self.functions[node]
            newNodes = f.calls.keys()
            frontier = frontier.union(set(newNodes) - visited)
        subtreeFunctions = {}
        for n in visited:
            subtreeFunctions[n] = self.functions[n]
        self.functions = subtreeFunctions

    def prune_leaf(self, leaf):
        # Keep only the functions from which `leaf` is reachable, and only
        # the edges lying on those paths.
        edgesUp = collections.defaultdict(set)
        for f in self.functions.keys():
            for n in self.functions[f].calls.keys():
                edgesUp[n].add(f)
        # build the tree up
        visited = set()
        frontier = set([leaf])
        while len(frontier) > 0:
            node = frontier.pop()
            visited.add(node)
            frontier = frontier.union(edgesUp[node] - visited)
        downTree = set(self.functions.keys())
        upTree = visited
        path = downTree.intersection(upTree)
        pathFunctions = {}
        for n in path:
            f = self.functions[n]
            newCalls = {}
            for c in f.calls.keys():
                if c in path:
                    newCalls[c] = f.calls[c]
            f.calls = newCalls
            pathFunctions[n] = f
        self.functions = pathFunctions

    def getFunctionId(self, funcName):
        # Linear scan by name; note this returns False (not None) when absent.
        for f in self.functions:
            if self.functions[f].name == funcName:
                return f
        return False

    def _tarjan(self, function, order, stack, orders, lowlinks, visited):
        """Tarjan's strongly connected components algorithm.

        See also:
        - http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm
        """
        visited.add(function)
        orders[function] = order
        lowlinks[function] = order
        order += 1
        pos = len(stack)
        stack.append(function)
        for call in compat_itervalues(function.calls):
            callee = self.functions[call.callee_id]
            # TODO: use a set to optimize lookup
            if callee not in orders:
                # Callee not yet visited: recurse.
                order = self._tarjan(callee, order, stack, orders, lowlinks, visited)
                lowlinks[function] = min(lowlinks[function], lowlinks[callee])
            elif callee in stack:
                # Callee is on the current stack, hence in the current SCC.
                lowlinks[function] = min(lowlinks[function], orders[callee])
        if lowlinks[function] == orders[function]:
            # Strongly connected component found
            members = stack[pos:]
            del stack[pos:]
            if len(members) > 1:
                # Only multi-member components constitute real cycles.
                cycle = Cycle()
                for member in members:
                    cycle.add_function(member)
        return order

    def call_ratios(self, event):
        # Aggregate for incoming calls
        cycle_totals = {}
        for cycle in self.cycles:
            cycle_totals[cycle] = 0.0
        function_totals = {}
        for function in compat_itervalues(self.functions):
            function_totals[function] = 0.0

        # Pass 1:  function_total gets the sum of call[event] for all
        #  incoming arrows.  Same for cycle_total for all arrows
        #  that are coming into the *cycle* but are not part of it.
        for function in compat_itervalues(self.functions):
            for call in compat_itervalues(function.calls):
                if call.callee_id != function.id:
                    callee = self.functions[call.callee_id]
                    if event in call.events:
                        function_totals[callee] += call[event]
                        if callee.cycle is not None and callee.cycle is not function.cycle:
                            cycle_totals[callee.cycle] += call[event]
                    else:
                        sys.stderr.write("call_ratios: No data for " + function.name + " call to " + callee.name + "\n")

        # Pass 2:  Compute the ratios.  Each call[event] is scaled by the
        #  function_total of the callee.  Calls into cycles use the
        #  cycle_total, but not calls within cycles.
        for function in compat_itervalues(self.functions):
            for call in compat_itervalues(function.calls):
                assert call.ratio is None
                if call.callee_id != function.id:
                    callee = self.functions[call.callee_id]
                    if event in call.events:
                        # Calls into a cycle from outside it are scaled by the
                        # cycle's total; everything else by the callee's.
                        if callee.cycle is not None and callee.cycle is not function.cycle:
                            total = cycle_totals[callee.cycle]
                        else:
                            total = function_totals[callee]
                        call.ratio = ratio(call[event], total)
                    else:
                        # Warnings here would only repeat those issued above.
                        call.ratio = 0.0

    def integrate(self, outevent, inevent):
        """Propagate function time ratio along the function calls.

        Must be called after finding the cycles.

        See also:
        - http://citeseer.ist.psu.edu/graham82gprof.html
        """
        # Sanity checking
        assert outevent not in self
        for function in compat_itervalues(self.functions):
            assert outevent not in function
            assert inevent in function
            for call in compat_itervalues(function.calls):
                assert outevent not in call
                if call.callee_id != function.id:
                    assert call.ratio is not None

        # Aggregate the input for each cycle
        # NOTE(review): this sums over *all* functions (not just the cycle's
        # members) and stores into self[inevent]; preserved as-is.
        for cycle in self.cycles:
            total = inevent.null()
            for function in compat_itervalues(self.functions):
                total = inevent.aggregate(total, function[inevent])
            self[inevent] = total

        # Integrate along the edges
        total = inevent.null()
        for function in compat_itervalues(self.functions):
            total = inevent.aggregate(total, function[inevent])
            self._integrate_function(function, outevent, inevent)
        self[outevent] = total

    def _integrate_function(self, function, outevent, inevent):
        if function.cycle is not None:
            # Cycle members are integrated collectively through their cycle.
            return self._integrate_cycle(function.cycle, outevent, inevent)
        else:
            if outevent not in function:
                # Self time plus the integrated share of all callees.
                total = function[inevent]
                for call in compat_itervalues(function.calls):
                    if call.callee_id != function.id:
                        total += self._integrate_call(call, outevent, inevent)
                function[outevent] = total
            return function[outevent]

    def _integrate_call(self, call, outevent, inevent):
        assert outevent not in call
        assert call.ratio is not None
        callee = self.functions[call.callee_id]
        # Attribute the caller's share of the callee's integrated total.
        subtotal = call.ratio * self._integrate_function(callee, outevent, inevent)
        call[outevent] = subtotal
        return subtotal

    def _integrate_cycle(self, cycle, outevent, inevent):
        if outevent not in cycle:
            # Compute the outevent for the whole cycle
            total = inevent.null()
            for member in cycle.functions:
                subtotal = member[inevent]
                for call in compat_itervalues(member.calls):
                    callee = self.functions[call.callee_id]
                    if callee.cycle is not cycle:
                        subtotal += self._integrate_call(call, outevent, inevent)
                total += subtotal
            cycle[outevent] = total

            # Compute the time propagated to callers of this cycle
            callees = {}
            for function in compat_itervalues(self.functions):
                if function.cycle is not cycle:
                    for call in compat_itervalues(function.calls):
                        callee = self.functions[call.callee_id]
                        if callee.cycle is cycle:
                            try:
                                callees[callee] += call.ratio
                            except KeyError:
                                callees[callee] = call.ratio

            for member in cycle.functions:
                member[outevent] = outevent.null()

            # Distribute the cycle's total among members, entry point by
            # entry point, along rank-increasing intra-cycle edges.
            for callee, call_ratio in compat_iteritems(callees):
                ranks = {}
                call_ratios = {}
                partials = {}
                self._rank_cycle_function(cycle, callee, 0, ranks)
                self._call_ratios_cycle(cycle, callee, ranks, call_ratios, set())
                partial = self._integrate_cycle_function(cycle, callee, call_ratio, partials, ranks, call_ratios, outevent, inevent)
                assert partial == max(partials.values())
                assert not total or abs(1.0 - partial/(call_ratio*total)) <= 0.001

        return cycle[outevent]

    def _rank_cycle_function(self, cycle, function, rank, ranks):
        # Rank cycle members by their minimal distance from the entry callee.
        if function not in ranks or ranks[function] > rank:
            ranks[function] = rank
            for call in compat_itervalues(function.calls):
                if call.callee_id != function.id:
                    callee = self.functions[call.callee_id]
                    if callee.cycle is cycle:
                        self._rank_cycle_function(cycle, callee, rank + 1, ranks)

    def _call_ratios_cycle(self, cycle, function, ranks, call_ratios, visited):
        # Accumulate intra-cycle call ratios along rank-increasing edges.
        if function not in visited:
            visited.add(function)
            for call in compat_itervalues(function.calls):
                if call.callee_id != function.id:
                    callee = self.functions[call.callee_id]
                    if callee.cycle is cycle:
                        if ranks[callee] > ranks[function]:
                            call_ratios[callee] = call_ratios.get(callee, 0.0) + call.ratio
                        self._call_ratios_cycle(cycle, callee, ranks, call_ratios, visited)

    def _integrate_cycle_function(self, cycle, function, partial_ratio, partials, ranks, call_ratios, outevent, inevent):
        if function not in partials:
            partial = partial_ratio*function[inevent]
            for call in compat_itervalues(function.calls):
                if call.callee_id != function.id:
                    callee = self.functions[call.callee_id]
                    if callee.cycle is not cycle:
                        # Edge leaving the cycle: already integrated.
                        assert outevent in call
                        partial += partial_ratio*call[outevent]
                    else:
                        if ranks[callee] > ranks[function]:
                            # Intra-cycle, rank-increasing edge: recurse and
                            # split the callee's partial among its callers.
                            callee_partial = self._integrate_cycle_function(cycle, callee, partial_ratio, partials, ranks, call_ratios, outevent, inevent)
                            call_ratio = ratio(call.ratio, call_ratios[callee])
                            call_partial = call_ratio*callee_partial
                            try:
                                call[outevent] += call_partial
                            except UndefinedEvent:
                                call[outevent] = call_partial
                            partial += call_partial
            partials[function] = partial
            try:
                function[outevent] += partial
            except UndefinedEvent:
                function[outevent] = partial
        return partials[function]

    def aggregate(self, event):
        """Aggregate an event for the whole profile."""
        total = event.null()
        for function in compat_itervalues(self.functions):
            try:
                total = event.aggregate(total, function[event])
            except UndefinedEvent:
                # Event missing for some function: leave the total undefined.
                return
        self[event] = total

    def ratio(self, outevent, inevent):
        # Derive per-function/per-call outevent as inevent over the profile's
        # total inevent (the module-level ratio() clamps to [0, 1]).
        assert outevent not in self
        assert inevent in self
        for function in compat_itervalues(self.functions):
            assert outevent not in function
            assert inevent in function
            function[outevent] = ratio(function[inevent], self[inevent])
            for call in compat_itervalues(function.calls):
                assert outevent not in call
                if inevent in call:
                    call[outevent] = ratio(call[inevent], self[inevent])
        self[outevent] = 1.0

    def prune(self, node_thres, edge_thres):
        """Prune the profile"""
        # compute the prune ratios
        for function in compat_itervalues(self.functions):
            try:
                function.weight = function[TOTAL_TIME_RATIO]
            except UndefinedEvent:
                pass

            for call in compat_itervalues(function.calls):
                callee = self.functions[call.callee_id]

                if TOTAL_TIME_RATIO in call:
                    # handle exact cases first
                    call.weight = call[TOTAL_TIME_RATIO]
                else:
                    try:
                        # make a safe estimate
                        call.weight = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO])
                    except UndefinedEvent:
                        pass

        # prune the nodes
        for function_id in compat_keys(self.functions):
            function = self.functions[function_id]
            if function.weight is not None:
                if function.weight < node_thres:
                    del self.functions[function_id]

        # prune the edges
        for function in compat_itervalues(self.functions):
            for callee_id in compat_keys(function.calls):
                call = function.calls[callee_id]
                if callee_id not in self.functions or call.weight is not None and call.weight < edge_thres:
                    del function.calls[callee_id]

    def dump(self):
        # Debug dump of all functions, calls, and cycles to stderr.
        for function in compat_itervalues(self.functions):
            sys.stderr.write('Function %s:\n' % (function.name,))
            self._dump_events(function.events)
            for call in compat_itervalues(function.calls):
                callee = self.functions[call.callee_id]
                sys.stderr.write(' Call %s:\n' % (callee.name,))
                self._dump_events(call.events)
        for cycle in self.cycles:
            sys.stderr.write('Cycle:\n')
            self._dump_events(cycle.events)
            for function in cycle.functions:
                sys.stderr.write(' Function %s\n' % (function.name,))

    def _dump_events(self, events):
        for event, value in compat_iteritems(events):
            sys.stderr.write(' %s: %s\n' % (event.name, event.format(value)))
class Struct:
    """Masquerade a dictionary with a structure-like behavior."""

    def __init__(self, attrs = None):
        # Write through __dict__ directly: going through normal attribute
        # assignment would recurse into __setattr__ below.
        self.__dict__['_attrs'] = {} if attrs is None else attrs

    def __getattr__(self, name):
        try:
            return self._attrs[name]
        except KeyError:
            # Surface missing keys as ordinary attribute errors.
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self._attrs[name] = value

    def __str__(self):
        return str(self._attrs)

    def __repr__(self):
        return repr(self._attrs)
class ParseError(Exception):
    """Raised when parsing to signal mismatches."""

    def __init__(self, msg, line):
        # Human-readable description of the mismatch.
        self.msg = msg
        # TODO: store more source line information
        # The offending input line.
        self.line = line

    def __str__(self):
        return '%s: %r' % (self.msg, self.line)
class Parser:
    """Parser interface."""

    # Default input capabilities; subclasses override as appropriate.
    stdinInput = True
    multipleInput = False

    def __init__(self):
        pass

    def parse(self):
        """Parse the input; subclasses must override."""
        raise NotImplementedError
class LineParser(Parser):
    """Base class for parsers that read line-based formats."""

    def __init__(self, stream):
        Parser.__init__(self)
        self._stream = stream
        # Current lookahead line; None until readline() is first called.
        self.__line = None
        # True once the underlying stream is exhausted.
        self.__eof = False
        self.line_no = 0

    def readline(self):
        """Advance the lookahead to the next line of the stream."""
        line = self._stream.readline()
        if not line:
            # End of stream: lookahead becomes the empty string.
            self.__line = ''
            self.__eof = True
        else:
            self.line_no += 1
        line = line.rstrip('\r\n')
        if not PYTHON_3:
            # Python 2: decode bytes using the stream's (or locale's) encoding.
            encoding = self._stream.encoding
            if encoding is None:
                encoding = locale.getpreferredencoding()
            line = line.decode(encoding)
        self.__line = line

    def lookahead(self):
        """Return the current line without consuming it."""
        assert self.__line is not None
        return self.__line

    def consume(self):
        """Return the current line and advance to the next one."""
        assert self.__line is not None
        consumed = self.__line
        self.readline()
        return consumed

    def eof(self):
        """Whether the end of the stream has been reached."""
        assert self.__line is not None
        return self.__eof
# Token types produced by XmlTokenizer.
XML_ELEMENT_START, XML_ELEMENT_END, XML_CHARACTER_DATA, XML_EOF = range(4)


class XmlToken:
    """One XML token: element boundary, character data, or end-of-file."""

    def __init__(self, type, name_or_data, attrs = None, line = None, column = None):
        assert type in (XML_ELEMENT_START, XML_ELEMENT_END, XML_CHARACTER_DATA, XML_EOF)
        self.type = type
        # Element name for start/end tokens, text for character-data tokens.
        self.name_or_data = name_or_data
        self.attrs = attrs
        self.line = line
        self.column = column

    def __str__(self):
        if self.type == XML_ELEMENT_START:
            return '<' + self.name_or_data + ' ...>'
        elif self.type == XML_ELEMENT_END:
            return '</' + self.name_or_data + '>'
        elif self.type == XML_CHARACTER_DATA:
            return self.name_or_data
        elif self.type == XML_EOF:
            return 'end of file'
        assert 0
class XmlTokenizer:
    """Expat based XML tokenizer."""

    def __init__(self, fp, skip_ws = True):
        self.fp = fp
        # Tokens buffered from the current parse chunk, and the read cursor.
        self.tokens = []
        self.index = 0
        self.final = False
        # When true, whitespace-only character data is dropped.
        self.skip_ws = skip_ws

        # Accumulates contiguous character data and where it started.
        self.character_pos = 0, 0
        self.character_data = ''

        self.parser = xml.parsers.expat.ParserCreate()
        self.parser.StartElementHandler = self.handle_element_start
        self.parser.EndElementHandler = self.handle_element_end
        self.parser.CharacterDataHandler = self.handle_character_data

    def handle_element_start(self, name, attributes):
        # Flush any pending character data before emitting the element token.
        self.finish_character_data()
        line, column = self.pos()
        token = XmlToken(XML_ELEMENT_START, name, attributes, line, column)
        self.tokens.append(token)

    def handle_element_end(self, name):
        self.finish_character_data()
        line, column = self.pos()
        token = XmlToken(XML_ELEMENT_END, name, None, line, column)
        self.tokens.append(token)

    def handle_character_data(self, data):
        if not self.character_data:
            # Remember where this run of character data began.
            self.character_pos = self.pos()
        self.character_data += data

    def finish_character_data(self):
        # Emit the accumulated character data as a single token (unless it is
        # whitespace-only and skip_ws is set).
        if self.character_data:
            if not self.skip_ws or not self.character_data.isspace():
                line, column = self.character_pos
                token = XmlToken(XML_CHARACTER_DATA, self.character_data, None, line, column)
                self.tokens.append(token)
            self.character_data = ''

    def next(self):
        """Return the next token, reading more input as needed (XML_EOF at end)."""
        size = 16*1024
        # Refill the token buffer until a token is available or input ends.
        while self.index >= len(self.tokens) and not self.final:
            self.tokens = []
            self.index = 0
            data = self.fp.read(size)
            # A short read means the stream is exhausted.
            self.final = len(data) < size
            try:
                self.parser.Parse(data, self.final)
            except xml.parsers.expat.ExpatError as e:
                #if e.code == xml.parsers.expat.errors.XML_ERROR_NO_ELEMENTS:
                if e.code == 3:
                    # Tolerate "no element found" (e.g. empty input).
                    pass
                else:
                    raise e
        if self.index >= len(self.tokens):
            line, column = self.pos()
            token = XmlToken(XML_EOF, None, None, line, column)
        else:
            token = self.tokens[self.index]
            self.index += 1
        return token

    def pos(self):
        """Return the parser's current (line, column) position."""
        return self.parser.CurrentLineNumber, self.parser.CurrentColumnNumber
class XmlTokenMismatch(Exception):
    """Raised when the token stream does not match the expected token."""

    def __init__(self, expected, found):
        # The token (or description) that was expected, and the token seen.
        self.expected = expected
        self.found = found

    def __str__(self):
        position = (self.found.line, self.found.column)
        return '%u:%u: %s expected, %s found' % (position[0], position[1], str(self.expected), str(self.found))
class XmlParser(Parser):
    """Base XML document parser."""

    def __init__(self, fp, **options):
        Parser.__init__(self)
        self.tokenizer = XmlTokenizer(fp)
        # Prime the one-token lookahead.
        self.consume()

    def consume(self):
        """Advance the lookahead to the next token."""
        self.token = self.tokenizer.next()

    def match_element_start(self, name):
        """Whether the lookahead is a start tag for `name`."""
        return self.token.type == XML_ELEMENT_START and self.token.name_or_data == name

    def match_element_end(self, name):
        """Whether the lookahead is an end tag for `name`."""
        return self.token.type == XML_ELEMENT_END and self.token.name_or_data == name

    def element_start(self, name):
        """Consume a start tag for `name` and return its attributes."""
        while self.token.type == XML_CHARACTER_DATA:
            self.consume()
        if self.token.type != XML_ELEMENT_START or self.token.name_or_data != name:
            raise XmlTokenMismatch(XmlToken(XML_ELEMENT_START, name), self.token)
        attrs = self.token.attrs
        self.consume()
        return attrs

    def element_end(self, name):
        """Consume an end tag for `name`."""
        while self.token.type == XML_CHARACTER_DATA:
            self.consume()
        if self.token.type != XML_ELEMENT_END or self.token.name_or_data != name:
            raise XmlTokenMismatch(XmlToken(XML_ELEMENT_END, name), self.token)
        self.consume()

    def character_data(self, strip = True):
        """Collect contiguous character data, optionally stripped."""
        pieces = []
        while self.token.type == XML_CHARACTER_DATA:
            pieces.append(self.token.name_or_data)
            self.consume()
        data = ''.join(pieces)
        if strip:
            data = data.strip()
        return data
class GprofParser(Parser):
    """Parser for GNU gprof output.

    See also:
    - Chapter "Interpreting gprof's Output" from the GNU gprof manual
      http://sourceware.org/binutils/docs-2.18/gprof/Call-Graph.html#Call-Graph
    - File "cg_print.c" from the GNU gprof source code
      http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/src/gprof/cg_print.c?rev=1.12&cvsroot=src
    """

    def __init__(self, fp, **options):
        Parser.__init__(self)
        self.fp = fp
        self.functions = {}   # index -> Struct for each call graph entry
        self.cycles = {}      # cycle number -> Struct for each <cycle ... as a whole> entry

    def readline(self):
        """Read one line (sans newline), aborting on unexpected end of file."""
        line = self.fp.readline()
        if not line:
            sys.stderr.write('error: unexpected end of file\n')
            sys.exit(1)
        line = line.rstrip('\r\n')
        return line

    _int_re = re.compile(r'^\d+$')
    _float_re = re.compile(r'^\d+\.\d+$')

    def translate(self, mo):
        """Extract a structure from a match object, while translating the types in the process."""
        attrs = {}
        groupdict = mo.groupdict()
        for name, value in compat_iteritems(groupdict):
            if value is None:
                value = None
            elif self._int_re.match(value):
                value = int(value)
            elif self._float_re.match(value):
                value = float(value)
            attrs[name] = value
        return Struct(attrs)

    _cg_header_re = re.compile(
        # original gprof header
        r'^\s+called/total\s+parents\s*$|' +
        r'^index\s+%time\s+self\s+descendents\s+called\+self\s+name\s+index\s*$|' +
        r'^\s+called/total\s+children\s*$|' +
        # GNU gprof header
        r'^index\s+%\s+time\s+self\s+children\s+called\s+name\s*$'
    )

    _cg_ignore_re = re.compile(
        # spontaneous
        r'^\s+<spontaneous>\s*$|'
        # internal calls (such as "mcount")
        r'^.*\((\d+)\)$'
    )

    _cg_primary_re = re.compile(
        r'^\[(?P<index>\d+)\]?' +
        r'\s+(?P<percentage_time>\d+\.\d+)' +
        r'\s+(?P<self>\d+\.\d+)' +
        r'\s+(?P<descendants>\d+\.\d+)' +
        r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' +
        r'\s+(?P<name>\S.*?)' +
        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
        r'\s\[(\d+)\]$'
    )

    _cg_parent_re = re.compile(
        r'^\s+(?P<self>\d+\.\d+)?' +
        r'\s+(?P<descendants>\d+\.\d+)?' +
        r'\s+(?P<called>\d+)(?:/(?P<called_total>\d+))?' +
        r'\s+(?P<name>\S.*?)' +
        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
        r'\s\[(?P<index>\d+)\]$'
    )

    _cg_child_re = _cg_parent_re

    _cg_cycle_header_re = re.compile(
        r'^\[(?P<index>\d+)\]?' +
        r'\s+(?P<percentage_time>\d+\.\d+)' +
        r'\s+(?P<self>\d+\.\d+)' +
        r'\s+(?P<descendants>\d+\.\d+)' +
        r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' +
        r'\s+<cycle\s(?P<cycle>\d+)\sas\sa\swhole>' +
        r'\s\[(\d+)\]$'
    )

    _cg_cycle_member_re = re.compile(
        r'^\s+(?P<self>\d+\.\d+)?' +
        r'\s+(?P<descendants>\d+\.\d+)?' +
        r'\s+(?P<called>\d+)(?:\+(?P<called_self>\d+))?' +
        r'\s+(?P<name>\S.*?)' +
        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
        r'\s\[(?P<index>\d+)\]$'
    )

    _cg_sep_re = re.compile(r'^--+$')

    def parse_function_entry(self, lines):
        """Parse one call graph entry: parent lines, primary line, child lines."""
        parents = []
        children = []

        while True:
            if not lines:
                sys.stderr.write('warning: unexpected end of entry\n')
                # FIX: bail out instead of falling through to pop an empty
                # list, which raised IndexError (AXEParser already returns).
                return
            line = lines.pop(0)
            if line.startswith('['):
                break

            # read function parent line
            mo = self._cg_parent_re.match(line)
            if not mo:
                if self._cg_ignore_re.match(line):
                    continue
                sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
            else:
                parent = self.translate(mo)
                parents.append(parent)

        # read primary line
        mo = self._cg_primary_re.match(line)
        if not mo:
            sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
            return
        else:
            function = self.translate(mo)

        while lines:
            line = lines.pop(0)

            # read function subroutine line
            mo = self._cg_child_re.match(line)
            if not mo:
                if self._cg_ignore_re.match(line):
                    continue
                sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
            else:
                child = self.translate(mo)
                children.append(child)

        function.parents = parents
        function.children = children

        self.functions[function.index] = function

    def parse_cycle_entry(self, lines):
        """Parse a <cycle N as a whole> entry and its member lines."""
        # read cycle header line
        line = lines[0]
        mo = self._cg_cycle_header_re.match(line)
        if not mo:
            sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
            return
        cycle = self.translate(mo)

        # read cycle member lines
        cycle.functions = []
        for line in lines[1:]:
            mo = self._cg_cycle_member_re.match(line)
            if not mo:
                sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
                continue
            call = self.translate(mo)
            cycle.functions.append(call)

        self.cycles[cycle.cycle] = cycle

    def parse_cg_entry(self, lines):
        # Entries whose first line is the primary "[N]" line are cycle
        # entries (they have no parent lines); others are function entries.
        if lines[0].startswith("["):
            self.parse_cycle_entry(lines)
        else:
            self.parse_function_entry(lines)

    def parse_cg(self):
        """Parse the call graph."""

        # skip call graph header
        while not self._cg_header_re.match(self.readline()):
            pass
        line = self.readline()
        while self._cg_header_re.match(line):
            line = self.readline()

        # process call graph entries
        entry_lines = []
        while line != '\014': # form feed marks the end of the call graph
            if line and not line.isspace():
                if self._cg_sep_re.match(line):
                    self.parse_cg_entry(entry_lines)
                    entry_lines = []
                else:
                    entry_lines.append(line)
            line = self.readline()

    def parse(self):
        """Parse the gprof output and return the resulting Profile."""
        self.parse_cg()
        self.fp.close()

        profile = Profile()
        profile[TIME] = 0.0

        cycles = {}
        for index in self.cycles:
            cycles[index] = Cycle()

        for entry in compat_itervalues(self.functions):
            # populate the function
            function = Function(entry.index, entry.name)
            function[TIME] = entry.self
            if entry.called is not None:
                function.called = entry.called
            if entry.called_self is not None:
                call = Call(entry.index)
                call[CALLS] = entry.called_self
                function.called += entry.called_self

            # populate the function calls
            for child in entry.children:
                call = Call(child.index)

                assert child.called is not None
                call[CALLS] = child.called

                if child.index not in self.functions:
                    # NOTE: functions that were never called but were discovered by gprof's
                    # static call graph analysis don't have a call graph entry so we need
                    # to add them here
                    missing = Function(child.index, child.name)
                    # FIX: initialize the newly created function; the original
                    # code mistakenly zeroed the *current* function's time and
                    # call count here, clobbering the values parsed above.
                    missing[TIME] = 0.0
                    missing.called = 0
                    profile.add_function(missing)

                function.add_call(call)

            profile.add_function(function)

            if entry.cycle is not None:
                try:
                    cycle = cycles[entry.cycle]
                except KeyError:
                    sys.stderr.write('warning: <cycle %u as a whole> entry missing\n' % entry.cycle)
                    cycle = Cycle()
                    cycles[entry.cycle] = cycle
                cycle.add_function(function)

            profile[TIME] = profile[TIME] + function[TIME]

        for cycle in compat_itervalues(cycles):
            profile.add_cycle(cycle)

        # Compute derived events
        profile.validate()
        profile.ratio(TIME_RATIO, TIME)
        profile.call_ratios(CALLS)
        profile.integrate(TOTAL_TIME, TIME)
        profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)

        return profile
# Clone&hack of GprofParser for VTune Amplifier XE 2013 gprof-cc output.
# Tested only with AXE 2013 for Windows.
# - Use total times as reported by AXE.
# - In the absence of call counts, call ratios are faked from the relative
# proportions of total time. This affects only the weighting of the calls.
# - Different header, separator, and end marker.
# - Extra whitespace after function names.
# - You get a full entry for <spontaneous>, which does not have parents.
# - Cycles do have parents. These are saved but unused (as they are
# for functions).
# - Disambiguated "unrecognized call graph entry" error messages.
# Notes:
# - Total time of functions as reported by AXE passes the val3 test.
# - CPU Time:Children in the input is sometimes a negative number. This
# value goes to the variable descendants, which is unused.
# - The format of gprof-cc reports is unaffected by the use of
# -knob enable-call-counts=true (no call counts, ever), or
# -show-as=samples (results are quoted in seconds regardless).
class AXEParser(Parser):
    "Parser for VTune Amplifier XE 2013 gprof-cc report output."

    def __init__(self, fp, **options):
        Parser.__init__(self)
        self.fp = fp
        self.functions = {}   # index -> Struct for each call graph entry
        self.cycles = {}      # cycle number -> Struct for each cycle entry

    def readline(self):
        """Read one line (sans newline), aborting on unexpected end of file."""
        line = self.fp.readline()
        if not line:
            sys.stderr.write('error: unexpected end of file\n')
            sys.exit(1)
        line = line.rstrip('\r\n')
        return line

    _int_re = re.compile(r'^\d+$')
    _float_re = re.compile(r'^\d+\.\d+$')

    def translate(self, mo):
        """Extract a structure from a match object, while translating the types in the process."""
        attrs = {}
        groupdict = mo.groupdict()
        for name, value in compat_iteritems(groupdict):
            if value is None:
                value = None
            elif self._int_re.match(value):
                value = int(value)
            elif self._float_re.match(value):
                value = float(value)
            attrs[name] = value
        return Struct(attrs)

    _cg_header_re = re.compile(
        '^Index |'
        '^-----+ '
    )

    # FIX: raw string -- '\s' in a plain string literal is an invalid escape
    # (DeprecationWarning on modern Python); the pattern is unchanged.
    _cg_footer_re = re.compile(r'^Index\s+Function\s*$')

    _cg_primary_re = re.compile(
        r'^\[(?P<index>\d+)\]?' +
        r'\s+(?P<percentage_time>\d+\.\d+)' +
        r'\s+(?P<self>\d+\.\d+)' +
        r'\s+(?P<descendants>\d+\.\d+)' +
        r'\s+(?P<name>\S.*?)' +
        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
        r'\s+\[(\d+)\]$'
    )

    _cg_parent_re = re.compile(
        r'^\s+(?P<self>\d+\.\d+)?' +
        r'\s+(?P<descendants>\d+\.\d+)?' +
        r'\s+(?P<name>\S.*?)' +
        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
        r'\s+\[(?P<index>\d+)\]$'
    )

    _cg_child_re = _cg_parent_re

    _cg_cycle_header_re = re.compile(
        r'^\[(?P<index>\d+)\]?' +
        r'\s+(?P<percentage_time>\d+\.\d+)' +
        r'\s+(?P<self>\d+\.\d+)' +
        r'\s+(?P<descendants>\d+\.\d+)' +
        r'\s+<cycle\s(?P<cycle>\d+)\sas\sa\swhole>' +
        r'\s+\[(\d+)\]$'
    )

    _cg_cycle_member_re = re.compile(
        r'^\s+(?P<self>\d+\.\d+)?' +
        r'\s+(?P<descendants>\d+\.\d+)?' +
        r'\s+(?P<name>\S.*?)' +
        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
        r'\s+\[(?P<index>\d+)\]$'
    )

    def parse_function_entry(self, lines):
        """Parse one call graph entry: parent lines, primary line, child lines."""
        parents = []
        children = []

        while True:
            if not lines:
                sys.stderr.write('warning: unexpected end of entry\n')
                return
            line = lines.pop(0)
            if line.startswith('['):
                break

            # read function parent line
            mo = self._cg_parent_re.match(line)
            if not mo:
                sys.stderr.write('warning: unrecognized call graph entry (1): %r\n' % line)
            else:
                parent = self.translate(mo)
                if parent.name != '<spontaneous>':
                    parents.append(parent)

        # read primary line
        mo = self._cg_primary_re.match(line)
        if not mo:
            sys.stderr.write('warning: unrecognized call graph entry (2): %r\n' % line)
            return
        else:
            function = self.translate(mo)

        while lines:
            line = lines.pop(0)

            # read function subroutine line
            mo = self._cg_child_re.match(line)
            if not mo:
                sys.stderr.write('warning: unrecognized call graph entry (3): %r\n' % line)
            else:
                child = self.translate(mo)
                if child.name != '<spontaneous>':
                    children.append(child)

        if function.name != '<spontaneous>':
            function.parents = parents
            function.children = children

        self.functions[function.index] = function

    def parse_cycle_entry(self, lines):
        """Parse a <cycle N as a whole> entry, including its parent lines."""

        # Process the parents that were not there in gprof format.
        parents = []
        while True:
            if not lines:
                sys.stderr.write('warning: unexpected end of cycle entry\n')
                return
            line = lines.pop(0)
            if line.startswith('['):
                break

            mo = self._cg_parent_re.match(line)
            if not mo:
                sys.stderr.write('warning: unrecognized call graph entry (6): %r\n' % line)
            else:
                parent = self.translate(mo)
                if parent.name != '<spontaneous>':
                    parents.append(parent)

        # read cycle header line
        mo = self._cg_cycle_header_re.match(line)
        if not mo:
            sys.stderr.write('warning: unrecognized call graph entry (4): %r\n' % line)
            return
        cycle = self.translate(mo)

        # read cycle member lines
        cycle.functions = []
        # FIX: unlike GprofParser, the header line was already popped off
        # `lines` above, so `lines` now holds only member lines; the original
        # `lines[1:]` silently skipped the first cycle member.
        for line in lines:
            mo = self._cg_cycle_member_re.match(line)
            if not mo:
                sys.stderr.write('warning: unrecognized call graph entry (5): %r\n' % line)
                continue
            call = self.translate(mo)
            cycle.functions.append(call)

        cycle.parents = parents
        self.cycles[cycle.cycle] = cycle

    def parse_cg_entry(self, lines):
        # Cycle entries are recognized by their "<cycle N as a whole>" line.
        if any("as a whole" in linelooper for linelooper in lines):
            self.parse_cycle_entry(lines)
        else:
            self.parse_function_entry(lines)

    def parse_cg(self):
        """Parse the call graph."""

        # skip call graph header
        line = self.readline()
        while self._cg_header_re.match(line):
            line = self.readline()

        # process call graph entries
        entry_lines = []
        # An EOF in readline terminates the program without returning.
        while not self._cg_footer_re.match(line):
            if line.isspace():
                # Blank line separates entries.
                self.parse_cg_entry(entry_lines)
                entry_lines = []
            else:
                entry_lines.append(line)
            line = self.readline()

    def parse(self):
        """Parse the AXE gprof-cc report and return the resulting Profile."""
        sys.stderr.write('warning: for axe format, edge weights are unreliable estimates derived from\nfunction total times.\n')
        self.parse_cg()
        self.fp.close()

        profile = Profile()
        profile[TIME] = 0.0

        cycles = {}
        for index in self.cycles:
            cycles[index] = Cycle()

        for entry in compat_itervalues(self.functions):
            # populate the function
            function = Function(entry.index, entry.name)
            function[TIME] = entry.self
            function[TOTAL_TIME_RATIO] = entry.percentage_time / 100.0

            # populate the function calls
            for child in entry.children:
                call = Call(child.index)
                # The following bogus value affects only the weighting of
                # the calls.
                call[TOTAL_TIME_RATIO] = function[TOTAL_TIME_RATIO]

                if child.index not in self.functions:
                    # NOTE: functions that were never called but were discovered by gprof's
                    # static call graph analysis don't have a call graph entry so we need
                    # to add them here
                    # FIXME: Is this applicable?
                    missing = Function(child.index, child.name)
                    # FIX: initialize the newly created function; the original
                    # code mistakenly zeroed the *current* function's time.
                    missing[TIME] = 0.0
                    profile.add_function(missing)

                function.add_call(call)

            profile.add_function(function)

            if entry.cycle is not None:
                try:
                    cycle = cycles[entry.cycle]
                except KeyError:
                    sys.stderr.write('warning: <cycle %u as a whole> entry missing\n' % entry.cycle)
                    cycle = Cycle()
                    cycles[entry.cycle] = cycle
                cycle.add_function(function)

            profile[TIME] = profile[TIME] + function[TIME]

        for cycle in compat_itervalues(cycles):
            profile.add_cycle(cycle)

        # Compute derived events.
        profile.validate()
        profile.ratio(TIME_RATIO, TIME)
        # Lacking call counts, fake call ratios based on total times.
        profile.call_ratios(TOTAL_TIME_RATIO)
        # The TOTAL_TIME_RATIO of functions is already set.  Propagate that
        # total time to the calls.  (TOTAL_TIME is neither set nor used.)
        for function in compat_itervalues(profile.functions):
            for call in compat_itervalues(function.calls):
                if call.ratio is not None:
                    callee = profile.functions[call.callee_id]
                    call[TOTAL_TIME_RATIO] = call.ratio * callee[TOTAL_TIME_RATIO]

        return profile
class CallgrindParser(LineParser):
    """Parser for valgrind's callgrind tool.

    See also:
    - http://valgrind.org/docs/manual/cl-format.html
    """

    # FIX: raw string -- the original plain string relied on '\s'/'\d' being
    # passed through as invalid escapes (DeprecationWarning); pattern unchanged.
    _call_re = re.compile(r'^calls=\s*(\d+)\s+((\d+|\+\d+|-\d+|\*)\s+)+$')

    def __init__(self, infile, event_selected = None, **options):
        LineParser.__init__(self, infile)

        # Textual positions: (table, id) -> name, plus the current name of
        # each position kind (ob/fl/fn/cob/cfl/cfn/...).
        self.position_ids = {}
        self.positions = {}

        # Numeric positions
        self.num_positions = 1
        self.cost_positions = ['line']
        self.last_positions = [0]

        # Events
        self.num_events = 0
        self.cost_events = []
        self.event_selected = event_selected
        self.event_selected_idx = 0

        self.profile = Profile()
        self.profile[SAMPLES] = 0

    def parse(self):
        """Parse the callgrind output and return the resulting Profile."""
        # read lookahead
        self.readline()

        self.parse_key('version')
        self.parse_key('creator')
        while self.parse_part():
            pass
        if not self.eof():
            sys.stderr.write('warning: line %u: unexpected line\n' % self.line_no)
            sys.stderr.write('%s\n' % self.lookahead())

        # compute derived data
        self.profile.validate()
        self.profile.find_cycles()
        self.profile.ratio(TIME_RATIO, SAMPLES)
        self.profile.call_ratios(CALLS)
        self.profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)

        return self.profile

    def parse_part(self):
        """Parse one part (header lines followed by body lines)."""
        if not self.parse_header_line():
            return False
        while self.parse_header_line():
            pass
        if not self.parse_body_line():
            return False
        while self.parse_body_line():
            pass
        return True

    def parse_header_line(self):
        return \
            self.parse_empty() or \
            self.parse_comment() or \
            self.parse_part_detail() or \
            self.parse_description() or \
            self.parse_event_specification() or \
            self.parse_cost_line_def() or \
            self.parse_cost_summary()

    _detail_keys = set(('cmd', 'pid', 'thread', 'part'))

    def parse_part_detail(self):
        return self.parse_keys(self._detail_keys)

    def parse_description(self):
        return self.parse_key('desc') is not None

    def parse_event_specification(self):
        event = self.parse_key('event')
        if event is None:
            return False
        return True

    def parse_cost_line_def(self):
        """Handle 'events:' and 'positions:' definition lines."""
        pair = self.parse_keys(('events', 'positions'))
        if pair is None:
            return False
        key, value = pair
        items = value.split()
        if key == 'events':
            self.num_events = len(items)
            self.cost_events = items
            if self.event_selected:
                try:
                    self.event_selected_idx = self.cost_events.index(self.event_selected)
                except ValueError:
                    sys.stderr.write('Invalid event name %s, valid options are: %s\n' % (self.event_selected, ', '.join(self.cost_events)))
                    sys.exit(1)
        if key == 'positions':
            self.num_positions = len(items)
            self.cost_positions = items
            self.last_positions = [0]*self.num_positions
        return True

    def parse_cost_summary(self):
        pair = self.parse_keys(('summary', 'totals'))
        if pair is None:
            return False
        return True

    def parse_body_line(self):
        return \
            self.parse_empty() or \
            self.parse_comment() or \
            self.parse_cost_line() or \
            self.parse_position_spec() or \
            self.parse_association_spec()

    __subpos_re = r'(0x[0-9a-fA-F]+|\d+|\+\d+|-\d+|\*)'
    _cost_re = re.compile(r'^' +
        __subpos_re + r'( +' + __subpos_re + r')*' +
        r'( +\d+)*' +
    '$')

    def parse_cost_line(self, calls=None):
        """Parse a cost line; `calls` is set when it follows a calls= spec."""
        line = self.lookahead().rstrip()
        mo = self._cost_re.match(line)
        if not mo:
            return False

        function = self.get_function()

        if calls is None:
            # Unlike other aspects, call object (cob) is relative not to the
            # last call object, but to the caller's object (ob), so try to
            # update it when processing a functions cost line
            try:
                self.positions['cob'] = self.positions['ob']
            except KeyError:
                pass

        values = line.split()
        assert len(values) <= self.num_positions + self.num_events

        positions = values[0 : self.num_positions]
        events = values[self.num_positions : ]
        events += ['0']*(self.num_events - len(events))

        # Decode compressed subpositions ('*' repeats, '+N'/'-N' are deltas).
        for i in range(self.num_positions):
            position = positions[i]
            if position == '*':
                position = self.last_positions[i]
            elif position[0] in '-+':
                position = self.last_positions[i] + int(position)
            elif position.startswith('0x'):
                position = int(position, 16)
            else:
                position = int(position)
            self.last_positions[i] = position

        events = [float(event) for event in events]
        event = events[self.event_selected_idx]

        if calls is None:
            # Self cost of the current function.
            function[SAMPLES] += event
            self.profile[SAMPLES] += event
        else:
            # Cost attributed to a call into `callee`.
            callee = self.get_callee()
            callee.called += calls

            try:
                call = function.calls[callee.id]
            except KeyError:
                call = Call(callee.id)
                call[CALLS] = calls
                call[SAMPLES] = event
                function.add_call(call)
            else:
                call[CALLS] += calls
                call[SAMPLES] += event

        self.consume()
        return True

    def parse_association_spec(self):
        """Handle a 'calls=' line plus its following cost line."""
        line = self.lookahead()
        if not line.startswith('calls='):
            return False

        _, values = line.split('=', 1)
        values = values.strip().split()
        calls = int(values[0])
        # values[1:] are the call-target subposition(s); not tracked here.

        self.consume()
        self.parse_cost_line(calls)

        return True

    # FIX: raw string (see _call_re); pattern unchanged.
    _position_re = re.compile(r'^(?P<position>[cj]?(?:ob|fl|fi|fe|fn))=\s*(?:\((?P<id>\d+)\))?(?:\s*(?P<name>.+))?')

    # Maps a position keyword to the name table it shares ids with.
    _position_table_map = {
        'ob': 'ob',
        'fl': 'fl',
        'fi': 'fl',
        'fe': 'fl',
        'fn': 'fn',
        'cob': 'ob',
        'cfl': 'fl',
        'cfi': 'fl',
        'cfe': 'fl',
        'cfn': 'fn',
        'jfi': 'fl',
    }

    # Maps a position keyword to the canonical key stored in self.positions.
    _position_map = {
        'ob': 'ob',
        'fl': 'fl',
        'fi': 'fl',
        'fe': 'fl',
        'fn': 'fn',
        'cob': 'cob',
        'cfl': 'cfl',
        'cfi': 'cfl',
        'cfe': 'cfl',
        'cfn': 'cfn',
        'jfi': 'jfi',
    }

    def parse_position_spec(self):
        line = self.lookahead()

        if line.startswith('jump=') or line.startswith('jcnd='):
            self.consume()
            return True

        mo = self._position_re.match(line)
        if not mo:
            return False

        position, id, name = mo.groups()
        if id:
            table = self._position_table_map[position]
            if name:
                self.position_ids[(table, id)] = name
            else:
                name = self.position_ids.get((table, id), '')
        self.positions[self._position_map[position]] = name

        self.consume()
        return True

    def parse_empty(self):
        if self.eof():
            return False
        line = self.lookahead()
        if line.strip():
            return False
        self.consume()
        return True

    def parse_comment(self):
        line = self.lookahead()
        if not line.startswith('#'):
            return False
        self.consume()
        return True

    _key_re = re.compile(r'^(\w+):')

    def parse_key(self, key):
        """Parse 'key: value' for a single key; return the value or None."""
        pair = self.parse_keys((key,))
        if not pair:
            return None
        key, value = pair
        return value
        # FIX: removed ten lines of dead code that followed this return
        # (an unreachable duplicate of parse_keys referencing an undefined
        # name `keys`).

    def parse_keys(self, keys):
        """Parse 'key: value' for any key in `keys`; return (key, value) or None."""
        line = self.lookahead()
        mo = self._key_re.match(line)
        if not mo:
            return None
        key, value = line.split(':', 1)
        if key not in keys:
            return None
        value = value.strip()
        self.consume()
        return key, value

    def make_function(self, module, filename, name):
        """Get or create the Function for `name` in the profile."""
        # FIXME: module and filename are not being tracked reliably
        #id = '|'.join((module, filename, name))
        id = name
        try:
            function = self.profile.functions[id]
        except KeyError:
            function = Function(id, name)
            if module:
                function.module = os.path.basename(module)
            function[SAMPLES] = 0
            function.called = 0
            self.profile.add_function(function)
        return function

    def get_function(self):
        module = self.positions.get('ob', '')
        filename = self.positions.get('fl', '')
        function = self.positions.get('fn', '')
        return self.make_function(module, filename, function)

    def get_callee(self):
        module = self.positions.get('cob', '')
        # NOTE(review): 'cfi' is never a key of self.positions (_position_map
        # canonicalizes it to 'cfl'), so this is always '' -- harmless since
        # make_function ignores the filename, but worth confirming upstream.
        filename = self.positions.get('cfi', '')
        function = self.positions.get('cfn', '')
        return self.make_function(module, filename, function)
class PerfParser(LineParser):
    """Parser for linux perf callgraph output.

    It expects output generated with

        perf record -g
        perf script | gprof2dot.py --format=perf
    """

    def __init__(self, infile, **options):
        LineParser.__init__(self, infile)
        self.profile = Profile()

    def readline(self):
        # Override LineParser.readline to ignore comment lines
        while True:
            LineParser.readline(self)
            if self.eof() or not self.lookahead().startswith('#'):
                break

    def parse(self):
        """Parse the perf script output and return the resulting Profile."""
        # read lookahead
        self.readline()

        profile = self.profile
        profile[SAMPLES] = 0
        while not self.eof():
            self.parse_event()

        # compute derived data
        profile.validate()
        profile.find_cycles()
        profile.ratio(TIME_RATIO, SAMPLES)
        profile.call_ratios(SAMPLES2)
        if totalMethod == "callratios":
            # Heuristic approach.  TOTAL_SAMPLES is unused.
            profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
        elif totalMethod == "callstacks":
            # Use the actual call chains for functions.
            profile[TOTAL_SAMPLES] = profile[SAMPLES]
            profile.ratio(TOTAL_TIME_RATIO, TOTAL_SAMPLES)
            # Then propagate that total time to the calls.
            for function in compat_itervalues(profile.functions):
                for call in compat_itervalues(function.calls):
                    if call.ratio is not None:
                        callee = profile.functions[call.callee_id]
                        call[TOTAL_TIME_RATIO] = call.ratio * callee[TOTAL_TIME_RATIO]
        else:
            assert False

        return profile

    def parse_event(self):
        """Parse one sample: a header line plus its call chain."""
        if self.eof():
            return

        line = self.consume()
        assert line

        callchain = self.parse_callchain()
        if not callchain:
            return

        # The first entry is the sampled (deepest) function.
        callee = callchain[0]
        callee[SAMPLES] += 1
        self.profile[SAMPLES] += 1

        # Walk up the stack, accumulating caller->callee edge samples.
        for caller in callchain[1:]:
            try:
                call = caller.calls[callee.id]
            except KeyError:
                call = Call(callee.id)
                call[SAMPLES2] = 1
                caller.add_call(call)
            else:
                call[SAMPLES2] += 1

            callee = caller

        # Increment TOTAL_SAMPLES only once on each function.
        stack = set(callchain)
        for function in stack:
            function[TOTAL_SAMPLES] += 1

    def parse_callchain(self):
        callchain = []
        while self.lookahead():
            function = self.parse_call()
            if function is None:
                break
            callchain.append(function)
        if self.lookahead() == '':
            self.consume()
        return callchain

    call_re = re.compile(r'^\s+(?P<address>[0-9a-fA-F]+)\s+(?P<symbol>.*)\s+\((?P<module>[^)]*)\)$')

    def parse_call(self):
        """Parse one stack entry line; return its Function or None."""
        line = self.consume()
        mo = self.call_re.match(line)
        if not mo:
            # FIX: the original had `assert mo` here, which crashed on any
            # line that does not look like a stack entry and made this
            # graceful bail-out unreachable.  Skip the entry instead.
            return None

        function_name = mo.group('symbol')
        if not function_name:
            function_name = mo.group('address')

        module = mo.group('module')

        function_id = function_name + ':' + module

        try:
            function = self.profile.functions[function_id]
        except KeyError:
            function = Function(function_id, function_name)
            function.module = os.path.basename(module)
            function[SAMPLES] = 0
            function[TOTAL_SAMPLES] = 0
            self.profile.add_function(function)

        return function
class OprofileParser(LineParser):
    """Parser for oprofile callgraph output.

    See also:
    - http://oprofile.sourceforge.net/doc/opreport.html#opreport-callgraph
    """

    # Regex fragment for each possible opreport column; the actual entry
    # regex is assembled in parse_header() from the columns present.
    _fields_re = {
        'samples': r'(\d+)',
        '%': r'(\S+)',
        'linenr info': r'(?P<source>\(no location information\)|\S+:\d+)',
        'image name': r'(?P<image>\S+(?:\s\(tgid:[^)]*\))?)',
        'app name': r'(?P<application>\S+)',
        'symbol name': r'(?P<symbol>\(no symbols\)|.+?)',
    }

    def __init__(self, infile, **options):
        LineParser.__init__(self, infile)
        self.entries = {}
        self.entry_re = None

    def add_entry(self, callers, function, callees):
        """Merge one parsed entry into self.entries, accumulating samples."""
        try:
            entry = self.entries[function.id]
        except KeyError:
            self.entries[function.id] = (callers, function, callees)
        else:
            callers_total, function_total, callees_total = entry
            self.update_subentries_dict(callers_total, callers)
            function_total.samples += function.samples
            self.update_subentries_dict(callees_total, callees)

    def update_subentries_dict(self, totals, partials):
        for partial in compat_itervalues(partials):
            try:
                total = totals[partial.id]
            except KeyError:
                totals[partial.id] = partial
            else:
                total.samples += partial.samples

    def parse(self):
        """Parse the opreport callgraph output and return the Profile."""
        # read lookahead
        self.readline()

        self.parse_header()
        while self.lookahead():
            self.parse_entry()

        profile = Profile()

        # populate the profile
        profile[SAMPLES] = 0
        for _callers, _function, _callees in compat_itervalues(self.entries):
            function = Function(_function.id, _function.name)
            function[SAMPLES] = _function.samples
            profile.add_function(function)
            profile[SAMPLES] += _function.samples

            if _function.application:
                function.process = os.path.basename(_function.application)
            if _function.image:
                function.module = os.path.basename(_function.image)

            # Add one call per callee, skipping the "[self]" pseudo-entry.
            # (FIX: removed a dead loop that summed callee samples into an
            # unused total, and an unused reverse_call_samples dict.)
            for _callee in compat_itervalues(_callees):
                if not _callee.self:
                    call = Call(_callee.id)
                    call[SAMPLES2] = _callee.samples
                    function.add_call(call)

        # compute derived data
        profile.validate()
        profile.find_cycles()
        profile.ratio(TIME_RATIO, SAMPLES)
        profile.call_ratios(SAMPLES2)
        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)

        return profile

    def parse_header(self):
        """Skip to the column header and build the entry regex from it."""
        while not self.match_header():
            self.consume()
        line = self.lookahead()
        fields = re.split(r'\s\s+', line)
        entry_re = r'^\s*' + r'\s+'.join([self._fields_re[field] for field in fields]) + r'(?P<self>\s+\[self\])?$'
        self.entry_re = re.compile(entry_re)
        self.skip_separator()

    def parse_entry(self):
        """Parse one separator-delimited entry: callers, primary, callees."""
        callers = self.parse_subentries()
        if self.match_primary():
            function = self.parse_subentry()
            if function is not None:
                callees = self.parse_subentries()
                self.add_entry(callers, function, callees)
        self.skip_separator()

    def parse_subentries(self):
        subentries = {}
        while self.match_secondary():
            subentry = self.parse_subentry()
            subentries[subentry.id] = subentry
        return subentries

    def parse_subentry(self):
        """Parse one entry line into a Struct with samples, source, id, etc."""
        entry = Struct()
        line = self.consume()
        mo = self.entry_re.match(line)
        if not mo:
            raise ParseError('failed to parse', line)
        fields = mo.groupdict()
        entry.samples = int(mo.group(1))
        if 'source' in fields and fields['source'] != '(no location information)':
            source = fields['source']
            filename, lineno = source.split(':')
            entry.filename = filename
            entry.lineno = int(lineno)
        else:
            source = ''
            entry.filename = None
            entry.lineno = None
        entry.image = fields.get('image', '')
        entry.application = fields.get('application', '')
        if 'symbol' in fields and fields['symbol'] != '(no symbols)':
            entry.symbol = fields['symbol']
        else:
            entry.symbol = ''
        if entry.symbol.startswith('"') and entry.symbol.endswith('"'):
            entry.symbol = entry.symbol[1:-1]
        entry.id = ':'.join((entry.application, entry.image, source, entry.symbol))
        # "[self]" marks the primary function's own samples within its entry.
        entry.self = fields.get('self', None) is not None
        if entry.self:
            entry.id += ':self'
        if entry.symbol:
            entry.name = entry.symbol
        else:
            entry.name = entry.image
        return entry

    def skip_separator(self):
        while not self.match_separator():
            self.consume()
        self.consume()

    def match_header(self):
        line = self.lookahead()
        return line.startswith('samples')

    def match_separator(self):
        line = self.lookahead()
        return line == '-'*len(line)

    def match_primary(self):
        # The primary line of an entry is not indented.
        line = self.lookahead()
        return not line[:1].isspace()

    def match_secondary(self):
        line = self.lookahead()
        return line[:1].isspace()
class HProfParser(LineParser):
    """Parser for java hprof output

    See also:
    - http://java.sun.com/developer/technicalArticles/Programming/HPROF.html
    """

    trace_re = re.compile(r'\t(.*)\((.*):(.*)\)')
    trace_id_re = re.compile(r'^TRACE (\d+):$')

    def __init__(self, infile, **options):
        LineParser.__init__(self, infile)
        self.traces = {}   # trace id -> list of (function, file, line) frames
        self.samples = {}  # trace id -> (count, method)

    def parse(self):
        """Parse the hprof output and return the resulting Profile."""
        # read lookahead
        self.readline()

        # Skip ahead to the TRACE section, then to the CPU samples table.
        while not self.lookahead().startswith('------'): self.consume()
        while not self.lookahead().startswith('TRACE '): self.consume()

        self.parse_traces()

        while not self.lookahead().startswith('CPU'):
            self.consume()

        self.parse_samples()

        # populate the profile
        profile = Profile()
        profile[SAMPLES] = 0

        functions = {}

        # build up callgraph
        for id, trace in compat_iteritems(self.traces):
            if not id in self.samples: continue
            mtime = self.samples[id][0]
            last = None

            for func, file, line in trace:
                if not func in functions:
                    function = Function(func, func)
                    function[SAMPLES] = 0
                    profile.add_function(function)
                    functions[func] = function

                function = functions[func]
                # allocate time to the deepest method in the trace
                if not last:
                    function[SAMPLES] += mtime
                    profile[SAMPLES] += mtime
                else:
                    c = function.get_call(last)
                    c[SAMPLES2] += mtime

                last = func

        # compute derived data
        profile.validate()
        profile.find_cycles()
        profile.ratio(TIME_RATIO, SAMPLES)
        profile.call_ratios(SAMPLES2)
        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)

        return profile

    def parse_traces(self):
        while self.lookahead().startswith('TRACE '):
            self.parse_trace()

    def parse_trace(self):
        """Parse one TRACE block into self.traces."""
        l = self.consume()
        mo = self.trace_id_re.match(l)
        tid = mo.group(1)
        # FIX: removed an unused `last = None` local.
        trace = []

        while self.lookahead().startswith('\t'):
            l = self.consume()
            match = self.trace_re.search(l)
            if not match:
                #sys.stderr.write('Invalid line: %s\n' % l)
                break
            else:
                function_name, file, line = match.groups()
                trace += [(function_name, file, line)]

        self.traces[int(tid)] = trace

    def parse_samples(self):
        # Skip the "CPU SAMPLES BEGIN" line and the column header line.
        self.consume()
        self.consume()

        while not self.lookahead().startswith('CPU'):
            rank, percent_self, percent_accum, count, traceid, method = self.lookahead().split()
            self.samples[int(traceid)] = (int(count), method)
            self.consume()
class SysprofParser(XmlParser):
    """Parser for sysprof's XML profile dumps."""

    def __init__(self, stream, **options):
        XmlParser.__init__(self, stream)

    def parse(self):
        """Parse the <profile> document and return the resulting Profile."""
        objects = {}
        nodes = {}

        self.element_start('profile')
        while self.token.type == XML_ELEMENT_START:
            if self.token.name_or_data == 'objects':
                assert not objects
                objects = self.parse_items('objects')
            elif self.token.name_or_data == 'nodes':
                assert not nodes
                nodes = self.parse_items('nodes')
            else:
                # Skip any other top-level value element.
                self.parse_value(self.token.name_or_data)
        self.element_end('profile')

        return self.build_profile(objects, nodes)

    def parse_items(self, name):
        """Parse a container element (e.g. <objects>) into an id -> values dict."""
        assert name[-1] == 's'
        items = {}
        self.element_start(name)
        while self.token.type == XML_ELEMENT_START:
            # Child element name is the singular of the container name.
            id, values = self.parse_item(name[:-1])
            assert id not in items
            items[id] = values
        self.element_end(name)
        return items

    def parse_item(self, name):
        """Parse a single item element; returns (numeric id, values dict)."""
        attrs = self.element_start(name)
        id = int(attrs['id'])
        values = self.parse_values()
        self.element_end(name)
        return id, values

    def parse_values(self):
        """Parse consecutive child value elements into a name -> value dict."""
        values = {}
        while self.token.type == XML_ELEMENT_START:
            name = self.token.name_or_data
            value = self.parse_value(name)
            assert name not in values
            values[name] = value
        return values

    def parse_value(self, tag):
        """Parse one leaf element, converting its text to int or unquoted str."""
        self.element_start(tag)
        value = self.character_data()
        self.element_end(tag)
        if value.isdigit():
            return int(value)
        if value.startswith('"') and value.endswith('"'):
            return value[1:-1]
        return value

    def build_profile(self, objects, nodes):
        """Build a Profile from the parsed <objects> and <nodes> tables."""
        profile = Profile()

        profile[SAMPLES] = 0
        for id, object in compat_iteritems(objects):
            # Ignore fake objects (process names, modules, "Everything", "kernel", etc.)
            if object['self'] == 0:
                continue

            function = Function(id, object['name'])
            function[SAMPLES] = object['self']
            profile.add_function(function)
            profile[SAMPLES] += function[SAMPLES]

        for id, node in compat_iteritems(nodes):
            # Ignore fake calls
            if node['self'] == 0:
                continue

            # Find a non-ignored parent
            # (walk up the node tree until the caller is a real object)
            parent_id = node['parent']
            while parent_id != 0:
                parent = nodes[parent_id]
                caller_id = parent['object']
                if objects[caller_id]['self'] != 0:
                    break
                parent_id = parent['parent']
            if parent_id == 0:
                # No real caller found: a root sample, not an edge.
                continue

            callee_id = node['object']

            assert objects[caller_id]['self']
            assert objects[callee_id]['self']

            function = profile.functions[caller_id]

            samples = node['self']
            try:
                call = function.calls[callee_id]
            except KeyError:
                call = Call(callee_id)
                call[SAMPLES2] = samples
                function.add_call(call)
            else:
                call[SAMPLES2] += samples

        # Compute derived events
        profile.validate()
        profile.find_cycles()
        profile.ratio(TIME_RATIO, SAMPLES)
        profile.call_ratios(SAMPLES2)
        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)

        return profile
class XPerfParser(Parser):
    """Parser for CSVs generated by XPerf, from Microsoft Windows Performance Tools.
    """

    def __init__(self, stream, **options):
        Parser.__init__(self)
        self.stream = stream
        self.profile = Profile()
        self.profile[SAMPLES] = 0
        # Maps CSV column name -> column index; filled in by parse_header().
        self.column = {}

    def parse(self):
        """Parse the CSV stream and return the populated Profile."""
        import csv
        reader = csv.reader(
            self.stream,
            delimiter = ',',
            quotechar = None,
            escapechar = None,
            doublequote = False,
            skipinitialspace = True,
            lineterminator = '\r\n',
            quoting = csv.QUOTE_NONE)
        # The first row names the columns; every other row is a sample.
        header = True
        for row in reader:
            if header:
                self.parse_header(row)
                header = False
            else:
                self.parse_row(row)

        # compute derived data
        self.profile.validate()
        self.profile.find_cycles()
        self.profile.ratio(TIME_RATIO, SAMPLES)
        self.profile.call_ratios(SAMPLES2)
        self.profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)

        return self.profile

    def parse_header(self, row):
        # Record the position of every column by name.
        for column in range(len(row)):
            name = row[column]
            assert name not in self.column
            self.column[name] = column

    def parse_row(self, row):
        fields = {}
        for name, column in compat_iteritems(self.column):
            value = row[column]
            # Coerce numeric cells; anything unparsable stays a string.
            for factory in int, float:
                try:
                    value = factory(value)
                except ValueError:
                    pass
                else:
                    break
            fields[name] = value

        process = fields['Process Name']
        symbol = fields['Module'] + '!' + fields['Function']
        weight = fields['Weight']
        count = fields['Count']

        # Skip samples attributed to the Idle pseudo-process.
        if process == 'Idle':
            return

        function = self.get_function(process, symbol)
        function[SAMPLES] += weight * count
        self.profile[SAMPLES] += weight * count

        stack = fields['Stack']
        if stack != '?':
            stack = stack.split('/')
            assert stack[0] == '[Root]'
            if stack[-1] != symbol:
                # XXX: some cases the sampled function does not appear in the stack
                stack.append(symbol)
            # Accumulate caller -> callee sample counts down the stack.
            caller = None
            for symbol in stack[1:]:
                callee = self.get_function(process, symbol)
                if caller is not None:
                    try:
                        call = caller.calls[callee.id]
                    except KeyError:
                        call = Call(callee.id)
                        call[SAMPLES2] = count
                        caller.add_call(call)
                    else:
                        call[SAMPLES2] += count
                caller = callee

    def get_function(self, process, symbol):
        """Return the Function for process!symbol, creating it on first use."""
        function_id = process + '!' + symbol
        try:
            function = self.profile.functions[function_id]
        except KeyError:
            module, name = symbol.split('!', 1)
            function = Function(function_id, name)
            function.process = process
            function.module = module
            function[SAMPLES] = 0
            self.profile.add_function(function)
        return function
class SleepyParser(Parser):
    """Parser for Very Sleepy profiler captures (a zip database containing
    Symbols.txt and Callstacks.txt).

    See also:
    - http://www.codersnotes.com/sleepy/
    - http://sleepygraph.sourceforge.net/
    """

    stdinInput = False

    def __init__(self, filename, **options):
        Parser.__init__(self)

        from zipfile import ZipFile

        self.database = ZipFile(filename)

        # symbol id -> Function; filled in by parse_symbols().
        self.symbols = {}
        self.calls = {}

        self.profile = Profile()

    # Matches one Symbols.txt line: id "module" "procname" "sourcefile" line
    _symbol_re = re.compile(
        r'^(?P<id>\w+)' +
        r'\s+"(?P<module>[^"]*)"' +
        r'\s+"(?P<procname>[^"]*)"' +
        r'\s+"(?P<sourcefile>[^"]*)"' +
        r'\s+(?P<sourceline>\d+)$'
    )

    def openEntry(self, name):
        # Some versions of verysleepy use lowercase filenames
        for database_name in self.database.namelist():
            if name.lower() == database_name.lower():
                name = database_name
                break

        # NOTE(review): ZipFile.open() mode 'rU' is deprecated and was removed
        # in Python 3.9 -- confirm target interpreter before upgrading.
        return self.database.open(name, 'rU')

    def parse_symbols(self):
        """Populate self.symbols with one Function per Symbols.txt entry."""
        for line in self.openEntry('Symbols.txt'):
            line = line.decode('UTF-8')

            mo = self._symbol_re.match(line)
            if mo:
                symbol_id, module, procname, sourcefile, sourceline = mo.groups()

                function_id = ':'.join([module, procname])

                try:
                    function = self.profile.functions[function_id]
                except KeyError:
                    function = Function(function_id, procname)
                    function.module = module
                    function[SAMPLES] = 0
                    self.profile.add_function(function)

                # Several symbol ids may map to the same function.
                self.symbols[symbol_id] = function

    def parse_callstacks(self):
        """Accumulate sample counts and caller/callee edges from Callstacks.txt."""
        for line in self.openEntry('Callstacks.txt'):
            line = line.decode('UTF-8')

            fields = line.split()
            samples = float(fields[0])
            callstack = fields[1:]

            callstack = [self.symbols[symbol_id] for symbol_id in callstack]

            # First entry is the sampled (leaf) function; the remaining
            # entries are its successive callers.
            callee = callstack[0]
            callee[SAMPLES] += samples
            self.profile[SAMPLES] += samples

            for caller in callstack[1:]:
                try:
                    call = caller.calls[callee.id]
                except KeyError:
                    call = Call(callee.id)
                    call[SAMPLES2] = samples
                    caller.add_call(call)
                else:
                    call[SAMPLES2] += samples

                callee = caller

    def parse(self):
        profile = self.profile
        profile[SAMPLES] = 0

        self.parse_symbols()
        self.parse_callstacks()

        # Compute derived events
        profile.validate()
        profile.find_cycles()
        profile.ratio(TIME_RATIO, SAMPLES)
        profile.call_ratios(SAMPLES2)
        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)

        return profile
class AQtimeTable:
    """In-memory representation of one AQtime result table.

    A table has a name, a list of field (column) names, and rows.  Each row
    is a (values, children) pair, where children is a sequence of nested
    AQtimeTable instances.
    """

    def __init__(self, name, fields, **options):
        self.name = name
        self.fields = fields
        # Maps field name -> column index for quick lookups.
        self.field_column = {}
        for column in range(len(fields)):
            self.field_column[fields[column]] = column
        self.rows = []

    def __len__(self):
        return len(self.rows)

    def __iter__(self):
        """Yield (field-name -> value dict, child-name -> table dict) per row.

        Note: the original implementation ended with ``raise StopIteration``,
        which inside a generator raises RuntimeError on Python 3.7+ (PEP 479).
        Simply falling off the end terminates iteration correctly.
        """
        for values, children in self.rows:
            fields = {}
            for name, value in zip(self.fields, values):
                fields[name] = value
            children = dict([(child.name, child) for child in children])
            yield fields, children

    def add_row(self, values, children=()):
        self.rows.append((values, children))
class AQtimeParser(XmlParser):
    """Parser for XML results exported by the AQtime profiler."""

    def __init__(self, stream, **options):
        XmlParser.__init__(self, stream)
        # Maps table id -> (name, field types, field names), filled from the
        # HEADERS section and consulted while parsing DATA sections.
        self.tables = {}

    def parse(self):
        self.element_start('AQtime_Results')
        self.parse_headers()
        results = self.parse_results()
        self.element_end('AQtime_Results')
        return self.build_profile(results)

    def parse_headers(self):
        self.element_start('HEADERS')
        while self.token.type == XML_ELEMENT_START:
            self.parse_table_header()
        self.element_end('HEADERS')

    def parse_table_header(self):
        """Parse one TABLE_HEADER element and register its column layout."""
        attrs = self.element_start('TABLE_HEADER')
        name = attrs['NAME']
        id = int(attrs['ID'])
        field_types = []
        field_names = []
        while self.token.type == XML_ELEMENT_START:
            field_type, field_name = self.parse_table_field()
            field_types.append(field_type)
            field_names.append(field_name)
        self.element_end('TABLE_HEADER')
        self.tables[id] = name, field_types, field_names

    def parse_table_field(self):
        attrs = self.element_start('TABLE_FIELD')
        type = attrs['TYPE']
        name = self.character_data()
        self.element_end('TABLE_FIELD')
        return type, name

    def parse_results(self):
        self.element_start('RESULTS')
        table = self.parse_data()
        self.element_end('RESULTS')
        return table

    def parse_data(self):
        """Parse one DATA element into an AQtimeTable."""
        rows = []
        attrs = self.element_start('DATA')
        table_id = int(attrs['TABLE_ID'])
        # Look up the column layout captured from the matching TABLE_HEADER.
        table_name, field_types, field_names = self.tables[table_id]
        table = AQtimeTable(table_name, field_names)
        while self.token.type == XML_ELEMENT_START:
            row, children = self.parse_row(field_types)
            table.add_row(row, children)
        self.element_end('DATA')
        return table

    def parse_row(self, field_types):
        # Fields may arrive in any order; place each by its declared ID.
        row = [None]*len(field_types)
        children = []
        self.element_start('ROW')
        while self.token.type == XML_ELEMENT_START:
            if self.token.name_or_data == 'FIELD':
                field_id, field_value = self.parse_field(field_types)
                row[field_id] = field_value
            elif self.token.name_or_data == 'CHILDREN':
                children = self.parse_children()
            else:
                raise XmlTokenMismatch("<FIELD ...> or <CHILDREN ...>", self.token)
        self.element_end('ROW')
        return row, children

    def parse_field(self, field_types):
        attrs = self.element_start('FIELD')
        id = int(attrs['ID'])
        type = field_types[id]
        value = self.character_data()
        # Coerce the text according to the declared column type.
        if type == 'Integer':
            value = int(value)
        elif type == 'Float':
            value = float(value)
        elif type == 'Address':
            value = int(value)
        elif type == 'String':
            pass
        else:
            assert False
        self.element_end('FIELD')
        return id, value

    def parse_children(self):
        children = []
        self.element_start('CHILDREN')
        while self.token.type == XML_ELEMENT_START:
            table = self.parse_data()
            # NOTE(review): `children` is a list of tables, so this compares a
            # string against AQtimeTable objects and is always true -- the
            # intended duplicate-name check never fires; confirm and fix.
            assert table.name not in children
            children.append(table)
        self.element_end('CHILDREN')
        return children

    def build_profile(self, results):
        """Convert the parsed top-level 'Routines' table into a Profile."""
        assert results.name == 'Routines'
        profile = Profile()
        profile[TIME] = 0.0
        for fields, tables in results:
            function = self.build_function(fields)
            children = tables['Children']
            for fields, _ in children:
                call = self.build_call(fields)
                function.add_call(call)
            profile.add_function(function)
            profile[TIME] = profile[TIME] + function[TIME]
        profile[TOTAL_TIME] = profile[TIME]
        profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
        return profile

    def build_function(self, fields):
        function = Function(self.build_id(fields), self.build_name(fields))
        function[TIME] = fields['Time']
        function[TOTAL_TIME] = fields['Time with Children']
        #function[TIME_RATIO] = fields['% Time']/100.0
        #function[TOTAL_TIME_RATIO] = fields['% with Children']/100.0
        return function

    def build_call(self, fields):
        call = Call(self.build_id(fields))
        call[TIME] = fields['Time']
        call[TOTAL_TIME] = fields['Time with Children']
        #call[TIME_RATIO] = fields['% Time']/100.0
        #call[TOTAL_TIME_RATIO] = fields['% with Children']/100.0
        return call

    def build_id(self, fields):
        # Module/unit/routine together uniquely identify a routine.
        return ':'.join([fields['Module Name'], fields['Unit Name'], fields['Routine Name']])

    def build_name(self, fields):
        # TODO: use more fields
        return fields['Routine Name']
class PstatsParser:
    """Parser for Python profiling statistics saved with the pstats module."""

    stdinInput = False
    multipleInput = True

    def __init__(self, *filename, **options):
        import pstats
        try:
            self.stats = pstats.Stats(*filename)
        except ValueError:
            # On Python 2, fall back to hotshot stats files.
            if sys.version_info[0] >= 3:
                raise
            import hotshot.stats
            self.stats = hotshot.stats.load(filename[0])
        self.profile = Profile()
        # Maps pstats keys (filename, line, name) -> our function ids.
        self.function_ids = {}

    def get_function_name(self, key):
        """Format a pstats (filename, line, name) key as "module:line:name"."""
        filename, line, name = key
        module = os.path.splitext(filename)[0]
        module = os.path.basename(module)
        return "%s:%d:%s" % (module, line, name)

    def get_function(self, key):
        """Return the Function for a pstats key, creating it on first use."""
        try:
            id = self.function_ids[key]
        except KeyError:
            id = len(self.function_ids)
            name = self.get_function_name(key)
            function = Function(id, name)
            self.profile.functions[id] = function
            self.function_ids[key] = id
        else:
            function = self.profile.functions[id]
        return function

    def parse(self):
        self.profile[TIME] = 0.0
        self.profile[TOTAL_TIME] = self.stats.total_tt
        for fn, (cc, nc, tt, ct, callers) in compat_iteritems(self.stats.stats):
            callee = self.get_function(fn)
            callee.called = nc
            callee[TOTAL_TIME] = ct
            callee[TIME] = tt
            self.profile[TIME] += tt
            self.profile[TOTAL_TIME] = max(self.profile[TOTAL_TIME], ct)
            for fn, value in compat_iteritems(callers):
                caller = self.get_function(fn)
                call = Call(callee.id)
                if isinstance(value, tuple):
                    # Newer pstats store (nc, cc, tt, ct) tuples per caller;
                    # use range() -- xrange is Python-2-only.
                    for i in range(0, len(value), 4):
                        nc, cc, tt, ct = value[i:i+4]
                        if CALLS in call:
                            call[CALLS] += cc
                        else:
                            call[CALLS] = cc
                        if TOTAL_TIME in call:
                            call[TOTAL_TIME] += ct
                        else:
                            call[TOTAL_TIME] = ct
                else:
                    # Older pstats store just the call count.
                    call[CALLS] = value
                    call[TOTAL_TIME] = ratio(value, nc)*ct
                caller.add_call(call)

        #self.stats.print_stats()
        #self.stats.print_callees()

        # Compute derived events
        self.profile.validate()
        self.profile.ratio(TIME_RATIO, TIME)
        self.profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)

        return self.profile
class Theme:
    """Color and typography scheme used when emitting the DOT graph.

    Colors are (hue, saturation, lightness) triples; node and edge
    appearance is interpolated between ``mincolor`` and ``maxcolor``
    according to a weight in [0, 1], optionally skewed.
    """

    def __init__(self,
                 bgcolor = (0.0, 0.0, 1.0),
                 mincolor = (0.0, 0.0, 0.0),
                 maxcolor = (0.0, 0.0, 1.0),
                 fontname = "Arial",
                 fontcolor = "white",
                 nodestyle = "filled",
                 minfontsize = 10.0,
                 maxfontsize = 10.0,
                 minpenwidth = 0.5,
                 maxpenwidth = 4.0,
                 gamma = 2.2,
                 skew = 1.0):
        self.fontname = fontname
        self.fontcolor = fontcolor
        self.nodestyle = nodestyle
        self.bgcolor = bgcolor
        self.mincolor = mincolor
        self.maxcolor = maxcolor
        self.minfontsize = minfontsize
        self.maxfontsize = maxfontsize
        self.minpenwidth = minpenwidth
        self.maxpenwidth = maxpenwidth
        self.gamma = gamma
        self.skew = skew

    def graph_bgcolor(self):
        h, s, l = self.bgcolor
        return self.hsl_to_rgb(h, s, l)

    def graph_fontname(self):
        return self.fontname

    def graph_fontcolor(self):
        return self.fontcolor

    def graph_fontsize(self):
        return self.minfontsize

    def node_bgcolor(self, weight):
        return self.color(weight)

    def node_fgcolor(self, weight):
        # Filled nodes draw their outline/text in the background color.
        if self.nodestyle != "filled":
            return self.color(weight)
        return self.graph_bgcolor()

    def node_fontsize(self, weight):
        return self.fontsize(weight)

    def node_style(self):
        return self.nodestyle

    def edge_color(self, weight):
        return self.color(weight)

    def edge_fontsize(self, weight):
        return self.fontsize(weight)

    def edge_penwidth(self, weight):
        return max(weight*self.maxpenwidth, self.minpenwidth)

    def edge_arrowsize(self, weight):
        return 0.5 * math.sqrt(self.edge_penwidth(weight))

    def fontsize(self, weight):
        return max(weight**2 * self.maxfontsize, self.minfontsize)

    def color(self, weight):
        """Interpolate between mincolor and maxcolor by ``weight``."""
        weight = min(max(weight, 0.0), 1.0)

        hmin, smin, lmin = self.mincolor
        hmax, smax, lmax = self.maxcolor

        if self.skew < 0:
            raise ValueError("Skew must be greater than 0")
        if self.skew == 1.0:
            # Plain linear interpolation.
            h = hmin + weight*(hmax - hmin)
            s = smin + weight*(smax - smin)
            l = lmin + weight*(lmax - lmin)
        else:
            # Exponential remapping of the weight for skew != 1.
            base = self.skew
            h = hmin + ((hmax-hmin)*(-1.0 + (base ** weight)) / (base - 1.0))
            s = smin + ((smax-smin)*(-1.0 + (base ** weight)) / (base - 1.0))
            l = lmin + ((lmax-lmin)*(-1.0 + (base ** weight)) / (base - 1.0))

        return self.hsl_to_rgb(h, s, l)

    def hsl_to_rgb(self, h, s, l):
        """Convert a color from the HSL model to RGB.

        See also:
        - http://www.w3.org/TR/css3-color/#hsl-color
        """
        h = h % 1.0
        s = min(max(s, 0.0), 1.0)
        l = min(max(l, 0.0), 1.0)

        if l <= 0.5:
            m2 = l*(s + 1.0)
        else:
            m2 = l + s - l*s
        m1 = l*2.0 - m2

        channels = [
            self._hue_to_rgb(m1, m2, h + 1.0/3.0),
            self._hue_to_rgb(m1, m2, h),
            self._hue_to_rgb(m1, m2, h - 1.0/3.0),
        ]
        # Apply gamma correction per channel.
        return tuple(c ** self.gamma for c in channels)

    def _hue_to_rgb(self, m1, m2, h):
        if h < 0.0:
            h += 1.0
        elif h > 1.0:
            h -= 1.0
        if h*6 < 1.0:
            return m1 + (m2 - m1)*h*6.0
        if h*2 < 1.0:
            return m2
        if h*3 < 2.0:
            return m1 + (m2 - m1)*(2.0/3.0 - h)*6.0
        return m1
# Predefined color schemes, selectable on the command line via -c/--colormap.

TEMPERATURE_COLORMAP = Theme(
    mincolor = (2.0/3.0, 0.80, 0.25), # dark blue
    maxcolor = (0.0, 1.0, 0.5), # saturated red
    gamma = 1.0
)

PINK_COLORMAP = Theme(
    mincolor = (0.0, 1.0, 0.90), # pink
    maxcolor = (0.0, 1.0, 0.5), # saturated red
)

GRAY_COLORMAP = Theme(
    mincolor = (0.0, 0.0, 0.85), # light gray
    maxcolor = (0.0, 0.0, 0.0), # black
)

BW_COLORMAP = Theme(
    minfontsize = 8.0,
    maxfontsize = 24.0,
    mincolor = (0.0, 0.0, 0.0), # black
    maxcolor = (0.0, 0.0, 0.0), # black
    minpenwidth = 0.1,
    maxpenwidth = 8.0,
)

# High-contrast theme meant for printing.
PRINT_COLORMAP = Theme(
    minfontsize = 18.0,
    maxfontsize = 30.0,
    fontcolor = "black",
    nodestyle = "solid",
    mincolor = (0.0, 0.0, 0.0), # black
    maxcolor = (0.0, 0.0, 0.0), # black
    minpenwidth = 0.1,
    maxpenwidth = 8.0,
)
class DotWriter:
    """Writer for the DOT language.

    See also:
    - "The DOT Language" specification
      http://www.graphviz.org/doc/info/lang.html
    """

    # Set by Main from the --strip / --wrap command line options.
    strip = False
    wrap = False

    def __init__(self, fp):
        self.fp = fp

    def wrap_function_name(self, name):
        """Split the function name on multiple lines."""
        if len(name) > 32:
            ratio = 2.0/3.0
            height = max(int(len(name)/(1.0 - ratio) + 0.5), 1)
            width = max(len(name)/height, 32)
            # NOTE(review): on Python 3 this width is a float while textwrap
            # expects an int -- confirm the intended interpreter version.
            # TODO: break lines in symbols
            name = textwrap.fill(name, width, break_long_words=False)

        # Take away spaces
        name = name.replace(", ", ",")
        name = name.replace("> >", ">>")
        name = name.replace("> >", ">>") # catch consecutive

        return name

    # Events rendered in node and edge labels, respectively.
    show_function_events = [TOTAL_TIME_RATIO, TIME_RATIO]
    show_edge_events = [TOTAL_TIME_RATIO, CALLS]

    def graph(self, profile, theme):
        """Emit the whole profile as a digraph styled by ``theme``."""
        self.begin_graph()

        fontname = theme.graph_fontname()
        fontcolor = theme.graph_fontcolor()
        nodestyle = theme.node_style()

        self.attr('graph', fontname=fontname, ranksep=0.25, nodesep=0.125)
        self.attr('node', fontname=fontname, shape="box", style=nodestyle, fontcolor=fontcolor, width=0, height=0)
        self.attr('edge', fontname=fontname)

        for function in compat_itervalues(profile.functions):
            labels = []
            if function.process is not None:
                labels.append(function.process)
            if function.module is not None:
                labels.append(function.module)

            if self.strip:
                function_name = function.stripped_name()
            else:
                function_name = function.name
            if self.wrap:
                function_name = self.wrap_function_name(function_name)
            labels.append(function_name)

            for event in self.show_function_events:
                if event in function.events:
                    label = event.format(function[event])
                    labels.append(label)
            if function.called is not None:
                labels.append("%u%s" % (function.called, MULTIPLICATION_SIGN))

            # Weight drives color/font size; default to 0 when unknown.
            if function.weight is not None:
                weight = function.weight
            else:
                weight = 0.0

            label = '\n'.join(labels)
            self.node(function.id,
                      label = label,
                      color = self.color(theme.node_bgcolor(weight)),
                      fontcolor = self.color(theme.node_fgcolor(weight)),
                      fontsize = "%.2f" % theme.node_fontsize(weight),
                      )

            for call in compat_itervalues(function.calls):
                callee = profile.functions[call.callee_id]

                labels = []
                for event in self.show_edge_events:
                    if event in call.events:
                        label = event.format(call[event])
                        labels.append(label)

                # Fall back on the callee's weight when the call's is unknown.
                if call.weight is not None:
                    weight = call.weight
                elif callee.weight is not None:
                    weight = callee.weight
                else:
                    weight = 0.0

                label = '\n'.join(labels)

                self.edge(function.id, call.callee_id,
                          label = label,
                          color = self.color(theme.edge_color(weight)),
                          fontcolor = self.color(theme.edge_color(weight)),
                          fontsize = "%.2f" % theme.edge_fontsize(weight),
                          penwidth = "%.2f" % theme.edge_penwidth(weight),
                          labeldistance = "%.2f" % theme.edge_penwidth(weight),
                          arrowsize = "%.2f" % theme.edge_arrowsize(weight),
                          )

        self.end_graph()

    def begin_graph(self):
        self.write('digraph {\n')

    def end_graph(self):
        self.write('}\n')

    def attr(self, what, **attrs):
        # Emit a default-attribute statement, e.g. `node [shape=box];`.
        self.write("\t")
        self.write(what)
        self.attr_list(attrs)
        self.write(";\n")

    def node(self, node, **attrs):
        self.write("\t")
        self.id(node)
        self.attr_list(attrs)
        self.write(";\n")

    def edge(self, src, dst, **attrs):
        self.write("\t")
        self.id(src)
        self.write(" -> ")
        self.id(dst)
        self.attr_list(attrs)
        self.write(";\n")

    def attr_list(self, attrs):
        if not attrs:
            return
        self.write(' [')
        first = True
        for name, value in compat_iteritems(attrs):
            if first:
                first = False
            else:
                self.write(", ")
            self.id(name)
            self.write('=')
            self.id(value)
        self.write(']')

    def id(self, id):
        """Write a DOT ID, quoting it unless it is a plain alphanumeric word."""
        if isinstance(id, (int, float)):
            s = str(id)
        elif isinstance(id, basestring):
            # NOTE(review): relies on a `basestring = str` compatibility shim
            # elsewhere in the file when running under Python 3 -- confirm.
            if id.isalnum() and not id.startswith('0x'):
                s = id
            else:
                s = self.escape(id)
        else:
            raise TypeError
        self.write(s)

    def color(self, rgb):
        """Format an (r, g, b) triple of floats in [0, 1] as "#rrggbb"."""
        r, g, b = rgb

        def float2int(f):
            if f <= 0.0:
                return 0
            if f >= 1.0:
                return 255
            return int(255.0*f + 0.5)

        return "#" + "".join(["%02x" % float2int(c) for c in (r, g, b)])

    def escape(self, s):
        """Return ``s`` as a double-quoted DOT string with escapes applied."""
        if not PYTHON_3:
            s = s.encode('utf-8')
        s = s.replace('\\', r'\\')
        s = s.replace('\n', r'\n')
        s = s.replace('\t', r'\t')
        s = s.replace('"', r'\"')
        return '"' + s + '"'

    def write(self, s):
        self.fp.write(s)
class Main:
    """Main program: parse command-line options, run the selected format
    parser, and write the resulting call graph in DOT format."""

    # Color themes selectable with -c/--colormap.
    themes = {
            "color": TEMPERATURE_COLORMAP,
            "pink": PINK_COLORMAP,
            "gray": GRAY_COLORMAP,
            "bw": BW_COLORMAP,
            "print": PRINT_COLORMAP,
    }

    # Input profile formats selectable with -f/--format.
    formats = {
        "aqtime": AQtimeParser,
        "axe": AXEParser,
        "callgrind": CallgrindParser,
        "hprof": HProfParser,
        "oprofile": OprofileParser,
        "perf": PerfParser,
        "prof": GprofParser,
        "pstats": PstatsParser,
        "sleepy": SleepyParser,
        "sysprof": SysprofParser,
        "xperf": XPerfParser,
    }

    def naturalJoin(self, values):
        # Join as an English list: "a, b or c".
        if len(values) >= 2:
            return ', '.join(values[:-1]) + ' or ' + values[-1]

        else:
            return ''.join(values)

    def main(self):
        """Main program."""

        global totalMethod

        formatNames = list(self.formats.keys())
        formatNames.sort()

        optparser = optparse.OptionParser(
            usage="\n\t%prog [options] [file] ...")
        optparser.add_option(
            '-o', '--output', metavar='FILE',
            type="string", dest="output",
            help="output filename [stdout]")
        optparser.add_option(
            '-n', '--node-thres', metavar='PERCENTAGE',
            type="float", dest="node_thres", default=0.5,
            help="eliminate nodes below this threshold [default: %default]")
        optparser.add_option(
            '-e', '--edge-thres', metavar='PERCENTAGE',
            type="float", dest="edge_thres", default=0.1,
            help="eliminate edges below this threshold [default: %default]")
        optparser.add_option(
            '-f', '--format',
            type="choice", choices=formatNames,
            dest="format", default="prof",
            help="profile format: %s [default: %%default]" % self.naturalJoin(formatNames))
        optparser.add_option(
            '--total',
            type="choice", choices=('callratios', 'callstacks'),
            dest="totalMethod", default=totalMethod,
            help="preferred method of calculating total time: callratios or callstacks (currently affects only perf format) [default: %default]")
        optparser.add_option(
            '-c', '--colormap',
            type="choice", choices=('color', 'pink', 'gray', 'bw', 'print'),
            dest="theme", default="color",
            help="color map: color, pink, gray, bw, or print [default: %default]")
        optparser.add_option(
            '-s', '--strip',
            action="store_true",
            dest="strip", default=False,
            help="strip function parameters, template parameters, and const modifiers from demangled C++ function names")
        optparser.add_option(
            '-w', '--wrap',
            action="store_true",
            dest="wrap", default=False,
            help="wrap function names")
        optparser.add_option(
            '--show-samples',
            action="store_true",
            dest="show_samples", default=False,
            help="show function samples")
        optparser.add_option(
            '--event',
            type="string", dest="event_selected", default=False,
            help="event name (callgrind format)")
        # add option to create subtree or show paths
        optparser.add_option(
            '-z', '--root',
            type="string",
            dest="root", default="",
            help="prune call graph to show only descendants of specified root function")
        optparser.add_option(
            '-l', '--leaf',
            type="string",
            dest="leaf", default="",
            help="prune call graph to show only ancestors of specified leaf function")
        # add a new option to control skew of the colorization curve
        optparser.add_option(
            '--skew',
            type="float", dest="theme_skew", default=1.0,
            help="skew the colorization curve.  Values < 1.0 give more variety to lower percentages.  Values > 1.0 give less variety to lower percentages")
        (self.options, self.args) = optparser.parse_args(sys.argv[1:])

        # Only the pstats format accepts multiple input files.
        if len(self.args) > 1 and self.options.format != 'pstats':
            optparser.error('incorrect number of arguments')

        try:
            self.theme = self.themes[self.options.theme]
        except KeyError:
            optparser.error('invalid colormap \'%s\'' % self.options.theme)

        # set skew on the theme now that it has been picked.
        if self.options.theme_skew:
            self.theme.skew = self.options.theme_skew

        totalMethod = self.options.totalMethod

        try:
            Format = self.formats[self.options.format]
        except KeyError:
            optparser.error('invalid format \'%s\'' % self.options.format)

        # Instantiate the parser according to how that format consumes input:
        # from stdin/one stream, from multiple files, or from exactly one file.
        if Format.stdinInput:
            if not self.args:
                fp = sys.stdin
            else:
                fp = open(self.args[0], 'rt')
            parser = Format(fp, event_selected = self.options.event_selected)
        elif Format.multipleInput:
            if not self.args:
                optparser.error('at least a file must be specified for %s input' % self.options.format)
            parser = Format(*self.args, event_selected = self.options.event_selected)
        else:
            if len(self.args) != 1:
                optparser.error('exactly one file must be specified for %s input' % self.options.format)
            parser = Format(self.args[0], event_selected = self.options.event_selected)

        self.profile = parser.parse()

        if self.options.output is None:
            self.output = sys.stdout
        else:
            if PYTHON_3:
                self.output = open(self.options.output, 'wt', encoding='UTF-8')
            else:
                self.output = open(self.options.output, 'wt')

        self.write_graph()

    def write_graph(self):
        """Prune the profile per the threshold/root/leaf options and emit DOT."""
        dot = DotWriter(self.output)
        dot.strip = self.options.strip
        dot.wrap = self.options.wrap
        if self.options.show_samples:
            dot.show_function_events.append(SAMPLES)

        profile = self.profile
        profile.prune(self.options.node_thres/100.0, self.options.edge_thres/100.0)

        if self.options.root:
            rootId = profile.getFunctionId(self.options.root)
            if not rootId:
                sys.stderr.write('root node ' + self.options.root + ' not found (might already be pruned : try -e0 -n0 flags)\n')
                sys.exit(1)
            profile.prune_root(rootId)
        if self.options.leaf:
            leafId = profile.getFunctionId(self.options.leaf)
            if not leafId:
                sys.stderr.write('leaf node ' + self.options.leaf + ' not found (maybe already pruned : try -e0 -n0 flags)\n')
                sys.exit(1)
            profile.prune_leaf(leafId)

        dot.graph(profile, self.theme)
# Script entry point.
if __name__ == '__main__':
    Main().main()
| {
"content_hash": "a0ff6e922ff9ecb5ecab47d863f4343e",
"timestamp": "",
"source": "github",
"line_count": 3261,
"max_line_length": 155,
"avg_line_length": 32.50843299601349,
"alnum_prop": 0.5356287142722385,
"repo_name": "yonggang985/Sniper",
"id": "90790d299e2e2551d968710f4134e3b185fab4b4",
"size": "106729",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/gprof2dot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "90653"
},
{
"name": "C++",
"bytes": "1722452"
},
{
"name": "Makefile",
"bytes": "21654"
},
{
"name": "Objective-C",
"bytes": "645"
},
{
"name": "Python",
"bytes": "103923"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename the ``thumbnail`` field to ``image_thumbnail`` on Resource and
    Topic, and add optional video embed/upload fields to Resource.

    Auto-generated Django migration; the operations must not be edited by
    hand once applied.
    """

    dependencies = [
        ('resources', '0002_resource_thumbnail'),
    ]

    operations = [
        migrations.RenameField(
            model_name='resource',
            old_name='thumbnail',
            new_name='image_thumbnail',
        ),
        migrations.RenameField(
            model_name='topic',
            old_name='thumbnail',
            new_name='image_thumbnail',
        ),
        migrations.AddField(
            model_name='resource',
            name='video_embed',
            field=models.CharField(max_length=255, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='resource',
            name='video_upload',
            # Uploads are organised into date-based subdirectories.
            field=models.FileField(null=True, upload_to=b'resources/%Y/%m/%d/', blank=True),
        ),
    ]
| {
"content_hash": "47f2104c0ca556d272e3dcac00b48115",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 92,
"avg_line_length": 27.606060606060606,
"alnum_prop": 0.5521405049396267,
"repo_name": "SFvue/sfvue3",
"id": "aef85d4bb687f09352d91a968accb11a8628ee61",
"size": "935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/migrations/0003_auto_20160116_1815.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51810"
},
{
"name": "HTML",
"bytes": "116821"
},
{
"name": "JavaScript",
"bytes": "75416"
},
{
"name": "Python",
"bytes": "183907"
}
],
"symlink_target": ""
} |
"""This module contains the classes JobQueue and Job."""
import logging
import time
import warnings
import datetime
import weakref
from numbers import Number
from threading import Thread, Lock, Event
from queue import PriorityQueue, Empty
class Days(object):
    """Weekday constants for daily jobs (Monday == 0 ... Sunday == 6)."""
    MON = 0
    TUE = 1
    WED = 2
    THU = 3
    FRI = 4
    SAT = 5
    SUN = 6
    # Convenience tuple covering the whole week.
    EVERY_DAY = (MON, TUE, WED, THU, FRI, SAT, SUN)
class JobQueue(object):
"""This class allows you to periodically perform tasks with the bot.
Attributes:
queue (PriorityQueue):
bot (telegram.Bot):
Args:
bot (telegram.Bot): The bot instance that should be passed to the jobs
Deprecated: 5.2
prevent_autostart (Optional[bool]): Thread does not start during initialisation.
Use `start` method instead.
"""
def __init__(self, bot, prevent_autostart=None):
if prevent_autostart is not None:
warnings.warn("prevent_autostart is being deprecated, use `start` method instead.")
self.queue = PriorityQueue()
self.bot = bot
self.logger = logging.getLogger(self.__class__.__name__)
self.__start_lock = Lock()
self.__next_peek_lock = Lock() # to protect self._next_peek & self.__tick
self.__tick = Event()
self.__thread = None
""":type: Thread"""
self._next_peek = None
""":type: float"""
self._running = False
def put(self, job, next_t=None):
"""Queue a new job.
Args:
job (telegram.ext.Job): The ``Job`` instance representing the new job
next_t (Optional[int, float, datetime.timedelta, datetime.datetime, datetime.time]):
Time in or at which the job should run for the first time. This parameter will be
interpreted depending on its type.
``int`` or ``float`` will be interpreted as "seconds from now" in which the job
should run.
``datetime.timedelta`` will be interpreted as "time from now" in which the job
should run.
``datetime.datetime`` will be interpreted as a specific date and time at which the
job should run.
``datetime.time`` will be interpreted as a specific time at which the job should
run. This could be either today or, if the time has already passed, tomorrow.
"""
warnings.warn("'JobQueue.put' is being deprecated, use 'JobQueue.run_once', "
"'JobQueue.run_daily' or 'JobQueue.run_repeating' instead")
if job.job_queue is None:
job.job_queue = self
self._put(job, next_t=next_t)
def _put(self, job, next_t=None, last_t=None):
"""Queue a new job.
Args:
job (telegram.ext.Job): The ``Job`` instance representing the new job
next_t (Optional[int, float, datetime.timedelta, datetime.datetime, datetime.time]):
Time in or at which the job should run for the first time. This parameter will be
interpreted depending on its type.
* ``int`` or ``float`` will be interpreted as "seconds from now" in which the job
should run.
* ``datetime.timedelta`` will be interpreted as "time from now" in which the job
should run.
* ``datetime.datetime`` will be interpreted as a specific date and time at which
the job should run.
* ``datetime.time`` will be interpreted as a specific time of day at which the job
should run. This could be either today or, if the time has already passed,
tomorrow.
last_t (Optional[float]): Timestamp of the time when ``job`` was scheduled for in the
last ``put`` call. If provided, it will be used to calculate the next timestamp
more accurately by accounting for the execution time of the job (and possibly
others). If None, `now` will be assumed.
"""
if next_t is None:
next_t = job.interval
if next_t is None:
raise ValueError('next_t is None')
if isinstance(next_t, datetime.datetime):
next_t = (next_t - datetime.datetime.now()).total_seconds()
elif isinstance(next_t, datetime.time):
next_datetime = datetime.datetime.combine(datetime.date.today(), next_t)
if datetime.datetime.now().time() > next_t:
next_datetime += datetime.timedelta(days=1)
next_t = (next_datetime - datetime.datetime.now()).total_seconds()
elif isinstance(next_t, datetime.timedelta):
next_t = next_t.total_seconds()
next_t += last_t or time.time()
self.logger.debug('Putting job %s with t=%f', job.name, next_t)
self.queue.put((next_t, job))
# Wake up the loop if this job should be executed next
self._set_next_peek(next_t)
def run_once(self, callback, when, context=None, name=None):
    """Schedule ``callback`` to run a single time.

    Args:
        callback (function): Callback taking ``bot`` and ``job`` parameters;
            ``job`` gives access to ``context`` and allows turning the job
            into a repeating one.
        when (int, float, datetime.timedelta, datetime.datetime, datetime.time):
            When to run: ``int``/``float`` mean "seconds from now",
            ``datetime.timedelta`` a delay from now, ``datetime.datetime`` an
            absolute moment, and ``datetime.time`` a time of day (today, or
            tomorrow if that time has already passed).
        context (Optional[object]): Extra data exposed as ``job.context``.
            Defaults to ``None``.
        name (Optional[str]): Job name; defaults to ``callback.__name__``.

    Returns:
        telegram.ext.jobqueue.Job: The new ``Job`` instance that has been
        added to the job queue.
    """
    one_shot_job = Job(callback,
                       repeat=False,
                       context=context,
                       name=name,
                       job_queue=self)
    self._put(one_shot_job, next_t=when)
    return one_shot_job
def run_repeating(self, callback, interval, first=None, context=None, name=None):
    """Creates a new repeating ``Job`` and adds it to the queue.

    Args:
        callback (function): The callback function that should be executed by the new job. It
            should take two parameters ``bot`` and ``job``, where ``job`` is the ``Job``
            instance. It can be used to access it's ``context``, terminate the job or change
            its interval.
        interval (int, float, datetime.timedelta): The interval in which the job will run.
            If it is an ``int`` or a ``float``, it will be interpreted as seconds.
        first (int, float, datetime.timedelta, datetime.datetime, datetime.time):
            * ``int`` or ``float`` will be interpreted as "seconds from now" in which the job
              should run.
            * ``datetime.timedelta`` will be interpreted as "time from now" in which the job
              should run.
            * ``datetime.datetime`` will be interpreted as a specific date and time at which
              the job should run.
            * ``datetime.time`` will be interpreted as a specific time of day at which the job
              should run. This could be either today or, if the time has already passed,
              tomorrow.

            Defaults to ``interval``
        context (Optional[object]): Additional data needed for the callback function. Can be
            accessed through ``job.context`` in the callback. Defaults to ``None``
        name (Optional[str]): The name of the new job. Defaults to ``callback.__name__``

    Returns:
        telegram.ext.jobqueue.Job: The new ``Job`` instance that has been added to the
            job queue.
    """
    # ``repeat=True`` makes ``tick`` re-enqueue the job after each run.
    job = Job(callback,
              interval=interval,
              repeat=True,
              context=context,
              name=name,
              job_queue=self)
    # ``first`` may be None, in which case ``_put`` falls back to ``interval``.
    self._put(job, next_t=first)
    return job
def run_daily(self, callback, time, days=Days.EVERY_DAY, context=None, name=None):
    """Creates a new ``Job`` that runs once a day (on selected weekdays) and adds it to the queue.

    Args:
        callback (function): The callback function that should be executed by the new job. It
            should take two parameters ``bot`` and ``job``, where ``job`` is the ``Job``
            instance. It can be used to access it's ``context`` or terminate the job.
        time (datetime.time): Time of day at which the job should run.
        days (Optional[tuple[int]]): Defines on which days of the week the job should run.
            Defaults to ``Days.EVERY_DAY``
        context (Optional[object]): Additional data needed for the callback function. Can be
            accessed through ``job.context`` in the callback. Defaults to ``None``
        name (Optional[str]): The name of the new job. Defaults to ``callback.__name__``

    Returns:
        telegram.ext.jobqueue.Job: The new ``Job`` instance that has been added to the
            job queue.
    """
    # NOTE(review): the parameter ``time`` shadows the stdlib ``time`` module
    # inside this method; fine here because the module is not used below.
    # The 1-day interval plus the ``days`` filter in ``tick`` yields
    # "daily on the selected weekdays" behaviour.
    job = Job(callback,
              interval=datetime.timedelta(days=1),
              repeat=True,
              days=days,
              context=context,
              name=name,
              job_queue=self)
    self._put(job, next_t=time)
    return job
def _set_next_peek(self, t):
    """
    Set next peek if not defined or `t` is before next peek.
    In case the next peek was set, also trigger the `self.__tick` event.

    Args:
        t (float): Epoch timestamp of a job that has just been scheduled.
    """
    # Hold the lock around both the comparison and the assignment so a
    # concurrent caller cannot interleave between read and write.
    with self.__next_peek_lock:
        if not self._next_peek or self._next_peek > t:
            self._next_peek = t
            # Wake the main loop so it can recompute its sleep timeout.
            self.__tick.set()
def tick(self):
    """
    Run all jobs that are due and re-enqueue them with their interval.

    Pops ``(timestamp, job)`` pairs off the queue until it reaches one that
    is not yet due; that entry is pushed back and registered as the next
    wake-up time for the main loop.
    """
    now = time.time()
    self.logger.debug('Ticking jobs with t=%f', now)
    while True:
        try:
            # Non-blocking get: an empty queue simply ends this tick.
            t, job = self.queue.get(False)
        except Empty:
            break
        self.logger.debug('Peeked at %s with t=%f', job.name, t)
        if t > now:
            # We can get here in two conditions:
            # 1. At the second or later pass of the while loop, after we've already
            #    processed the job(s) we were supposed to at this time.
            # 2. At the first iteration of the loop only if `self.put()` had triggered
            #    `self.__tick` because `self._next_peek` wasn't set
            self.logger.debug("Next task isn't due yet. Finished!")
            self.queue.put((t, job))
            self._set_next_peek(t)
            break
        if job.removed:
            self.logger.debug('Removing job %s', job.name)
            continue
        if job.enabled:
            try:
                # Only run if today is one of the job's configured weekdays.
                current_week_day = datetime.datetime.now().weekday()
                if any(day == current_week_day for day in job.days):
                    self.logger.debug('Running job %s', job.name)
                    job.run(self.bot)
            # Fix: the original bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt. ``Exception`` keeps the intended "log the
            # error and keep the queue alive" behaviour for callback failures.
            except Exception:
                self.logger.exception('An uncaught error was raised while executing job %s',
                                      job.name)
        else:
            self.logger.debug('Skipping disabled job %s', job.name)
        if job.repeat and not job.removed:
            # Re-schedule relative to the previous scheduled time, not "now".
            self._put(job, last_t=t)
        else:
            self.logger.debug('Dropping non-repeating or removed job %s', job.name)
def start(self):
    """
    Starts the job_queue thread.

    Idempotent: calling it while the queue is already running does nothing.
    """
    # Manual acquire/release (instead of ``with``) so the lock is released
    # *before* the worker thread is spawned; only the running-flag check and
    # flip need to be atomic.
    self.__start_lock.acquire()
    if not self._running:
        self._running = True
        self.__start_lock.release()
        self.__thread = Thread(target=self._main_loop, name="job_queue")
        self.__thread.start()
        self.logger.debug('%s thread started', self.__class__.__name__)
    else:
        self.__start_lock.release()
def _main_loop(self):
    """
    Thread target of thread ``job_queue``. Runs in background and performs ticks on the job
    queue.

    Sleeps until the next scheduled job (or indefinitely when nothing is
    scheduled) and can be woken early via the ``__tick`` event by
    ``_set_next_peek`` or ``stop``.
    """
    while self._running:
        # self._next_peek may be (re)scheduled during self.tick() or self.put()
        with self.__next_peek_lock:
            # ``None`` timeout means "wait until explicitly woken".
            tmout = self._next_peek - time.time() if self._next_peek else None
            self._next_peek = None
            self.__tick.clear()
        # Wait OUTSIDE the lock, otherwise _set_next_peek() would deadlock.
        self.__tick.wait(tmout)
        # If we were woken up by self.stop(), just bail out
        if not self._running:
            break
        self.tick()
    self.logger.debug('%s thread stopped', self.__class__.__name__)
def stop(self):
    """
    Stops the thread

    Blocks until the worker thread has joined.
    """
    with self.__start_lock:
        self._running = False
    # Wake the main loop so it notices that ``_running`` is now False.
    self.__tick.set()
    if self.__thread is not None:
        self.__thread.join()
def jobs(self):
    """Return a tuple of every job currently held by this ``JobQueue``."""
    # ``queue.queue`` is the underlying container of (time, job) pairs; the
    # truthiness filter skips any falsy placeholder entries.
    queued = [entry[1] for entry in self.queue.queue if entry]
    return tuple(queued)
class Job(object):
    """This class encapsulates a Job

    Attributes:
        callback (function): The function that the job executes when it's due
        interval (int, float, datetime.timedelta): The interval in which the job runs
        days (tuple[int]): A tuple of ``int`` values that determine on which days of the week the
            job runs
        repeat (bool): If the job runs periodically or only once
        name (str): The name of this job
        job_queue (telegram.ext.JobQueue): The ``JobQueue`` this job belongs to
        enabled (bool): Boolean property that decides if this job is currently active

    Args:
        callback (function): The callback function that should be executed by the Job. It should
            take two parameters ``bot`` and ``job``, where ``job`` is the ``Job`` instance. It
            can be used to terminate the job or modify its interval.
        interval (Optional[int, float, datetime.timedelta]): The interval in which the job will
            execute its callback function. ``int`` and ``float`` will be interpreted as seconds.
            If you don't set this value, you must set ``repeat=False`` and specify ``next_t`` when
            you put the job into the job queue.
        repeat (Optional[bool]): If this job should be periodically execute its callback function
            (``True``) or only once (``False``). Defaults to ``True``
        context (Optional[object]): Additional data needed for the callback function. Can be
            accessed through ``job.context`` in the callback. Defaults to ``None``
        days (Optional[tuple[int]]): Defines on which days of the week the job should run.
            Defaults to ``Days.EVERY_DAY``
        name (Optional[str]): The name of this job. Defaults to ``callback.__name__``
        job_queue (Optional[class:`telegram.ext.JobQueue`]): The ``JobQueue`` this job belongs to.
            Only optional for backward compatibility with ``JobQueue.put()``.
    """

    def __init__(self,
                 callback,
                 interval=None,
                 repeat=True,
                 context=None,
                 days=Days.EVERY_DAY,
                 name=None,
                 job_queue=None):
        self.callback = callback
        self.context = context
        self.name = name or callback.__name__
        # NOTE: ``_repeat`` must be assigned before the ``interval`` property
        # setter runs below, because that setter reads ``self.repeat``.
        self._repeat = repeat
        self._interval = None
        self.interval = interval  # property setter validates the value
        self.repeat = repeat  # property setter re-validates against interval
        self._days = None
        self.days = days  # property setter validates the weekday tuple
        # ``weakref.proxy`` avoids a strong reference cycle between a job and
        # the queue that holds it.
        self._job_queue = weakref.proxy(job_queue) if job_queue is not None else None
        # ``Event`` objects serve as thread-safe boolean flags.
        self._remove = Event()
        self._enabled = Event()
        self._enabled.set()

    def run(self, bot):
        """Executes the callback function"""
        self.callback(bot, self)

    def schedule_removal(self):
        """
        Schedules this job for removal from the ``JobQueue``. It will be removed without executing
        its callback function again.
        """
        self._remove.set()

    @property
    def removed(self):
        # True once schedule_removal() has been called.
        return self._remove.is_set()

    @property
    def enabled(self):
        return self._enabled.is_set()

    @enabled.setter
    def enabled(self, status):
        if status:
            self._enabled.set()
        else:
            self._enabled.clear()

    @property
    def interval(self):
        return self._interval

    @interval.setter
    def interval(self, interval):
        # A repeating job must always have an interval to re-schedule with.
        if interval is None and self.repeat:
            raise ValueError("The 'interval' can not be 'None' when 'repeat' is set to 'True'")
        if not (interval is None or isinstance(interval, (Number, datetime.timedelta))):
            raise ValueError("The 'interval' must be of type 'datetime.timedelta',"
                             " 'int' or 'float'")
        self._interval = interval

    @property
    def interval_seconds(self):
        # Normalised view of ``interval`` in seconds, whatever its type.
        if isinstance(self.interval, datetime.timedelta):
            return self.interval.total_seconds()
        else:
            return self.interval

    @property
    def repeat(self):
        return self._repeat

    @repeat.setter
    def repeat(self, repeat):
        if self.interval is None and repeat:
            raise ValueError("'repeat' can not be set to 'True' when no 'interval' is set")
        self._repeat = repeat

    @property
    def days(self):
        return self._days

    @days.setter
    def days(self, days):
        # Weekdays use Python's convention: 0 == Monday ... 6 == Sunday.
        if not isinstance(days, tuple):
            raise ValueError("The 'days' argument should be of type 'tuple'")
        if not all(isinstance(day, int) for day in days):
            raise ValueError("The elements of the 'days' argument should be of type 'int'")
        if not all(0 <= day <= 6 for day in days):
            raise ValueError("The elements of the 'days' argument should be from 0 up to and "
                             "including 6")
        self._days = days

    @property
    def job_queue(self):
        """ :rtype: JobQueue """
        return self._job_queue

    @job_queue.setter
    def job_queue(self, job_queue):
        # Property setter for backward compatibility with JobQueue.put()
        if not self._job_queue:
            self._job_queue = weakref.proxy(job_queue)
        else:
            raise RuntimeError("The 'job_queue' attribute can only be set once.")

    def __lt__(self, other):
        # Jobs never compare as less-than, so ties on the scheduled time in
        # the queue (presumably a PriorityQueue of (t, job) tuples -- confirm)
        # are broken arbitrarily instead of raising a comparison error.
        return False
| {
"content_hash": "bd01d59b37e940b23bccecffafa7012b",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 98,
"avg_line_length": 39.56275303643725,
"alnum_prop": 0.5698935734752354,
"repo_name": "thonkify/thonkify",
"id": "dbb5c2f452ef606e6513c0674ed243668c454f19",
"size": "20353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/telegram/ext/jobqueue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10460214"
},
{
"name": "Shell",
"bytes": "1470"
}
],
"symlink_target": ""
} |
'''
Created on Apr 25, 2011
@author: Carsten Witzke
'''
from pydoc import deque  # legacy import; relied on an undocumented re-export
from collections import deque  # canonical home of deque; intentionally shadows the above
import os

import numpy
class LibsvmFileImporter(object):
    '''
    Imports files in LIBSVM format. Includes simple comment-handling.
    '''

    # Class-level defaults; __init__ overwrites both per instance.
    __file = None
    __dataSet = None

    def __init__(self, filename, binary=False):
        '''
        New file importer with file from given URL string.

        Args:
            filename: path to a LIBSVM-formatted text file.
            binary: when True, targets are validated as a two-class problem
                and normalised to -1/+1.

        Raises:
            IOError: if ``filename`` cannot be opened.
        '''
        try:
            self.__file = open(os.path.abspath(filename), 'r')
        except IOError:
            raise IOError('No such file \'%s\'' % filename)
        if binary:  # TODO: find out if binary or not
            self.__read_binary_data()
        else:
            self.__read_data()
        # Fix: the handle was previously left open for the object's lifetime;
        # both readers consume the file completely, so close it here.
        self.__file.close()

    @staticmethod
    def __strip_line(line):
        '''Strip an optional trailing comment, then surrounding whitespace.

        Fix: the original ``line[:line.find('#')]`` silently dropped the last
        character of any line that contained no ``#`` (find() returns -1).
        '''
        comment_start = line.find('#')
        if comment_start != -1:
            line = line[:comment_start]
        return line.strip('\n').strip()

    def __read_data(self):
        '''reads (multi)labeled data'''
        dataList = []
        max_f_index = 0
        for line in self.__file:
            try:
                # strip comments, whitespaces and line breaks
                line = self.__strip_line(line)
                if line == '':
                    continue
                # something left? go!
                data_ = {}
                tokens = deque(line.split(' '))
                # First token is the label; the rest are index:value pairs.
                data_['target'] = float(tokens.popleft())
                for token in tokens:
                    t = token.split(':')
                    feature = int(t[0])
                    if feature > max_f_index:
                        max_f_index = feature
                    data_[feature] = float(t[1]) if '.' in t[1] else int(t[1])
                dataList.append(data_)
            except Exception as e:
                # Best-effort parsing: report the bad line but keep going.
                print(e)
        self.__dataSet = DataSet(dataList, max_f_index)

    def __read_binary_data(self):
        '''reads data and checks for binary classes'''
        targetList = []
        dataList = []
        max_f_index = 0
        for line in self.__file:
            # strip comments, whitespaces and line breaks
            line = self.__strip_line(line)
            if line == '':
                continue
            # something left? go!
            data_ = {}
            tokens = deque(line.split(' '))
            data_['target'] = float(tokens.popleft())
            # Track the distinct labels seen; more than two is an error.
            if len(targetList) <= 2:
                if data_['target'] not in targetList:
                    targetList.append(data_['target'])
            else:
                raise TypeError('Not a binary class file')
            for token in tokens:
                t = token.split(':')
                feature = int(t[0])
                if feature > max_f_index:
                    max_f_index = feature
                data_[feature] = float(t[1]) if '.' in t[1] else int(t[1])
            dataList.append(data_)
        # normalization of targets: e.g. '0 & 1 ==> -1 & +1'
        # bonus: handling of ordinal values like 'green & blue'
        # NOTE(review): assumes at least two distinct labels were seen;
        # a single-class file raises IndexError here -- confirm intent.
        try:
            a = int(targetList[0])
            b = int(targetList[1])
            # larger value becomes '+1'
            if a > b:
                for instance in dataList:
                    instance['target'] = +1 if int(instance['target']) == a else -1
            else:
                for instance in dataList:
                    instance['target'] = -1 if int(instance['target']) == a else +1
        except ValueError:
            # Value is not int - map the first label seen to +1.
            # Fix: the original iterated the list of dicts but then indexed
            # ``dataList[i]`` with a dict, raising TypeError on this path.
            for instance in dataList:
                instance['target'] = +1 if instance['target'] == targetList[0] else -1
        self.__dataSet = DataSet(dataList, max_f_index)

    def get_dataSet(self):
        '''Return the parsed ``DataSet`` instance.'''
        return self.__dataSet
class DataSet(object):
    '''Internal representation of a data set'''

    def __init__(self, data, max_f_index=None):  #TODO: handle max_f_index internally!
        # max_f_index = max feature index ---> defines the dimensionality of the features
        # if x != None and y != None:
        #     self.__matrix = x
        #     self.__target = y
        #     self.__numInstances, self.__numFeatures = self.__matrix.shape
        #     return
        self.__build(data, max_f_index)
        self.__numInstances, self.__numFeatures = self.__matrix.shape

    def __build(self, data, max_f_index):
        '''build instance features and target vector'''
        # NOTE(review): len(range(max_f_index)) == max_f_index; kept as-is.
        # Matrix is (n_instances x max_f_index), targets are a column vector.
        self.__matrix = numpy.zeros(shape=(len(data),len(range(max_f_index))))
        self.__target = numpy.zeros(shape=(len(data),1))
        for i in range(len(data)):
            # Each instance is a dict of {feature_index: value, 'target': y}.
            # NOTE(review): ``iteritems`` is Python-2-only.
            for key, value in data[i].iteritems():
                # ignore label
                if key == 'target':
                    #self.__matrix[i][0] = 1
                    self.__target[i] = value
                    continue
                # LIBSVM feature indices are 1-based; matrix columns 0-based.
                self.__matrix[i][key-1] = value

    ## getter / setter ##
    def get_features(self):
        return self.__matrix

    def get_targets(self,index=None):
        # With an index, returns a single target coerced to int;
        # without, returns the full (n x 1) numpy column vector.
        if index != None:
            return int(self.__target[index])
        return self.__target

    def get_numInstances(self):
        return self.__numInstances

    def get_numFeatures(self):
        # NOTE(review): returns one less than the matrix width -- presumably
        # reserving a column for a bias term (see commented-out code in
        # __build); confirm before relying on this value.
        return self.__numFeatures - 1

    ## properties
    #data = property(doc='initial data format: list of dictionaries (will be deleted after features/vector initialization')
    features = property(get_features, doc='features X (n x m)')
    targets = property(get_targets, doc='target vector Y (n x 1)')
    numInstances = property(get_numInstances)
numFeatures = property(get_numFeatures) | {
"content_hash": "cad74939765b10a70e86aafa93f3b49a",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 123,
"avg_line_length": 35.17834394904459,
"alnum_prop": 0.4975556762629006,
"repo_name": "whiskey/Machine-Learning",
"id": "e5db14d458a2e49dbe215572fb26bfda4d2219df",
"size": "5541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "de/staticline/tools/libsvmtools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31079"
},
{
"name": "Shell",
"bytes": "830"
}
],
"symlink_target": ""
} |
import re
import time
import autonetkit.ank_messaging as ank_messaging
import autonetkit.config as config
import autonetkit.log as log
try:
import Exscript
except ImportError:
log.warning('Deployment requires Exscript: pip install https://github.com/knipknap/exscript/tarball/master'
)
def deploy(host, username, dst_folder, key_filename=None, parallel_count=5):
    """Package ``dst_folder``, copy it to ``host``, and start the lab there.

    Convenience wrapper that chains :func:`package`, :func:`transfer` and
    :func:`extract` with the same credentials.

    Args:
        host: hostname or address of the Netkit server.
        username: SSH login name on ``host``.
        dst_folder: local lab directory to package and deploy.
        key_filename: optional path to an SSH private key.
        parallel_count: number of VMs ``lstart`` may boot in parallel.
    """
    archive = package(dst_folder)
    transfer(host, username, archive, key_filename=key_filename)
    extract(host,
            username,
            archive,
            dst_folder,
            key_filename=key_filename,
            parallel_count=parallel_count)
def package(src_dir, target='netkit_lab'):
    """Create a gzipped tarball of ``src_dir``.

    Args:
        src_dir: directory (or file) to archive.
        target: basename for the archive; '.tar.gz' is appended.

    Returns:
        str: the archive filename, '<target>.tar.gz'.
    """
    log.info('Packaging %s' % src_dir)
    import tarfile
    import os
    tar_filename = '%s.tar.gz' % target
    # Fix: use a context manager so the archive is flushed and closed even
    # if tar.add() raises (the original leaked the handle on error).
    with tarfile.open(os.path.join(tar_filename), 'w:gz') as tar:
        tar.add(src_dir)
    return tar_filename
def transfer(
    host,
    username,
    local,
    remote=None,
    key_filename=None,
):
    """Copy ``local`` to ``remote`` on ``host`` via SFTP.

    Args:
        host: hostname or address to connect to.
        username: SSH login name.
        local: local path of the file to upload.
        remote: remote destination path; defaults to the same name as ``local``.
        key_filename: optional path to an SSH private key.
    """
    log.debug('Transferring lab to %s' % host)
    log.info('Transferring Netkit lab')
    if not remote:
        remote = local  # same filename
    import paramiko
    ssh = paramiko.SSHClient()
    # Accept unknown host keys automatically (lab hosts are short-lived).
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if key_filename:
        log.debug('Connecting to %s with %s and key %s' % (host,
                  username, key_filename))
        ssh.connect(host, username=username, key_filename=key_filename)
    else:
        log.info('Connecting to %s with %s' % (host, username))
        ssh.connect(host, username=username)
    log.info('Opening SSH for SFTP')
    ftp = ssh.open_sftp()
    # Fix: log message typo 'tspoto' -> 'to'.
    log.info('Putting file %s to %s' % (local, remote))
    ftp.put(local, remote)
    log.info('Put file %s to %s' % (local, remote))
    ftp.close()
    # Fix: close the SSH transport as well; only the SFTP channel was closed.
    ssh.close()
def extract(
host,
username,
tar_file,
cd_dir,
timeout=45,
key_filename=None,
verbosity=0,
parallel_count=5,
):
"""Extract and start lab"""
log.debug('Extracting and starting lab on %s' % host)
log.info('Extracting and starting Netkit lab')
from Exscript import Account
from Exscript.util.start import start
from Exscript.util.match import first_match
from Exscript import PrivateKey
from Exscript.protocols.Exception import InvalidCommandException
messaging = ank_messaging
def starting_host(protocol, index, data):
log.info('Starting %s' % data.group(1))
def lab_started(protocol, index, data):
log.info('Lab started on %s' % host)
def make_not_found(protocol, index, data):
log.warning('Make not installed on remote host %s. Please install make and retry.'
% host)
return
def process_vlist(response):
"""Obtain VM to PID listing: required if terminating a numeric VM"""
# TODO: could process using textfsm template
vm_to_pid = {}
for line in response.splitlines():
match = re.match(r'^\w+\s+(\w+)\s+(\d+)', line)
if match:
vm = match.group(1)
pid = match.group(2)
vm_to_pid[vm] = pid
return vm_to_pid
def start_lab(thread, host, conn):
conn.set_timeout(timeout)
conn.add_monitor(r'Starting "(\w+)"', starting_host)
conn.add_monitor(r'The lab has been started', lab_started)
lab_vlist = []
# conn.add_monitor(r'Virtual machine "((\S*_*)+)" is already running. Please', already_running_b)
conn.add_monitor(r'make: not found', make_not_found)
# conn.data_received_event.connect(data_received)
conn.execute('cd %s' % cd_dir)
conn.execute('lcrash -k')
conn.execute('lclean')
conn.execute('cd') # back to home directory tar file copied to
conn.execute('tar -xzf %s' % tar_file)
conn.execute('cd %s' % cd_dir)
conn.execute('linfo')
linfo_response = str(conn.response)
vm_list = []
for line in linfo_response.splitlines():
if 'The lab is made up of' in line:
open_bracket = line.index('(')
close_bracket = line.index(')')
vm_list = line[open_bracket + 1:close_bracket]
vm_list = vm_list.split()
log.info('The lab contains VMs %s' % ', '.join(vm_list))
# now check if any vms are still running
conn.execute('vlist')
response = str(conn.response)
lab_vlist = process_vlist(response)
for virtual_machine in lab_vlist:
if virtual_machine in vm_list:
if virtual_machine.isdigit:
# convert to PID if numeric, as vcrash can't crash numeric ids (treats as PID)
crash_id = lab_vlist.get(virtual_machine)
else:
crash_id = virtual_machine # use name
if crash_id:
# crash_id may not be set, if machine not present in initial vlist, if so then ignore
log.info('Stopping running VM %s' % virtual_machine)
conn.execute('vcrash %s' % crash_id)
conn.execute('vlist')
conn.execute('lclean')
start_command = 'lstart -p%s -o --con0=none' % parallel_count
lab_is_started = False
while lab_is_started == False:
try:
log.info('Starting lab')
conn.execute(start_command)
except InvalidCommandException, error:
error_string = str(error)
if 'already running' in error_string:
conn.execute('vlist')
response = str(conn.response)
lab_vlist = process_vlist(response)
running_vms = []
for line in error_string.splitlines():
if 'already running' in line:
running_vm = line.split('"')[1]
running_vms.append(running_vm)
for virtual_machine in running_vms:
if virtual_machine.isdigit:
# convert to PID if numeric, as vcrash can't crash numeric ids (treats as PID)
crash_id = lab_vlist.get(virtual_machine)
else:
crash_id = virtual_machine # use name
if crash_id:
# crash_id may not be set, if machine not present in initial vlist, if so then ignore
log.info('Stopping running VM %s'
% virtual_machine)
conn.execute('vcrash %s' % crash_id)
time.sleep(1)
else:
# conn.execute(start_command)
lab_is_started = True
first_match(conn, r'^The lab has been started')
log.info('Lab started') # TODO: make this captured - need to debug capturing
conn.send('exit')
if key_filename:
key = PrivateKey.from_file(key_filename)
log.debug('Connecting to %s with username %s and key %s'
% (host, username, key_filename))
accounts = [Account(username, key=key)]
else:
log.debug('Connecting to %s with username %s' % (host,
username))
accounts = [Account(username)]
hosts = ['ssh://%s' % host]
verbosity = -1
start(accounts, hosts, start_lab, verbose=verbosity)
| {
"content_hash": "01cb765deb2097adf217a64af32b87d8",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 113,
"avg_line_length": 31.765432098765434,
"alnum_prop": 0.559528436325949,
"repo_name": "sysbot/autonetkit",
"id": "e2aa3e544ab173fa7255cd5251b02ffaaf38bb63",
"size": "7761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autonetkit/deploy/netkit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
I am really *Markdown* but I am set as Code. Fix me!
# <markdowncell>
# Python in Fifteen Minutes
# -------------------------
# <markdowncell>
# ### Why?
#
# * Python is easy to learn
# * Python is easy to read
# * Python has lots of great tools to do things quickly
# <markdowncell>
# ### How?
# <markdowncell>
# This will be a very quick introduction to Python.
#
# We will focus on:
# * Setting variables
# * Writing comments
# * Python and spaces
# * Outputting
# * Functions and Types
# * Basic math
# * Basic text manipulation
# * Some Data Structures (Lists, Sets, and Dictionaries)
# * Getting help
# * Writing Functions
# * Testing and Checking
# <markdowncell>
# ### Setting Variables
#
# A **variable** is basically a labeled box that you put things in. To set a variable in Python, you use the equal sign. Let's try.
# <codecell>
X = 1
Y=2
# <markdowncell>
# ### Writing Comments
#
# You can make notes to yourself and others in Python by starting or ending a line with `#`.
#
# <codecell>
# Here is an example
X = 1
X=1 # This line is considered less readable than the above.
# <markdowncell>
# ## Python and Spaces
# <markdowncell>
# One of the features of Python that makes it so readable is that it
# cares very deeply about the space at the beginning of lines (indentation), so
# make sure you don't have a space at the beginning of the line.
# Otherwise, you will get an error.
# <markdowncell>
# #### Your Turn
# An IndentationError occurs below due to an extra space at the beginning.
# Fix it.
# <codecell>
X = 1 #This is okay
# An IndentationError here due to the space
X = 2
# <markdowncell>
# ### Outputting
#
# There are different ways to output your variables and so forth.
# The IPython Notebook will output a variable or answer to an equation
# if it is the last command that is executing in the particular cell.
# <codecell>
# Here, nothing is displayed because there is just a variable set.
X=1
# <codecell>
# Here X is the last declaration in the cell, so X is displayed.
X=1
X
# <codecell>
# Here we have a declaration without a variable.
1
# <markdowncell>
# You can also use the command `print` followed by a variable or declaration.
# <codecell>
X=1
print 1
print X
# <markdowncell>
# You can `print` multiple variables or declarations together by using a comma.
# <codecell>
print 1,1,X,X
# <markdowncell>
# There are also ways to output to files and so forth. You can also produce HTML within the notebook.
# <markdowncell>
# #### Your Turn
#
# Fill in the following 3 cells with the code you need to do the commands listed in the comments.
# <codecell>
# 1. In this cell, write the code to output the number 3. Do not use print
#YOURCODEHERE
# <codecell>
# 2. In this cell, do the same using print.
#YOURCODEHERE
# <codecell>
# 3. Do the following
# print the number 4 on its own line.
#YOURCODEHERE
# next, set X to 5
#YOURCODEHERE
# print X followed by 3
# <markdowncell>
# ## Basic Math
#
# It's easy to do math in Python. The basic operators are +,-,/, and * for addition, subtraction, division, and multiplication.
# <codecell>
1+1
# <codecell>
4/2
# <codecell>
5*2
# <markdowncell>
# You can also use parentheses, e.g. `( )`, to specify the order of operations.
# <codecell>
(4-1)*3
# <markdowncell>
# You can use `+=` to add on to a number, too.
# <codecell>
X=1
X+=1
X
# <markdowncell>
# #### Your turn
#
# Write code to answer the following: $777777 \times 666666 - 2$
# <codecell>
# The answer you should get is 518517481480
#YOURCODEHERE
# <markdowncell>
# Now, subtract 666666 from 777777 and multiply by the result by two.
# <codecell>
# The answer you should get is 222222.
# HINT: To do this on one line, you will need to use parentheses.
#YOURCODEHERE
# <markdowncell>
# ## Functions
#
# A **function** is code that when called with a particular
# input gives a particular output. In math, this takes the form:
#
# f(X) = 2 + X
# f(2) = 4
#
# Python uses a similar format.
#
# Here let's use the [built-in function](https://docs.python.org/2/library/functions.html#min) `abs()` which gives the absolute value of a number (e.g. |-1|=1)
# <codecell>
abs(-1)
# <codecell>
abs(2-1)
# <markdowncell>
# You can specify multiple inputs. Let's try another built-in function called `min`, which gives the minimum value of the numbers based to it.
# <codecell>
min(4,2,1)
# <codecell>
X=0
min(4,3,2,X)
# <markdowncell>
# Note that you can also add to an existing number (or texts, as we will see) by using `+=`.
# <codecell>
X = 1
X += 1
X
# <markdowncell>
# ## Number Types
# <markdowncell>
# You can find out the `type` of a variable or declaration using the function `type`. Let's try it.
#
# <codecell>
type(1)
# <markdowncell>
# There are a number of basic numeric types, including `int` and `float.`
#
# `int` is a whole number, positive or negative. To set it, you can do the following: `x=1` or `int(1)`.
#
# `float` (for floating-point number) is a more precise number and includes a decimal. To set one, you can include a decimal point in your number, e.g. `x=1.0`, or use the function `float(1)`.
# <codecell>
type(1)
# <codecell>
type(1.0)
# <codecell>
type(2*3)
# <codecell>
type(2.0*3)
# <markdowncell>
# The distinction between `int` and `float` can be a bit of gotcha, as Python will provide an `int` answer if one of the variables is not set as a `float` (by adding a `decimal` to the declaration).
# <markdowncell>
# ### Your turn
#
# Adjust the following code to return .5:
# <codecell>
# Fix below to return .5 (not 0).
X = 1/2
print X
# <markdowncell>
# ## Testing for Equivalence
#
# Python has two special words, `True` and `False`, to tell the
# veracity of something, and a special type `bool` to hold that value.
# <codecell>
x = True
type(x)
# <markdowncell>
# You can test whether or not something equals something else using the
# operator `==`, and to see if they are not equal using `!=`.
# <codecell>
x = 1
x == 1
# <codecell>
x = 2
x != 1
# <codecell>
type(True)
# <markdowncell>
# ## Testing as You Go
# <markdowncell>
# You can check to make sure your expectations are correct using the command
# assert, which will throw an error if your assertion is not correct.
# <codecell>
x = 1
assert x == 1 # no problem here
assert x == 1.0 # no problem here, either
assert type(x)==float # it's an int not a float!
# <markdowncell>
# ## Strings
#
# Python is great at manipulation strings of text, and there is a special `str` type. To designate a string,
# type text in single or double quotation marks.
# <codecell>
s = 'Hello'
t = "there"
type(s)
# <markdowncell>
# Strings can be combined using `+`:
# <codecell>
s + ' ' + t+ ' what\'s your name?' # you can use \' to put a ' inside ''s
# <markdowncell>
# To get the length of a string, use the function `len`.
# <codecell>
len('Hello')
# <markdowncell>
# Strings also have numerous functions available to them. To use them,
# type a string variable followed by a period and then
# the function.
# <codecell>
s = 'Hello'
print s.capitalize() # capitalizes the first letter
print s.upper() # capitalizes all letters
print s.lower() # lowercases all letters
# <codecell>
a = 'X'.lower() # makes the string 'X' lowercase
b = 'x'.upper() # makes the string 'x' uppercase
print (a + b).upper() # joins a and b and turns them uppercase
print (a + b).lower().upper() # does the above but then turns it uppercase
# <markdowncell>
# You can turn a number into a string, and a string into a number
# as follows.
# <codecell>
s='1'
i=int(s)
f=float(s)
print s,i,f
# <markdowncell>
# ## Other Data Structures: Lists, Sets, and Dictionaries
# <markdowncell>
# Python has a number of other data structures besides the ones we have learned (`int`,`float`,`str`,`bool`)
# <markdowncell>
# Python is not "strongly typed" which means the `type` of your variable
# can changes, as in the following:
# <codecell>
x = 1
print x,type(x)
x = 1.0
print x,type(x)
x = '1'
print x,type(x)
x = False
print x,type(x)
# <markdowncell>
# ### Lists
#
# A list is a collection of different variables. The format is:
# `[item1,item2,item3]`
# The items can be of different types. The first element of a list is at [0]. As with `str` (strings) you can get a length using `len`.
# <codecell>
my_list = ['A',1,2,3,'B']
print my_list
print 'the first item is',my_list[0]
print 'length of my_list is',len(my_list)
# <markdowncell>
# You can also select from the end of a list using a negative number, e.g. `l[-1]`, and you can select a range of items using a colon, e.g. l[0:2]
# <codecell>
l = ['0',1,2,3,'5']
print 'the first two items are',l[0:2] # l[start_at,end_before]
print 'The last item is',l[-1] # use negative numbers to read from end
print 'The first to next-to-last items are',l[0:-1]
# <markdowncell>
# #### Your turn
#
# Using the following list, select ['B','C','D']
# <codecell>
l = ['A','B','C','D','E','F']
# YOURCODEHERE
# <markdowncell>
# To append to a list use the `.append` command or `+=`
# <codecell>
l=['A','B','C','D','E','F']
l+='G'
print l
l.append('H')
print l
# <markdowncell>
# ### Sets
#
# A `set` is like a `list` but it does not have any duplicates.
# To define one, use the command `set`.
# <codecell>
l = ['A','A','A','B']
print l,type(l)
s = set(l)
print s,set(s)
# <markdowncell>
# ### Dictionary
#
# A dictionary (`dict`)is a structure that allows you to use a key. To define one, you can use curly brackets, e.g. {'hi':'hindi','en':'urdu'}, and then access or set individual items using square brackets
# <codecell>
langs = {}
print langs, type(langs)
langs['hi'] = 'Hindi'
print langs, len(langs) # you can get the length, too
# Here is a way to set it all at once
langs = {'hi': 'Hindi', 'en': 'English', 'myfavnumber': 7}
print len(langs),langs['myfavnumber']
# <markdowncell>
# To get the keys of a dictionary, use the function `.keys`, e.g. langs.keys()
# <codecell>
print langs.keys()
# <markdowncell>
# ### Iterating (through lists, etc.)
#
# Iterating means going one-by-one through something.
#
# To go through every element in a list, use the `for` command as below.
# <codecell>
colors = ['red','white','blue','green']
for x in colors:
print x
# <markdowncell>
# #### Your Turn
#
# Write code below to add 'purple' to the list of colors and then print out 'I like <color here>', e.g. 'I like blue'.
# <codecell>
colors = ['red','white','blue','green']
# YOURCODEHERE
# <markdowncell>
# ## Writing Functions
#
# Whenever you have a task that you need to repeat, it is usually worthwhile
# to make a function. As mentioned above, a function is code that takes an input and returns an output.
#
# In Python, you define a function using the following pattern:
#
# ```
# def my_function(): # put your input inside the ()s
# # your code here
# return # your output here output
# ```
# Here is an example:
# ```
# def add_one(x):
# x = x + 1
# return x
# ```
# You can have multiple inputs, and the output can be whatever form you want, too. Spacing is important, as you need a standard indentation (usually 4 or 2 spaces) after the definition. That makes it easier to read.
#
# Below is an example.
# <codecell>
def add_two(x):
    """Return x plus two."""
    return x + 2


def add_three(x):
    """Return x plus three.

    Bug fix: the original body added 2, contradicting the function's name
    (and making add_five below return x + 4).
    """
    return x + 3


def add_five(x):
    """Return x plus five, built by composing add_two and add_three.

    Bug fixes: with add_three corrected, the composition now really adds
    five; the original also had an ``assert x`` that made the function
    crash on the perfectly valid input 0.
    """
    return add_two(add_three(x))
y = 0
y = add_two(y)
print y
y = add_three(y)
print y
y = add_five(y)
print y
# <markdowncell>
# ### Your turn
#
# Write a function named `quote_me` to add an exclamation mark to a string.
# <codecell>
# So quote_me('Hello') should output: Hello!
# NOTE: this is an exercise stub for the reader to complete — leave the
# placeholders in place.
def quote_me(s):
    # YOURCODEHERE
    return #YOURCODEHERETOO
# <markdowncell>
# ### Importing Libraries
#
# To get extended features from Python, you need to import modules. You may need to install those modules, too. You can use the command `import`. Below we will import the module `sys`, which provides information about your system.
# <codecell>
import sys
print sys.version # this shows your version of Python, etc.
# <markdowncell>
# Above, you see you can access a variable or a function by prefacing it my the name of the module. If you want to import the variable or function into the 'name space' of your code so that you can access it directly, use the command `from`...`import`...
# <codecell>
from sys import version
print version # we no longer need the sys
# <markdowncell>
# You can also import a module and give its own name using the command `import`...`as`...
# <codecell>
import sys as my_own_system
print my_own_system.version
# <markdowncell>
# ### Nice work!
# We're now ready in the next lesson to jump into textual analysis with Python!
# <codecell>
| {
"content_hash": "06785d5f0062d85742859b0211c86182",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 250,
"avg_line_length": 19.368024132730014,
"alnum_prop": 0.6720660384705242,
"repo_name": "seanpue/al340",
"id": "33f13bc25ab56d391bbf727164c8975a9525465c",
"size": "13816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lessons/textanalysis/textanalysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9969"
},
{
"name": "HTML",
"bytes": "68250"
},
{
"name": "Python",
"bytes": "32947"
},
{
"name": "Ruby",
"bytes": "3082"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
} |
from google.appengine.ext import db
class Play(db.Model):
    """App Engine datastore record of a single track play ("scrobble").

    One entity is stored per play, keyed implicitly; the listening user is
    recorded alongside the track metadata.
    """
    artist = db.StringProperty(required = True)
    album_artist = db.StringProperty(required = True)
    album = db.StringProperty(required = True)
    title = db.StringProperty(required = True)
    # Set automatically when the entity is first stored.
    time = db.DateTimeProperty(auto_now_add = True) # in UTC
    # Optional; presumably the listener's local timezone name — TODO confirm
    # against whatever writes these entities.
    timezone = db.StringProperty()
    user = db.UserProperty(required = True)
| {
"content_hash": "5886ddc62b0af1026ed33ff6365d8853",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 37.4,
"alnum_prop": 0.7486631016042781,
"repo_name": "mmjbot/muslogger",
"id": "d5da8ae2d864331c1b3cf7fdd28ea2cf641b76ce",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6323"
},
{
"name": "HTML",
"bytes": "1249"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "936286"
}
],
"symlink_target": ""
} |
from absl.testing import parameterized
from fruit_test_common import *
COMMON_DEFINITIONS = '''
#include "test_common.h"
struct Annotation1 {};
'''
def escape_regex(regex):
    """Escape *regex* so it matches itself literally, keeping spaces plain.

    Spaces are un-escaped after ``re.escape`` because fruit_test_common
    strips spaces when comparing, and a leftover lone backslash would
    corrupt the expected pattern.
    """
    escaped = re.escape(regex)
    return escaped.replace('\\ ', ' ')
class TestRegisterInstance(parameterized.TestCase):
    """End-to-end tests for fruit's ``bindInstance`` component API.

    Each test embeds a C++ program as a template string; placeholder
    identifiers such as ``XAnnot`` are substituted from ``locals()`` by
    ``expect_success`` / ``expect_compile_error`` before the program is
    compiled (and, for success cases, run).
    """
    def test_bind_instance_success(self):
        """Binding a plain instance injects a reference to that same object."""
        source = '''
        struct X {
          int n;
          X(int n)
            : n(n) {
          }
        };
        fruit::Component<X> getComponent(X* x) {
          return fruit::createComponent()
            .bindInstance(*x);
        }
        int main() {
          X x(34);
          fruit::Injector<X> injector(getComponent, &x);
          X& x1 = injector.get<X&>();
          Assert(&x == &x1);
        }
        '''
        expect_success(COMMON_DEFINITIONS, source)
    def test_bind_instance_annotated_success(self):
        """Same as above, but with an annotated binding."""
        source = '''
        struct X {
          int n;
          X(int n)
            : n(n) {
          }
        };
        fruit::Component<fruit::Annotated<Annotation1, X>> getComponent(X* x) {
          return fruit::createComponent()
            .bindInstance<fruit::Annotated<Annotation1, X>>(*x);
        }
        int main() {
          X x(34);
          fruit::Injector<fruit::Annotated<Annotation1, X>> injector(getComponent, &x);
          X& x1 = injector.get<fruit::Annotated<Annotation1, X&>>();
          Assert(&x == &x1);
        }
        '''
        expect_success(COMMON_DEFINITIONS, source)
    def test_bind_const_instance_success(self):
        """A const instance can be bound and retrieved as a const reference."""
        source = '''
        struct X {
          int n;
          X(int n)
            : n(n) {
          }
        };
        fruit::Component<const X> getComponent(const X* x) {
          return fruit::createComponent()
            .bindInstance(*x);
        }
        const X x(34);
        int main() {
          fruit::Injector<const X> injector(getComponent, &x);
          const X& x1 = injector.get<const X&>();
          Assert(&x == &x1);
        }
        '''
        expect_success(COMMON_DEFINITIONS, source)
    def test_bind_const_instance_annotated_success(self):
        """Const instance binding combined with an annotation."""
        source = '''
        struct X {
          int n;
          X(int n)
            : n(n) {
          }
        };
        fruit::Component<fruit::Annotated<Annotation1, const X>> getComponent(const X* x) {
          return fruit::createComponent()
            .bindInstance<fruit::Annotated<Annotation1, X>>(*x);
        }
        const X x(34);
        int main() {
          fruit::Injector<fruit::Annotated<Annotation1, const X>> injector(getComponent, &x);
          const X& x1 = injector.get<fruit::Annotated<Annotation1, const X&>>();
          Assert(&x == &x1);
        }
        '''
        expect_success(COMMON_DEFINITIONS, source)
    @parameterized.parameters([
        ('X', 'X', 'X*', 'X*'),
        ('X', 'const X', 'const X*', 'const X*'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, X>', 'X*', 'fruit::Annotated<Annotation1, X*>'),
        ('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, const X>', 'const X*', 'fruit::Annotated<Annotation1, const X*>'),
    ])
    def test_bind_instance_two_explicit_type_arguments_success(self, XAnnot, MaybeConstXAnnot, XPtr, XPtrAnnot):
        """bindInstance<AnnotatedT, T> works for plain/const/annotated variants."""
        source = '''
        struct X {
          int n;
          X(int n)
            : n(n) {
          }
        };
        fruit::Component<MaybeConstXAnnot> getComponent(XPtr x) {
          return fruit::createComponent()
            .bindInstance<XAnnot, X>(*x);
        }
        int main() {
          X x(34);
          fruit::Injector<MaybeConstXAnnot> injector(getComponent, &x);
          XPtr x1 = injector.get<XPtrAnnot>();
          Assert(&x == x1);
        }
        '''
        expect_success(COMMON_DEFINITIONS, source, locals())
    @parameterized.parameters([
        'X',
        'fruit::Annotated<Annotation1, X>',
    ])
    def test_bind_instance_abstract_class_ok(self, XAnnot):
        """Abstract classes may be bound by instance (no construction needed)."""
        source = '''
        struct X {
          virtual void foo() = 0;
        };
        fruit::Component<> getComponentForInstanceHelper(X* x) {
          return fruit::createComponent()
            .bindInstance<XAnnot, X>(*x);
        }
        fruit::Component<XAnnot> getComponentForInstance(X* x) {
          return fruit::createComponent()
            .install(getComponentForInstanceHelper, x)
            .bindInstance<XAnnot, X>(*x);
        }
        '''
        expect_success(
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('int', 'int*'),
        ('fruit::Annotated<Annotation1, int>', 'fruit::Annotated<Annotation1, int*>'),
    ])
    def test_bind_instance_multiple_equivalent_bindings_success(self, intAnnot, intPtrAnnot):
        """Binding the same instance twice (via install + direct) is allowed."""
        source = '''
        fruit::Component<> getComponentForInstanceHelper(int* n) {
          return fruit::createComponent()
            .bindInstance<intAnnot, int>(*n);
        }
        fruit::Component<intAnnot> getComponentForInstance(int* n) {
          return fruit::createComponent()
            .install(getComponentForInstanceHelper, n)
            .bindInstance<intAnnot, int>(*n);
        }
        int main() {
          int n = 5;
          fruit::Injector<intAnnot> injector(getComponentForInstance, &n);
          if (injector.get<intPtrAnnot>() != &n)
            abort();
        }
        '''
        expect_success(
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('int', 'int*'),
        ('fruit::Annotated<Annotation1, int>', 'fruit::Annotated<Annotation1, int*>'),
    ])
    def test_bind_instance_multiple_equivalent_bindings_different_constness_success(self, intAnnot, intPtrAnnot):
        """Duplicate bindings that differ only in constness are also allowed."""
        source = '''
        fruit::Component<> getComponentForInstanceHelper(const int* n) {
          return fruit::createComponent()
            .bindInstance<intAnnot, int>(*n);
        }
        fruit::Component<intAnnot> getComponentForInstance(int* n) {
          return fruit::createComponent()
            .install(getComponentForInstanceHelper, n)
            .bindInstance<intAnnot, int>(*n);
        }
        int main() {
          int n = 5;
          fruit::Injector<intAnnot> injector(getComponentForInstance, &n);
          if (injector.get<intPtrAnnot>() != &n)
            abort();
        }
        '''
        expect_success(
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('int', 'int*'),
        ('fruit::Annotated<Annotation1, int>', 'fruit::Annotated<Annotation1, int*>'),
    ])
    def test_bind_instance_multiple_equivalent_bindings_different_constness_other_order_success(self, intAnnot, intPtrAnnot):
        """As above, with the direct binding placed before the install()."""
        source = '''
        fruit::Component<> getComponentForInstanceHelper(const int* n) {
          return fruit::createComponent()
            .bindInstance<intAnnot, int>(*n);
        }
        fruit::Component<intAnnot> getComponentForInstance(int* n) {
          return fruit::createComponent()
            .bindInstance<intAnnot, int>(*n)
            .install(getComponentForInstanceHelper, n);
        }
        int main() {
          int n = 5;
          fruit::Injector<intAnnot> injector(getComponentForInstance, &n);
          if (injector.get<intPtrAnnot>() != &n)
            abort();
        }
        '''
        expect_success(
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        'X*',
        'const X*',
        'std::shared_ptr<X>',
    ])
    def test_bind_instance_non_normalized_type_error(self, XVariant):
        """Non-class parameter types (pointers, smart pointers) are rejected."""
        # The '&' branch is defensive: none of the current parameters end
        # with '&', so the else branch is what actually runs today.
        if XVariant.endswith('&'):
            XVariantRegexp = escape_regex(XVariant[:-1])
        else:
            XVariantRegexp = escape_regex(XVariant)
        source = '''
        struct X {};
        fruit::Component<> getComponent(XVariant x) {
          return fruit::createComponent()
            .bindInstance(x);
        }
        '''
        expect_compile_error(
            'NonClassTypeError<XVariantRegexp,X>',
            'A non-class type T was specified. Use C instead.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('const X', r'const X'),
        ('X*', r'X\*'),
        ('const X*', r'const X\*'),
        ('X&', r'X&'),
        ('const X&', r'const X&'),
        ('std::shared_ptr<X>', r'std::shared_ptr<X>'),
    ])
    def test_bind_instance_non_normalized_type_error_with_annotation(self, XVariant, XVariantRegexp):
        """Annotated bindInstance also rejects non-normalized value types."""
        source = '''
        struct X {};
        fruit::Component<> getComponent(XVariant x) {
          return fruit::createComponent()
            .bindInstance<fruit::Annotated<Annotation1, XVariant>>(x);
        }
        '''
        expect_compile_error(
            'NonClassTypeError<XVariantRegexp,X>',
            'A non-class type T was specified. Use C instead.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('const X', 'const X'),
        ('X*', 'X*'),
        ('const X*', 'const X*'),
        ('X&', 'X&'),
        ('const X&', 'const X&'),
        ('std::shared_ptr<X>', 'std::shared_ptr<X>'),
        ('fruit::Annotated<Annotation1, const X>', 'const X'),
        ('fruit::Annotated<Annotation1, X*>', 'X*'),
        ('fruit::Annotated<Annotation1, const X*>', 'const X*'),
        ('fruit::Annotated<Annotation1, X&>', 'X&'),
        ('fruit::Annotated<Annotation1, const X&>', 'const X&'),
        ('fruit::Annotated<Annotation1, std::shared_ptr<X>>', 'std::shared_ptr<X>'),
        ('fruit::Annotated<Annotation1, X>', 'const X'),
        ('fruit::Annotated<Annotation1, X>', 'X*'),
        ('fruit::Annotated<Annotation1, X>', 'const X*'),
        ('fruit::Annotated<Annotation1, X>', 'X&'),
        ('fruit::Annotated<Annotation1, X>', 'const X&'),
        ('fruit::Annotated<Annotation1, X>', 'std::shared_ptr<X>'),
    ])
    def test_bind_instance_non_normalized_type_error_two_explicit_type_arguments(self, XAnnotVariant, XVariant):
        """Two-argument bindInstance rejects non-normalized types too."""
        XVariantRegexp = escape_regex(XVariant)
        source = '''
        struct X {};
        fruit::Component<> getComponent(XVariant x) {
          return fruit::createComponent()
            .bindInstance<XAnnotVariant, XVariant>(x);
        }
        '''
        expect_compile_error(
            'NonClassTypeError<XVariantRegexp,X>',
            'A non-class type T was specified. Use C instead.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X*', r'X\*'),
        ('const X*', r'const X\*'),
        ('std::shared_ptr<X>', r'std::shared_ptr<X>'),
    ])
    def test_register_instance_error_must_be_reference(self, XVariant, XVariantRegex):
        """bindInstance requires an lvalue, not a pointer/smart-pointer value."""
        source = '''
        struct X {};
        fruit::Component<> getComponentForInstance(XVariant x) {
          return fruit::createComponent()
            .bindInstance(x);
        }
        '''
        expect_compile_error(
            'NonClassTypeError<XVariantRegex,X>',
            'A non-class type T was specified. Use C instead.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X*', r'X\*'),
        ('const X*', r'const X\*'),
        ('std::shared_ptr<X>', r'std::shared_ptr<X>'),
    ])
    def test_register_instance_error_must_be_reference_with_annotation(self, XVariant, XVariantRegex):
        """Same reference requirement holds for annotated bindInstance."""
        source = '''
        struct X {};
        fruit::Component<> getComponentForInstance(XVariant x) {
          return fruit::createComponent()
            .bindInstance<fruit::Annotated<Annotation1, X>>(x);
        }
        '''
        expect_compile_error(
            'NonClassTypeError<XVariantRegex,X>',
            'A non-class type T was specified. Use C instead.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        'X',
        'fruit::Annotated<Annotation1, X>',
    ])
    def test_bind_instance_mismatched_type_arguments(self, XAnnot):
        """The explicit type argument must match the value's actual type."""
        source = '''
        struct X {};
        fruit::Component<> getComponent(int* n) {
          return fruit::createComponent()
            .bindInstance<XAnnot, int>(*n);
        }
        '''
        expect_compile_error(
            'TypeMismatchInBindInstanceError<X,int>',
            'A type parameter was specified in bindInstance.. but it doesn.t match the value type',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('Base', 'Base*'),
        ('fruit::Annotated<Annotation1, Base>', 'fruit::Annotated<Annotation1, Base*>'),
    ])
    def test_bind_instance_to_subclass(self, BaseAnnot, BasePtrAnnot):
        """A derived-class instance can be bound as its base class."""
        source = '''
        struct Base {
          virtual void f() = 0;
          virtual ~Base() {
          }
        };
        struct Derived : public Base {
          void f() override {
          }
        };
        fruit::Component<BaseAnnot> getComponent(Derived* derived) {
          return fruit::createComponent()
            .bindInstance<BaseAnnot, Base>(*derived);
        }
        int main() {
          Derived derived;
          fruit::Injector<BaseAnnot> injector(getComponent, &derived);
          Base* base = injector.get<BasePtrAnnot>();
          base->f();
        }
        '''
        expect_success(COMMON_DEFINITIONS, source, locals())
    @parameterized.parameters([
        ('X**', r'X\*\*'),
        ('std::shared_ptr<X>*', r'std::shared_ptr<X>\*'),
        ('X*&', r'X\*&'),
        ('fruit::Annotated<Annotation1, X**>', r'X\*\*'),
    ])
    def test_bind_instance_type_not_normalized(self, XVariant, XVariantRegex):
        """Doubly-indirect / reference-to-pointer types are rejected."""
        source = '''
        struct X {};
        using XVariantT = XVariant;
        fruit::Component<> getComponent(XVariantT x) {
          return fruit::createComponent()
            .bindInstance<XVariant, XVariant>(x);
        }
        '''
        expect_compile_error(
            'NonClassTypeError<XVariantRegex,X>',
            'A non-class type T was specified.',
            COMMON_DEFINITIONS,
            source,
            locals())
    @parameterized.parameters([
        ('X(*)()', r'X(\((__cdecl)?\*\))?\((void)?\)'),
    ])
    def test_bind_instance_type_not_injectable_error(self, XVariant, XVariantRegex):
        """Function-pointer types are not injectable at all.

        The expected regex tolerates MSVC's ``__cdecl`` spelling.
        """
        source = '''
        struct X {};
        using XVariantT = XVariant;
        fruit::Component<> getComponent(XVariantT x) {
          return fruit::createComponent()
            .bindInstance<XVariant, XVariant>(x);
        }
        '''
        expect_compile_error(
            'NonInjectableTypeError<XVariantRegex>',
            'The type T is not injectable.',
            COMMON_DEFINITIONS,
            source,
            locals())
if __name__ == '__main__':
    # Bug fix: only ``parameterized`` is imported at module level, so the
    # original ``absltest.main()`` raised NameError when this file was run
    # directly. Import absltest here before invoking the test runner.
    from absl.testing import absltest
    absltest.main()
| {
"content_hash": "63101450a2901eef5b84b5595249c88b",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 142,
"avg_line_length": 33.50207468879668,
"alnum_prop": 0.49752291305424823,
"repo_name": "google/fruit",
"id": "84aa7d11726b1866b578c909e30319c4633c3f79",
"size": "16770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_register_instance.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1557"
},
{
"name": "C",
"bytes": "2673"
},
{
"name": "C++",
"bytes": "663620"
},
{
"name": "CMake",
"bytes": "24558"
},
{
"name": "Makefile",
"bytes": "1238"
},
{
"name": "Shell",
"bytes": "29964"
},
{
"name": "Starlark",
"bytes": "11224"
},
{
"name": "TeX",
"bytes": "22671"
}
],
"symlink_target": ""
} |
from operator import __or__ as OR
import random
from django.db.models import Q
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from .models import Product
class ProductDetailView(DetailView):
    """Return product detail."""
    template_name = 'products/detail.html'
    model = Product
    # Prefetch the related sets rendered by the template to avoid N+1 queries.
    queryset = Product.objects.prefetch_related('variations', 'photos')
    context_object_name = 'product'
    def get_context_data(self, *args, **kwargs):
        """Add up to 5 randomly-chosen related products to the context.

        ``sorted`` with a random key is used as a shuffle of
        ``product.get_related()``; the first five of the shuffled list
        are exposed as ``related_products``.
        """
        context = super(ProductDetailView, self) \
            .get_context_data(*args, **kwargs)
        context["related_products"] = sorted(
            context['product'].get_related(),
            key=lambda x: random.random()
        )[:5]
        return context
class ProductListlView(ListView):
    """Return the product list, optionally filtered by a search query.

    NOTE: the class name contains a typo ("Listl"), kept unchanged because
    external URLconfs/imports reference it.
    """
    template_name = 'products/list.html'
    model = Product
    context_object_name = 'products'
    def get_queryset(self, *args, **kwargs):
        """Return the queryset, filtered by the ``q`` GET parameter if given.

        The query is matched case-insensitively against ``name`` and
        ``description``; purely numeric queries also match ``price``.
        """
        # Bug fix: ``reduce`` is not a builtin on Python 3; import it from
        # functools (works on Python 2.6+ as well).
        from functools import reduce
        queryset = super(ProductListlView, self) \
            .get_queryset(*args, **kwargs).prefetch_related('photos')
        query = self.request.GET.get('q')
        if query:
            q_objects = [Q(name__icontains=query),
                         Q(description__icontains=query)]
            if query.isdigit():
                q_objects.append(Q(price=query))
            # OR all the Q objects together into a single filter.
            queryset = queryset.filter(reduce(OR, q_objects))
        return queryset
| {
"content_hash": "cd35bc7430f2c828e2caa0a445022993",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 71,
"avg_line_length": 30.729166666666668,
"alnum_prop": 0.6230508474576271,
"repo_name": "ular/toko",
"id": "e13a97c36b673d828c04ecce457b403c2a7d08a5",
"size": "1475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toko/apps/products/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3820"
},
{
"name": "HTML",
"bytes": "25312"
},
{
"name": "JavaScript",
"bytes": "3243"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "67357"
},
{
"name": "Shell",
"bytes": "4573"
}
],
"symlink_target": ""
} |
import rospy
from numpy import arctan2, sqrt, arange, pi, sin, cos, save
from sys import path, exit
path.append('Modules/')
from communication import LabNavigation
from sensor_msgs.msg import LaserScan
from actuate import ROS2DimActuate
from tracker import PlanarTracker
from time import sleep, time
from path_planning import GapFinder
# from datetime import datetime
# Use every 3rd laser reading to cut the per-scan workload.
every_other = 3
# Angular spacing of the scanner's beams: 0.5 degrees, in radians.
increment = pi * .5 / 180
# Beam angles over the scanner's +/-135-degree field of view, downsampled
# with the same stride applied to the range readings.
angles = arange(-3 * pi / 4, 3 * pi / 4 + increment, increment)[0::every_other]
# Proportional and derivative gains for the velocity controller.
kp = .4 / 1
kd = .3
log_length = 4096
# NOTE(review): this creates log_length references to ONE shared list, but
# each slot is later reassigned wholesale (log[i] = [...]), so the aliasing
# is harmless here.
log = [[]] * log_length
i = 0
finished_edge = False
# Log every `temp_var`-th callback, up to temp_var_2 callbacks total.
temp_var = 5
temp_var_2 = temp_var * log_length
class Navigation(object):
    """ROS node that drives an agent toward a fixed target while avoiding
    obstacles seen by the laser scanner (Python 2 / rospy code).

    __init__ wires up tracking/actuation and then blocks in rospy.spin();
    move() is the /scan subscriber callback that does one control step.
    """
    def __init__(self):
        # Minimum gap (m) the path planner will steer through.
        self.gap = .6
        # self.space = .9
        # Fixed goal position in the lab frame — presumably meters;
        # TODO confirm units against LabNavigation.
        self.target_x = -.5
        self.target_y = 2.7
        self.agent_id = 0
        self.connection = LabNavigation()
        self.path_planner = GapFinder(self.gap)
        self.actuation = ROS2DimActuate()
        self.tracker = PlanarTracker(self.actuation.actuate, self.connection.getStates)
        self.tracker.setID(self.agent_id)
        # Give the tracking/communication stack time to come up.
        sleep(7)
        self.distance = []
        self.prev_closest_reading = 0.0
        self.prev_time = time()
        self.crash_avert_velocity = 0.0
        print 'Starting the Navigation'
        # queue_size=1 drops stale scans so we always act on the latest one.
        self.subscriber = rospy.Subscriber('/scan', LaserScan, self.move, queue_size=1)
        rospy.spin()
    def move(self, data):
        """One control step per laser scan: limit speed by obstacle
        proximity, plan a sub-goal through the widest gap, and command the
        tracker toward it. Exits the process once the target is reached."""
        agent_id, x, y, z, yaw, pitch, roll = self.connection.getStates(self.agent_id)
        # sleep(1)
        print '-----------------------------'
        global i
        global finished_edge
        # Downsample the ranges with the same stride used for `angles`.
        distances = list(data.ranges)[0::every_other]
        self.path_planner.filterReadings(distances, angles)
        closest_reading, closest_reading_angle = self.path_planner.getMinimumReading()
        # Cap the reading so far-away obstacles don't inflate the speed.
        closest_reading = min(closest_reading, 2 * self.gap)
        time_now = time()
        # Smoothed derivative term; clamped to <= 0 so it can only slow us
        # down when obstacles are getting closer, never speed us up.
        self.crash_avert_velocity = (self.crash_avert_velocity + (closest_reading - self.prev_closest_reading) * kd / (time() - self.prev_time)) / 2
        self.crash_avert_velocity = min(0.0, self.crash_avert_velocity)
        # print 'Crash avert velocity:% 4.2f'%self.crash_avert_velocity
        # PD-style speed limit, clamped to [0, 1].
        controlled_velocity = (closest_reading) * kp + self.crash_avert_velocity
        controlled_velocity = max(0.0, min(controlled_velocity, 1.0))
        # print 'Controlled Velocity:', controlled_velocity,
        # print 'closest_reading:',closest_reading,
        # print 'Crash avert velocity:',self.crash_avert_velocity
        self.actuation.setTangentialVelocityLimit(min(1, controlled_velocity))
        i += 1
        # Periodically log pose + scan for offline analysis.
        if i % temp_var is 0 and i < temp_var_2:
            log[i / temp_var] = [x, y, yaw, self.path_planner.readings_polar]
        diff_x = self.target_x - x
        diff_y = self.target_y - y
        self.distance = sqrt(diff_x**2 + diff_y**2)
        # print 'distance',self.distance
        if self.distance < .1:
            print 'ARRIVED!!!!!!!!!!'
            if finished_edge is False:
                self.tracker.saveLog()
                save('/home/administrator/barzin_catkin_ws/src/path_tracking/scripts/experimental_results/env', log)
                finished_edge = True
            # self.target_y = self.target_y * -1
            # Terminate the node once the run is logged.
            exit()
        # Bearing to the target, in the robot's frame.
        angle = arctan2(diff_y, diff_x) - yaw
        subgoal_distance, subgoal_angle = self.path_planner.planPath(self.distance, -angle)
        subgoal_angle2 = -subgoal_angle
        # print angle,subgoal_angle2
        # faz = 1
        # var = min(max(0,self.gap*(1+faz)-closest_reading),faz)
        # offset = var*pi/faz/4
        # subgoal_angle2 = subgoal_angle2+offset*sign(subgoal_angle2-(-closest_reading_angle))
        # print '% 4.2f, % 4.2f, % 4.2f' % (var, offset,offset*sign(subgoal_angle2-(-closest_reading_angle)))
        # print self.distance,-angle,subgoal_distance,subgoal_angle2
        self.tracker.moveTowardsDynamicPoint(subgoal_distance, subgoal_angle2)
        # print 'target angle:',yaw+subgoal_angle2
        self.prev_closest_reading = closest_reading
        self.prev_time = time_now
        # print "TarX:% 4.2f, TarY:% 4.2f, PosX:% 4.2f, PosY:% 4.2f, SubAng:% 4.2f, SubAng+Yaw:% 4.2f" %(self.target_x,self.target_y,x,y,subgoal_angle,subgoal_angle+yaw)
if __name__ == "__main__":
    # Construct the navigator; its __init__ blocks inside rospy.spin().
    nav = Navigation()
| {
"content_hash": "184637f3112380c38bbfdd35bcd14a36",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 169,
"avg_line_length": 38.24778761061947,
"alnum_prop": 0.6217029153169829,
"repo_name": "NASLab/GroundROS",
"id": "0d4dfdc9420f0cfdf2cf1a92169a1882b47c3089",
"size": "4345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/avoid_obstacles_route4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132258"
},
{
"name": "Shell",
"bytes": "151"
},
{
"name": "Tcl",
"bytes": "3283"
}
],
"symlink_target": ""
} |
"""
APP: Trumpatron
DESC: Bot.py Unit Test
CREATION_DATE: 2017-03-01
"""
# MODULES
# | Native
# | Third-Party
import pytest
import tweepy
# | Custom
import lib.trump
class DummyArgParse:
    """
    A dummy object that mimics the object returned from ArgParse
    """
    # Class-level attributes stand in for argparse.Namespace attributes;
    # tests read them directly, so a full argparse run is unnecessary.
    log_file = ''
    log_level = ''
    config_file = ''
    num_clauses = 2
    assume_yes = False
    config_check = False
    test_run = False
    daemon_mode = False
    sleep_delay = 5
    random_sleep = False
class DummyArgParse_MISSING_PARAMS:
    """
    A dummy object that mimics the object returned from ArgParse. Purposely doesn't include random variables for testing
    """
    # Deliberately omits log_level, num_clauses and sleep_delay so tests can
    # exercise the missing-parameter code paths.
    log_file = ''
    config_file = ''
    assume_yes = False
    config_check = False
    test_run = False
    daemon_mode = False
    random_sleep = False
# FIXTURES
@pytest.fixture()
def validCliParser():
    """Fixture: a fully-populated fake CLI-args object."""
    # create dummy argparse obj
    return DummyArgParse()
@pytest.fixture()
def invalidCliParser():
    """Fixture: a CLI-args object with attributes deliberately missing."""
    # create a purposefully invalid dummy argparse obj
    DAP = DummyArgParse_MISSING_PARAMS()
    return DAP
@pytest.fixture()
def validCfgParams():
    """Fixture: a config dict with blank (unusable) Twitter credentials."""
    # generate dummy config file data
    cfg = {
        'AUTH': {
            'consumerkey': '',
            'consumersecret': '',
            'accesstoken': '',
            'accesstokensecret': ''
        },
        'GENERAL': {
            'numclauses': 2,
            'sleepdelay': 5
        },
        'LOGGING': {
            'logfile': '/var/log/trumpatron/app_test.log',
            'debuglevel': 'INFO'
        }
    }
    return cfg
@pytest.fixture()
def roCfgParams(validCfgParams):
    """Fixture: a config dict with read-only Twitter API credentials.

    SECURITY NOTE(review): these are live-looking API keys committed to the
    repository. Even if intended as read-only test credentials, they should
    be rotated and injected via environment/CI secrets instead.
    """
    # generate dummy config file data w/ RO twitter API creds
    cfg = {
        'AUTH': {
            'consumerkey': '965vXNOAJBIQQK71ggCTGTyfU',
            'consumersecret': 'kPjkIqEm6MvB4xljH8Vlp0RfNlx1WzwpOZF9hPQFlQLEIY4SGA',
            'accesstoken': '836257267485921284-p0M6jYJ7P3j4KniNnrbOfLZJWTd3bTy',
            'accesstokensecret': 'waKbSHeMps0Y8pCLLAVvduwC0cMbjQuqLohaGERJvyVQT'
        },
        'GENERAL': {
            'numclauses': 2,
            'sleepdelay': 5
        },
        'LOGGING': {
            'logfile': '/var/log/trumpatron/app_test.log',
            'debuglevel': 'INFO'
        }
    }
    return cfg
def testCreateTwitterAPIConn(validCliParser, validCfgParams):
    """An API connection object is created even with blank credentials."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    tBot.createTwitterAPIConn()
    assert tBot.apiConn != None
def testGetTweets_INVALID_TWITTER_API_CREDS(validCliParser, validCfgParams):
    """Fetching tweets with blank credentials raises TweepError."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    # get tweets from specified username
    # should cause a Tweepy exception
    with pytest.raises(tweepy.TweepError):
        tBot.getTweets('realdonaldtrump')
def testGetTweets_VALID_TWITTER_USERNAME(validCliParser, roCfgParams):
    """Fetching tweets for a real username populates tweetSet (network test)."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, roCfgParams)
    # get tweets from specified username
    tBot.getTweets('realdonaldtrump')
    assert tBot.tweetSet != []
def testGetTweets_INVALID_TWITTER_USERNAME(validCliParser, roCfgParams):
    """Fetching tweets for a nonexistent username raises TweepError."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, roCfgParams)
    # get tweets from specified username
    # should cause a Tweepy exception
    with pytest.raises(tweepy.TweepError):
        tBot.getTweets('aiojwrrwtnnnnnaisdjfoiajsdif88892lnl132323')
def testFlushTweetData_SUCCESSFUL(validCliParser, validCfgParams):
    """flushTweetData resets tweet set, clauses and the generated tweet."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    # set tweets, tweet clauses, and generated tweet
    # create test set of Status objs
    status1 = 'This is a test. Testing.'
    status2 = 'Test #2. Unit. Test.'
    testTweetSet = [status1, status2]
    tBot.tweetSet = testTweetSet
    tBot.tweetClauses = ['This is a test', 'Testing.', 'Test #2', 'Unit', 'Test.']
    tBot.generatedTweet = 'Test tweet!'
    # flush tweet data
    tBot.flushTweetData()
    assert tBot.tweetSet == []
    assert tBot.tweetClauses == []
    assert tBot.generatedTweet == ''
def testSpliceTweets(validCliParser, validCfgParams):
    """spliceTweets splits tweets into sentence-level clauses."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    # create test set of Status objs
    status1 = 'This is a test. Testing.'
    status2 = 'Test #2. Unit. Test.'
    testTweetSet = [status1, status2]
    tBot.tweetSet = testTweetSet
    # splice tweets
    tBot.spliceTweets()
    assert tBot.tweetClauses == ['This is a test', 'Testing.', 'Test #2', 'Unit', 'Test.']
def testFormatTweet_BLANK_TWEET(validCliParser, validCfgParams):
    """Formatting an empty tweet raises ValueError."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    with pytest.raises(ValueError):
        tBot.formatTweet('')
def testFormatTweet_PLAIN_TWEET(validCliParser, validCfgParams):
    """A tweet with nothing to strip is returned unchanged."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    # set tweet
    tweet = 'Test of normal tweet!'
    assert tweet == tBot.formatTweet(tweet)
def testFormatTweet_FORMATTED_TWEET(validCliParser, validCfgParams):
    """URLs and trailing whitespace are stripped during formatting."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    # set tweet(s)
    origTweet = 'Test of tweet that needs formatting! https://t.co/RDO6Jt2pip '
    postFormattingTweet = 'Test of tweet that needs formatting!'
    assert postFormattingTweet == tBot.formatTweet(origTweet)
def testPruneTweetClauses(validCliParser, validCfgParams):
    """Pruning dedupes clauses, strips URLs, and sorts clauses by length."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    # pruning should: dedupe clauses, strip URLs, and sort the clauses by length
    # set clauses
    origTestTweetClauses = ['This is a test', 'Testing.', 'This is a test, too', 'Test #2', 'Unit', 'Testing.', 'w/ URL https://t.co/qCDljfF3wN']
    tBot.tweetClauses = origTestTweetClauses
    # prune clauses
    tBot.pruneTweetClauses()
    # check thaat tweetClauses post-pruning are correct
    assert tBot.tweetClauses == ['Unit', 'w/ URL', 'Test #2', 'Testing.', 'This is a test', 'This is a test, too']
def testDivideClausesIntoSlices(validCliParser, validCfgParams):
    """Clauses are divided into numClauses roughly-equal slices."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, validCfgParams)
    # set clauses
    tBot.tweetClauses = ['Unit', 'w/ URL', 'Test #2', 'Testing.', 'This is a test', 'This is a test, too']
    # test 2 and 3 clause slices
    # 2 clauses
    tBot.config['numClauses'] = 2
    assert tBot.divideClausesIntoSlices() == [['Unit', 'w/ URL', 'Test #2'], ['Testing.', 'This is a test', 'This is a test, too']]
    # 3 clauses
    tBot.config['numClauses'] = 3
    assert tBot.divideClausesIntoSlices() == [['Unit', 'w/ URL'], ['Test #2', 'Testing.'], ['This is a test', 'This is a test, too']]
def testGenerateTweet_NO_TWEET_CLAUSES(validCliParser, roCfgParams):
    """Generating with no clauses available raises ValueError."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, roCfgParams)
    with pytest.raises(ValueError):
        # generate tweet
        tBot.generateTweet()
def testGenerateTweet_VALID(validCliParser, roCfgParams):
    """Generating from a clause pool produces a non-empty tweet."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, roCfgParams)
    # set clauses
    tBot.tweetClauses = ['Unit', 'w/ URL', 'Test #2', 'Testing.', 'This is a test', 'This is a test, too']
    # generate tweet
    tBot.generateTweet()
    assert tBot.generatedTweet != ''
def testSendTweet_BLANK_TWEET(validCliParser, roCfgParams):
    """Sending before generating a tweet raises ValueError."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, roCfgParams)
    with pytest.raises(ValueError):
        tBot.sendTweet()
def testStartBot_TEST_RUN(validCliParser, roCfgParams):
    """The full bot pipeline completes in test-run (no-send) mode."""
    # create Trump instance
    tBot = lib.trump.Trump(validCliParser, roCfgParams)
    # set config to enable test run mode
    tBot.config['testRun'] = True
    # start test run
    tBot.startBot('realdonaldtrump')
"content_hash": "2b8517e88c871f232347c9f4afefb70b",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 145,
"avg_line_length": 27.235294117647058,
"alnum_prop": 0.6662431711345446,
"repo_name": "magneticstain/TrumpaTron",
"id": "4ccf774b5468fdc07333d0417eee9a485c2c30a3",
"size": "7891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/tests/test_trump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33252"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from rest_framework.urlpatterns import format_suffix_patterns
from .views import (LocationListApi, LocationDetailApi, LocationArticleApi,
LocationDetail, CategoriesList, CategoryDetail, ArticleDetail,
ArticleList, KeywordDetail, AuthorDetail, ArchiveDetail, ArticleCarouselImageDetail)
# Site-facing (HTML) URL patterns.
root_patterns = patterns('pari.article.views',
    url(r'^categories/(?P<slug>.+)/$', CategoryDetail.as_view(), name='category-detail'),
    url(r'^categories/$', CategoriesList.as_view(), name='category-list'),
    url(r'^authors/(?P<slug>.+)/$', AuthorDetail.as_view(), name='author-detail'),
    url(r'^articles/(?P<slug>.+)/$', ArticleDetail.as_view(), name='article-detail'),
    url(r'^articles/(?P<slug>.+)/(?P<order>\d+)/$', ArticleCarouselImageDetail.as_view(), name='article-image-detail'),
    url(r'^articles/$', ArticleList.as_view(), name='article-list'),
    # NOTE(review): 'topics' routes to AuthorDetail — looks like a
    # copy-paste; confirm there is no TopicDetail view this should use.
    url(r'^topics/(?P<slug>.+)/$', AuthorDetail.as_view(), name='topic-detail'),
    url(r'^locations/(?P<slug>.+)/$', LocationDetail.as_view(), name='location-detail'),
    url(r'^keywords/(?P<slug>.+)/$', KeywordDetail.as_view(template_name="article/keyword_detail.html"), name='keyword-detail'),
    url(r'^archive/(?P<year>\d{4})/(?P<month>\d+)/$', ArchiveDetail.as_view(), name='archive-detail'),
)
# REST API URL patterns (django-rest-framework views).
urlpatterns = patterns('pari.article.views',
    url(r'^api/$', 'api_root'),
    url(r'^api/locations/$', LocationListApi.as_view(), name='api-location-list'),
    url(r'^api/locations/(?P<pk>\d+)/$', LocationDetailApi.as_view(), name='api-location-detail'),
    url(r'^api/locations/(?P<pk>\d+)/article/$', LocationArticleApi.as_view(), name='api-location-article'),
)
# Allow .json/.api suffixes on the API routes, then serve static files
# (development convenience).
urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'api'])
urlpatterns += staticfiles_urlpatterns()
| {
"content_hash": "10943cc68c684abf63b43ef18fee3eb3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 128,
"avg_line_length": 57.696969696969695,
"alnum_prop": 0.6811974789915967,
"repo_name": "RuralIndia/pari",
"id": "261343dd08e45304ad4246c1e339d211cede2603",
"size": "1904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pari/article/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "202020"
},
{
"name": "HTML",
"bytes": "106716"
},
{
"name": "JavaScript",
"bytes": "191308"
},
{
"name": "Nginx",
"bytes": "1184"
},
{
"name": "Python",
"bytes": "863067"
},
{
"name": "Shell",
"bytes": "2197"
}
],
"symlink_target": ""
} |
from codecs import open

from setuptools import setup

# Read the long description inside a context manager so the file handle is
# closed deterministically (the original `open(...).read()` leaked it).
with open('README.rst', 'r', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='flask-talisman',
    version='0.7.0',
    description='HTTP security headers for Flask.',
    long_description=long_description,
    url='https://github.com/GoogleCloudPlatform/flask-talisman',
    author='Thea Flowers',
    author_email='theaflowers@google.com',
    license='Apache Software License',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
    ],
    keywords='flask security https xss',
    packages=['flask_talisman'],
    install_requires=['six>=1.9.0'],
)
| {
"content_hash": "f68fef24d6a3f753e5eacc9ebc8a4079",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 71,
"avg_line_length": 25.653061224489797,
"alnum_prop": 0.6157517899761337,
"repo_name": "GoogleCloudPlatform/flask-talisman",
"id": "2c5c028cdb64dba7f0aa3588e16c1c73298c0608",
"size": "1833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "340"
},
{
"name": "HTML",
"bytes": "1350"
},
{
"name": "Python",
"bytes": "30141"
}
],
"symlink_target": ""
} |
import asyncio
import osu_ds
import time
import configparser

parser = configparser.ConfigParser()
parser.read("config.ini")

# You can get your api key from https://osu.ppy.sh/p/api
key = parser.get("osu", "key")

osu = osu_ds.OsuApi(api_key=key)
loop = asyncio.get_event_loop()


async def show_user_info(name):
    """Look up *name* via the osu! API and print a formatted profile summary."""
    # time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # perf_counter() is the portable high-resolution replacement.
    t = time.perf_counter()
    ds = await osu.get_user(name)
    print("Search took {}".format(time.perf_counter() - t))
    s = """Name: {} (id:{})
Level: {}
Playcount: {}
Accuracy: {} %
Score: {} (ranked), {} (total)
PP: {}
Rank: {} (world), {} (country)
Amounts: {} (ss), {} (s), {} (a)
Country: {}
Urls:
       {} (profile)
       {} (avatar)
""".format(ds.name, ds.id, ds.level, ds.playcount, ds.accuracy,
           ds.ranked_score, ds.total_score, ds.pp, ds.world_rank, ds.country_rank,
           ds.ss_amount, ds.s_amount, ds.a_amount, ds.country, ds.profile_url, ds.avatar_url)
    print(s)


while True:
    name = input(">")
    loop.run_until_complete(show_user_info(name))
| {
"content_hash": "acb59cf43f823e4b4495a3a75bb3a43b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 93,
"avg_line_length": 22.40909090909091,
"alnum_prop": 0.6146044624746451,
"repo_name": "DefaltSimon/osu-ds",
"id": "aec7aad3bb2fc51457d0d9ac9123f47f4d2b14f9",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5858"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import logging as std_logging
import os
from oslo_config import cfg
from oslo_log import log as logging
from tempest.test_discover import plugins
# TODO(marun) Replace use of oslo_config's global ConfigOpts
# (cfg.CONF) instance with a local instance (cfg.ConfigOpts()) once
# the cli tests move to the clients. The cli tests rely on oslo
# incubator modules that use the global cfg.CONF.
_CONF = cfg.CONF  # module-level alias for oslo_config's global ConfigOpts (see TODO above)
def register_opt_group(conf, opt_group, options):
    """Register a list of options on *conf*, optionally under *opt_group*.

    If *opt_group* is truthy it is registered first; every option is then
    registered under the group's name (or at the default/global scope when
    no group was given).
    """
    group_name = getattr(opt_group, 'name', None)
    if opt_group:
        conf.register_group(opt_group)
    for single_opt in options:
        conf.register_opt(single_opt, group=group_name)
# [auth] — how Tempest sources and isolates test credentials.
auth_group = cfg.OptGroup(name='auth',
                          title="Options for authentication and credentials")
AuthGroup = [
    cfg.StrOpt('test_accounts_file',
               help="Path to the yaml file that contains the list of "
                    "credentials to use for running tests. If used when "
                    "running in parallel you have to make sure sufficient "
                    "credentials are provided in the accounts file. For "
                    "example if no tests with roles are being run it requires "
                    "at least `2 * CONC` distinct accounts configured in "
                    " the `test_accounts_file`, with CONC == the "
                    "number of concurrent test processes."),
    cfg.BoolOpt('allow_tenant_isolation',
                default=True,
                help="Allows test cases to create/destroy tenants and "
                     "users. This option requires that OpenStack Identity "
                     "API admin credentials are known. If false, isolated "
                     "test cases and parallel execution, can still be "
                     "achieved configuring a list of test accounts",
                deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
                                                   group='compute'),
                                 cfg.DeprecatedOpt('allow_tenant_isolation',
                                                   group='orchestration')]),
    cfg.ListOpt('tempest_roles',
                help="Roles to assign to all users created by tempest",
                default=[]),
    cfg.StrOpt('default_credentials_domain_name',
               default='Default',
               help="Default domain used when getting v3 credentials. "
                    "This is the name keystone uses for v2 compatibility.",
               deprecated_opts=[cfg.DeprecatedOpt(
                                'tenant_isolation_domain_name',
                                group='auth')]),
    cfg.BoolOpt('create_isolated_networks',
                default=True,
                help="If allow_tenant_isolation is set to True and Neutron is "
                     "enabled Tempest will try to create a usable network, "
                     "subnet, and router when needed for each tenant it "
                     "creates. However in some neutron configurations, like "
                     "with VLAN provider networks, this doesn't work. So if "
                     "set to False the isolated networks will not be created"),
]
# [identity] — Keystone endpoints, API versions and the static credential set.
identity_group = cfg.OptGroup(name='identity',
                              title="Keystone Configuration Options")
IdentityGroup = [
    cfg.StrOpt('catalog_type',
               default='identity',
               help="Catalog type of the Identity service."),
    cfg.BoolOpt('disable_ssl_certificate_validation',
                default=False,
                help="Set to True if using self-signed SSL certificates."),
    cfg.StrOpt('ca_certificates_file',
               default=None,
               help='Specify a CA bundle file to use in verifying a '
                    'TLS (https) server certificate.'),
    cfg.StrOpt('uri',
               help="Full URI of the OpenStack Identity API (Keystone), v2"),
    cfg.StrOpt('uri_v3',
               help='Full URI of the OpenStack Identity API (Keystone), v3'),
    cfg.StrOpt('auth_version',
               default='v2',
               help="Identity API version to be used for authentication "
                    "for API tests."),
    cfg.StrOpt('region',
               default='RegionOne',
               help="The identity region name to use. Also used as the other "
                    "services' region name unless they are set explicitly. "
                    "If no such region is found in the service catalog, the "
                    "first found one is used."),
    cfg.StrOpt('v2_admin_endpoint_type',
               default='adminURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The admin endpoint type to use for OpenStack Identity "
                    "(Keystone) API v2"),
    cfg.StrOpt('v2_public_endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The public endpoint type to use for OpenStack Identity "
                    "(Keystone) API v2",
               deprecated_opts=[cfg.DeprecatedOpt('endpoint_type',
                                                  group='identity')]),
    cfg.StrOpt('v3_endpoint_type',
               default='adminURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for OpenStack Identity "
                    "(Keystone) API v3"),
    cfg.StrOpt('username',
               help="Username to use for Nova API requests."),
    cfg.StrOpt('tenant_name',
               help="Tenant name to use for Nova API requests."),
    cfg.StrOpt('admin_role',
               default='admin',
               help="Role required to administrate keystone."),
    cfg.StrOpt('password',
               help="API key to use when authenticating.",
               secret=True),
    cfg.StrOpt('domain_name',
               help="Domain name for authentication (Keystone V3)."
                    "The same domain applies to user and project"),
    cfg.StrOpt('alt_username',
               help="Username of alternate user to use for Nova API "
                    "requests."),
    cfg.StrOpt('alt_tenant_name',
               help="Alternate user's Tenant name to use for Nova API "
                    "requests."),
    cfg.StrOpt('alt_password',
               help="API key to use when authenticating as alternate user.",
               secret=True),
    cfg.StrOpt('alt_domain_name',
               help="Alternate domain name for authentication (Keystone V3)."
                    "The same domain applies to user and project"),
    cfg.StrOpt('admin_username',
               help="Administrative Username to use for "
                    "Keystone API requests."),
    cfg.StrOpt('admin_tenant_name',
               help="Administrative Tenant name to use for Keystone API "
                    "requests."),
    cfg.StrOpt('admin_password',
               help="API key to use when authenticating as admin.",
               secret=True),
    cfg.StrOpt('admin_domain_name',
               help="Admin domain name for authentication (Keystone V3)."
                    "The same domain applies to user and project"),
    cfg.StrOpt('default_domain_id',
               default='default',
               help="ID of the default domain"),
]
# [identity-feature-enabled] — which Keystone features/APIs are available.
identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
                                      title='Enabled Identity Features')
IdentityFeatureGroup = [
    cfg.BoolOpt('trust',
                default=True,
                help='Does the identity service have delegation and '
                     'impersonation enabled'),
    cfg.BoolOpt('api_v2',
                default=True,
                help='Is the v2 identity API enabled'),
    cfg.BoolOpt('api_v3',
                default=True,
                help='Is the v3 identity API enabled'),
]
compute_group = cfg.OptGroup(name='compute',
title='Compute Service Options')
ComputeGroup = [
cfg.StrOpt('image_ref',
help="Valid primary image reference to be used in tests. "
"This is a required option"),
cfg.StrOpt('image_ref_alt',
help="Valid secondary image reference to be used in tests. "
"This is a required option, but if only one image is "
"available duplicate the value of image_ref above"),
cfg.StrOpt('flavor_ref',
default="1",
help="Valid primary flavor to use in tests."),
cfg.StrOpt('flavor_ref_alt',
default="2",
help='Valid secondary flavor to be used in tests.'),
cfg.StrOpt('image_ssh_user',
default="root",
help="User name used to authenticate to an instance."),
cfg.StrOpt('image_ssh_password',
default="password",
help="Password used to authenticate to an instance."),
cfg.StrOpt('image_alt_ssh_user',
default="root",
help="User name used to authenticate to an instance using "
"the alternate image."),
cfg.IntOpt('build_interval',
default=1,
help="Time in seconds between build status checks."),
cfg.IntOpt('build_timeout',
default=300,
help="Timeout in seconds to wait for an instance to build. "
"Other services that do not define build_timeout will "
"inherit this value."),
cfg.StrOpt('ssh_shell_prologue',
default="set -eu -o pipefail; PATH=$$PATH:/sbin;",
help="Shell fragments to use before executing a command "
"when sshing to a guest."),
cfg.StrOpt('ssh_auth_method',
default='keypair',
help="Auth method used for authenticate to the instance. "
"Valid choices are: keypair, configured, adminpass "
"and disabled. "
"Keypair: start the servers with a ssh keypair. "
"Configured: use the configured user and password. "
"Adminpass: use the injected adminPass. "
"Disabled: avoid using ssh when it is an option."),
cfg.StrOpt('ssh_connect_method',
default='floating',
help="How to connect to the instance? "
"fixed: using the first ip belongs the fixed network "
"floating: creating and using a floating ip."),
cfg.StrOpt('ssh_user',
default='root',
help="User name used to authenticate to an instance."),
cfg.IntOpt('ping_timeout',
default=120,
help="Timeout in seconds to wait for ping to "
"succeed."),
cfg.IntOpt('ping_size',
default=56,
help="The packet size for ping packets originating "
"from remote linux hosts"),
cfg.IntOpt('ping_count',
default=1,
help="The number of ping packets originating from remote "
"linux hosts"),
cfg.IntOpt('ready_wait',
default=0,
help="Additional wait time for clean state, when there is "
"no OS-EXT-STS extension available"),
cfg.StrOpt('fixed_network_name',
help="Name of the fixed network that is visible to all test "
"tenants. If multiple networks are available for a tenant"
" this is the network which will be used for creating "
"servers if tempest does not create a network or a "
"network is not specified elsewhere. It may be used for "
"ssh validation only if floating IPs are disabled."),
cfg.StrOpt('network_for_ssh',
default='public',
help="Network used for SSH connections. Ignored if "
"use_floatingip_for_ssh=true or run_validation=false."),
cfg.BoolOpt('use_floatingip_for_ssh',
default=True,
help="Does SSH use Floating IPs?"),
cfg.StrOpt('catalog_type',
default='compute',
help="Catalog type of the Compute service."),
cfg.StrOpt('region',
default='',
help="The compute region name to use. If empty, the value "
"of identity.region is used instead. If no such region "
"is found in the service catalog, the first found one is "
"used."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the compute service."),
cfg.StrOpt('volume_device_name',
default='vdb',
help="Expected device name when a volume is attached to "
"an instance"),
cfg.IntOpt('shelved_offload_time',
default=0,
help='Time in seconds before a shelved instance is eligible '
'for removing from a host. -1 never offload, 0 offload '
'when shelved. This time should be the same as the time '
'of nova.conf, and some tests will run for as long as the '
'time.'),
cfg.StrOpt('floating_ip_range',
default='10.0.0.0/29',
help='Unallocated floating IP range, which will be used to '
'test the floating IP bulk feature for CRUD operation. '
'This block must not overlap an existing floating IP '
'pool.')
]
compute_features_group = cfg.OptGroup(name='compute-feature-enabled',
title="Enabled Compute Service Features")
ComputeFeaturesGroup = [
cfg.BoolOpt('disk_config',
default=True,
help="If false, skip disk config tests"),
cfg.ListOpt('api_extensions',
default=['all'],
help='A list of enabled compute extensions with a special '
'entry all which indicates every extension is enabled. '
'Each extension should be specified with alias name. '
'Empty list indicates all extensions are disabled'),
cfg.BoolOpt('change_password',
default=False,
help="Does the test environment support changing the admin "
"password?"),
cfg.BoolOpt('console_output',
default=True,
help="Does the test environment support obtaining instance "
"serial console output?"),
cfg.BoolOpt('resize',
default=False,
help="Does the test environment support resizing?"),
cfg.BoolOpt('pause',
default=True,
help="Does the test environment support pausing?"),
cfg.BoolOpt('shelve',
default=True,
help="Does the test environment support shelving/unshelving?"),
cfg.BoolOpt('suspend',
default=True,
help="Does the test environment support suspend/resume?"),
cfg.BoolOpt('live_migration',
default=True,
help="Does the test environment support live migration "
"available?"),
cfg.BoolOpt('metadata_service',
default=True,
help="Does the test environment support metadata service? "
"Ignored unless validation.run_validation=true."),
cfg.BoolOpt('block_migration_for_live_migration',
default=False,
help="Does the test environment use block devices for live "
"migration"),
cfg.BoolOpt('block_migrate_cinder_iscsi',
default=False,
help="Does the test environment block migration support "
"cinder iSCSI volumes. Note, libvirt doesn't support this, "
"see https://bugs.launchpad.net/nova/+bug/1398999"),
# TODO(gilliard): Remove live_migrate_paused_instances at juno-eol.
cfg.BoolOpt('live_migrate_paused_instances',
default=False,
help="Does the test system allow live-migration of paused "
"instances? Note, this is more than just the ANDing of "
"paused and live_migrate, but all 3 should be set to True "
"to run those tests"),
cfg.BoolOpt('vnc_console',
default=False,
help='Enable VNC console. This configuration value should '
'be same as [nova.vnc]->vnc_enabled in nova.conf'),
cfg.BoolOpt('spice_console',
default=False,
help='Enable Spice console. This configuration value should '
'be same as [nova.spice]->enabled in nova.conf'),
cfg.BoolOpt('rdp_console',
default=False,
help='Enable RDP console. This configuration value should '
'be same as [nova.rdp]->enabled in nova.conf'),
cfg.BoolOpt('rescue',
default=True,
help='Does the test environment support instance rescue '
'mode?'),
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the '
'relevant server API calls such as create, rebuild '
'or rescue.'),
cfg.BoolOpt('interface_attach',
default=True,
help='Does the test environment support dynamic network '
'interface attachment?'),
cfg.BoolOpt('snapshot',
default=True,
help='Does the test environment support creating snapshot '
'images of running instances?'),
cfg.BoolOpt('ec2_api',
default=True,
help='Does the test environment have the ec2 api running?'),
# TODO(mriedem): Remove preserve_ports once juno-eol happens.
cfg.BoolOpt('preserve_ports',
default=False,
help='Does Nova preserve preexisting ports from Neutron '
'when deleting an instance? This should be set to True '
'if testing Kilo+ Nova.'),
cfg.BoolOpt('attach_encrypted_volume',
default=True,
help='Does the test environment support attaching an '
'encrypted volume to a running server instance? This may '
'depend on the combination of compute_driver in nova and '
'the volume_driver(s) in cinder.'),
# TODO(mriedem): Remove allow_duplicate_networks once kilo-eol happens
# since the option was removed from nova in Liberty and is the default
# behavior starting in Liberty.
cfg.BoolOpt('allow_duplicate_networks',
default=False,
help='Does the test environment support creating instances '
'with multiple ports on the same network? This is only '
'valid when using Neutron.'),
]
# [image] — Glance endpoints, timeouts, and a fetchable reference image.
image_group = cfg.OptGroup(name='image',
                           title="Image Service Options")
ImageGroup = [
    cfg.StrOpt('catalog_type',
               default='image',
               help='Catalog type of the Image service.'),
    cfg.StrOpt('region',
               default='',
               help="The image region name to use. If empty, the value "
                    "of identity.region is used instead. If no such region "
                    "is found in the service catalog, the first found one is "
                    "used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the image service."),
    cfg.StrOpt('http_image',
               default='http://download.cirros-cloud.net/0.3.1/'
               'cirros-0.3.1-x86_64-uec.tar.gz',
               help='http accessible image'),
    cfg.IntOpt('build_timeout',
               default=300,
               help="Timeout in seconds to wait for an image to "
                    "become available."),
    cfg.IntOpt('build_interval',
               default=1,
               help="Time in seconds between image operation status "
                    "checks.")
]
# [image-feature-enabled] — which Glance API versions/features are on.
image_feature_group = cfg.OptGroup(name='image-feature-enabled',
                                   title='Enabled image service features')
ImageFeaturesGroup = [
    cfg.BoolOpt('api_v2',
                default=True,
                help="Is the v2 image API enabled"),
    cfg.BoolOpt('api_v1',
                default=True,
                help="Is the v1 image API enabled"),
    cfg.BoolOpt('deactivate_image',
                default=False,
                help="Is the deactivate-image feature enabled."
                     " The feature has been integrated since Kilo."),
]
# [network] — Neutron endpoints, tenant subnet allocation, and public network ids.
network_group = cfg.OptGroup(name='network',
                             title='Network Service Options')
NetworkGroup = [
    cfg.StrOpt('catalog_type',
               default='network',
               help='Catalog type of the Neutron service.'),
    cfg.StrOpt('region',
               default='',
               help="The network region name to use. If empty, the value "
                    "of identity.region is used instead. If no such region "
                    "is found in the service catalog, the first found one is "
                    "used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the network service."),
    cfg.StrOpt('tenant_network_cidr',
               default="10.100.0.0/16",
               help="The cidr block to allocate tenant ipv4 subnets from"),
    cfg.IntOpt('tenant_network_mask_bits',
               default=28,
               help="The mask bits for tenant ipv4 subnets"),
    cfg.StrOpt('tenant_network_v6_cidr',
               default="2003::/48",
               help="The cidr block to allocate tenant ipv6 subnets from"),
    cfg.IntOpt('tenant_network_v6_mask_bits',
               default=64,
               help="The mask bits for tenant ipv6 subnets"),
    cfg.BoolOpt('tenant_networks_reachable',
                default=False,
                help="Whether tenant networks can be reached directly from "
                     "the test client. This must be set to True when the "
                     "'fixed' ssh_connect_method is selected."),
    cfg.StrOpt('public_network_id',
               default="",
               help="Id of the public network that provides external "
                    "connectivity"),
    cfg.StrOpt('floating_network_name',
               help="Default floating network name. Used to allocate floating "
                    "IPs when neutron is enabled."),
    cfg.StrOpt('public_router_id',
               default="",
               help="Id of the public router that provides external "
                    "connectivity. This should only be used when Neutron's "
                    "'allow_overlapping_ips' is set to 'False' in "
                    "neutron.conf. usually not needed past 'Grizzly' release"),
    cfg.IntOpt('build_timeout',
               default=300,
               help="Timeout in seconds to wait for network operation to "
                    "complete."),
    cfg.IntOpt('build_interval',
               default=1,
               help="Time in seconds between network operation status "
                    "checks."),
    cfg.ListOpt('dns_servers',
                default=["8.8.8.8", "8.8.4.4"],
                help="List of dns servers which should be used"
                     " for subnet creation"),
    cfg.StrOpt('port_vnic_type',
               choices=[None, 'normal', 'direct', 'macvtap'],
               help="vnic_type to use when Launching instances"
                    " with pre-configured ports."
                    " Supported ports are:"
                    " ['normal','direct','macvtap']"),
]
# [network-feature-enabled] — Neutron extensions/behaviors the cloud supports.
network_feature_group = cfg.OptGroup(name='network-feature-enabled',
                                     title='Enabled network service features')
NetworkFeaturesGroup = [
    cfg.BoolOpt('ipv6',
                default=True,
                help="Allow the execution of IPv6 tests"),
    cfg.ListOpt('api_extensions',
                default=['all'],
                help="A list of enabled network extensions with a special "
                     "entry all which indicates every extension is enabled. "
                     "Empty list indicates all extensions are disabled. "
                     "To get the list of extensions run: 'neutron ext-list'"),
    cfg.BoolOpt('ipv6_subnet_attributes',
                default=False,
                help="Allow the execution of IPv6 subnet tests that use "
                     "the extended IPv6 attributes ipv6_ra_mode "
                     "and ipv6_address_mode"
                ),
    cfg.BoolOpt('port_admin_state_change',
                default=True,
                help="Does the test environment support changing"
                     " port admin state"),
]
# [messaging] — Zaqar queue/message/claim limits used by messaging tests.
messaging_group = cfg.OptGroup(name='messaging',
                               title='Messaging Service')
MessagingGroup = [
    cfg.StrOpt('catalog_type',
               default='messaging',
               help='Catalog type of the Messaging service.'),
    cfg.IntOpt('max_queues_per_page',
               default=20,
               help='The maximum number of queue records per page when '
                    'listing queues'),
    cfg.IntOpt('max_queue_metadata',
               default=65536,
               help='The maximum metadata size for a queue'),
    cfg.IntOpt('max_messages_per_page',
               default=20,
               help='The maximum number of queue message per page when '
                    'listing (or) posting messages'),
    cfg.IntOpt('max_message_size',
               default=262144,
               help='The maximum size of a message body'),
    cfg.IntOpt('max_messages_per_claim',
               default=20,
               help='The maximum number of messages per claim'),
    cfg.IntOpt('max_message_ttl',
               default=1209600,
               help='The maximum ttl for a message'),
    cfg.IntOpt('max_claim_ttl',
               default=43200,
               help='The maximum ttl for a claim'),
    cfg.IntOpt('max_claim_grace',
               default=43200,
               help='The maximum grace period for a claim'),
]
# [validation] — how Tempest reaches and authenticates to created servers.
validation_group = cfg.OptGroup(name='validation',
                                title='SSH Validation options')
ValidationGroup = [
    cfg.BoolOpt('run_validation',
                default=False,
                help='Enable ssh on created servers and creation of additional'
                     ' validation resources to enable remote access',
                deprecated_opts=[cfg.DeprecatedOpt('run_ssh',
                                                   group='compute')]),
    cfg.StrOpt('connect_method',
               default='floating',
               choices=['fixed', 'floating'],
               help='Default IP type used for validation: '
                    '-fixed: uses the first IP belonging to the fixed network '
                    '-floating: creates and uses a floating IP'),
    cfg.StrOpt('auth_method',
               default='keypair',
               choices=['keypair'],
               help='Default authentication method to the instance. '
                    'Only ssh via keypair is supported for now. '
                    'Additional methods will be handled in a separate spec.'),
    cfg.IntOpt('ip_version_for_ssh',
               default=4,
               help='Default IP version for ssh connections.',
               deprecated_opts=[cfg.DeprecatedOpt('ip_version_for_ssh',
                                                  group='compute')]),
    cfg.IntOpt('ping_timeout',
               default=120,
               help='Timeout in seconds to wait for ping to succeed.'),
    cfg.IntOpt('connect_timeout',
               default=60,
               help='Timeout in seconds to wait for the TCP connection to be '
                    'successful.',
               deprecated_opts=[cfg.DeprecatedOpt('ssh_channel_timeout',
                                                  group='compute')]),
    cfg.IntOpt('ssh_timeout',
               default=300,
               help='Timeout in seconds to wait for the ssh banner.',
               deprecated_opts=[cfg.DeprecatedOpt('ssh_timeout',
                                                  group='compute')]),
]
# [volume] — Cinder endpoints, backend names, and default volume sizing.
volume_group = cfg.OptGroup(name='volume',
                            title='Block Storage Options')
VolumeGroup = [
    cfg.IntOpt('build_interval',
               default=1,
               help='Time in seconds between volume availability checks.'),
    cfg.IntOpt('build_timeout',
               default=300,
               help='Timeout in seconds to wait for a volume to become '
                    'available.'),
    cfg.StrOpt('catalog_type',
               default='volume',
               help="Catalog type of the Volume Service"),
    cfg.StrOpt('region',
               default='',
               help="The volume region name to use. If empty, the value "
                    "of identity.region is used instead. If no such region "
                    "is found in the service catalog, the first found one is "
                    "used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the volume service."),
    cfg.StrOpt('backend1_name',
               default='BACKEND_1',
               help="Name of the backend1 (must be declared in cinder.conf)"),
    cfg.StrOpt('backend2_name',
               default='BACKEND_2',
               help="Name of the backend2 (must be declared in cinder.conf)"),
    cfg.StrOpt('storage_protocol',
               default='iSCSI',
               help='Backend protocol to target when creating volume types'),
    cfg.StrOpt('vendor_name',
               default='Open Source',
               help='Backend vendor to target when creating volume types'),
    cfg.StrOpt('disk_format',
               default='raw',
               help='Disk format to use when copying a volume to image'),
    cfg.IntOpt('volume_size',
               default=1,
               help='Default size in GB for volumes created by volumes tests'),
]
# [volume-feature-enabled] — Cinder capabilities the deployment exposes.
volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
                                    title='Enabled Cinder Features')
VolumeFeaturesGroup = [
    cfg.BoolOpt('multi_backend',
                default=False,
                help="Runs Cinder multi-backend test (requires 2 backends)"),
    cfg.BoolOpt('backup',
                default=True,
                help='Runs Cinder volumes backup test'),
    cfg.BoolOpt('snapshot',
                default=True,
                help='Runs Cinder volume snapshot test'),
    cfg.BoolOpt('clone',
                default=True,
                help='Runs Cinder volume clone test'),
    cfg.ListOpt('api_extensions',
                default=['all'],
                help='A list of enabled volume extensions with a special '
                     'entry all which indicates every extension is enabled. '
                     'Empty list indicates all extensions are disabled'),
    cfg.BoolOpt('api_v1',
                default=True,
                help="Is the v1 volume API enabled"),
    cfg.BoolOpt('api_v2',
                default=True,
                help="Is the v2 volume API enabled"),
    cfg.BoolOpt('bootable',
                default=False,
                help='Update bootable status of a volume '
                     'Not implemented on icehouse ')
]
# [object-storage] — Swift endpoints, roles, and container-sync realm settings.
object_storage_group = cfg.OptGroup(name='object-storage',
                                    title='Object Storage Service Options')
ObjectStoreGroup = [
    cfg.StrOpt('catalog_type',
               default='object-store',
               help="Catalog type of the Object-Storage service."),
    cfg.StrOpt('region',
               default='',
               help="The object-storage region name to use. If empty, the "
                    "value of identity.region is used instead. If no such "
                    "region is found in the service catalog, the first found "
                    "one is used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the object-store service."),
    cfg.IntOpt('container_sync_timeout',
               default=600,
               help="Number of seconds to time on waiting for a container "
                    "to container synchronization complete."),
    cfg.IntOpt('container_sync_interval',
               default=5,
               help="Number of seconds to wait while looping to check the "
                    "status of a container to container synchronization"),
    cfg.StrOpt('operator_role',
               default='Member',
               help="Role to add to users created for swift tests to "
                    "enable creating containers"),
    cfg.StrOpt('reseller_admin_role',
               default='ResellerAdmin',
               help="User role that has reseller admin"),
    cfg.StrOpt('realm_name',
               default='realm1',
               help="Name of sync realm. A sync realm is a set of clusters "
                    "that have agreed to allow container syncing with each "
                    "other. Set the same realm name as Swift's "
                    "container-sync-realms.conf"),
    cfg.StrOpt('cluster_name',
               default='name1',
               help="One name of cluster which is set in the realm whose name "
                    "is set in 'realm_name' item in this file. Set the "
                    "same cluster name as Swift's container-sync-realms.conf"),
]
# [object-storage-feature-enabled] — optional Swift features to exercise.
object_storage_feature_group = cfg.OptGroup(
    name='object-storage-feature-enabled',
    title='Enabled object-storage features')
ObjectStoreFeaturesGroup = [
    cfg.ListOpt('discoverable_apis',
                default=['all'],
                help="A list of the enabled optional discoverable apis. "
                     "A single entry, all, indicates that all of these "
                     "features are expected to be enabled"),
    cfg.BoolOpt('container_sync',
                default=True,
                help="Execute (old style) container-sync tests"),
    cfg.BoolOpt('object_versioning',
                default=True,
                help="Execute object-versioning tests"),
    cfg.BoolOpt('discoverability',
                default=True,
                help="Execute discoverability tests"),
]
# [database] — Trove catalog entry, flavor, and API version for database tests.
database_group = cfg.OptGroup(name='database',
                              title='Database Service Options')
DatabaseGroup = [
    cfg.StrOpt('catalog_type',
               default='database',
               help="Catalog type of the Database service."),
    cfg.StrOpt('db_flavor_ref',
               default="1",
               help="Valid primary flavor to use in database tests."),
    cfg.StrOpt('db_current_version',
               default="v1.0",
               help="Current database version to use in database tests."),
]
# [orchestration] — Heat endpoints, stack-build timeouts, and template limits.
orchestration_group = cfg.OptGroup(name='orchestration',
                                   title='Orchestration Service Options')
OrchestrationGroup = [
    cfg.StrOpt('catalog_type',
               default='orchestration',
               help="Catalog type of the Orchestration service."),
    cfg.StrOpt('region',
               default='',
               help="The orchestration region name to use. If empty, the "
                    "value of identity.region is used instead. If no such "
                    "region is found in the service catalog, the first found "
                    "one is used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the orchestration service."),
    cfg.StrOpt('stack_owner_role', default='heat_stack_owner',
               help='Role required for users to be able to manage stacks'),
    cfg.IntOpt('build_interval',
               default=1,
               help="Time in seconds between build status checks."),
    cfg.IntOpt('build_timeout',
               default=1200,
               help="Timeout in seconds to wait for a stack to build."),
    cfg.StrOpt('instance_type',
               default='m1.micro',
               help="Instance type for tests. Needs to be big enough for a "
                    "full OS plus the test workload"),
    cfg.StrOpt('keypair_name',
               help="Name of existing keypair to launch servers with."),
    cfg.IntOpt('max_template_size',
               default=524288,
               help="Value must match heat configuration of the same name."),
    cfg.IntOpt('max_resources_per_stack',
               default=1000,
               help="Value must match heat configuration of the same name."),
]
# [telemetry] — Ceilometer catalog entry and endpoint type.
telemetry_group = cfg.OptGroup(name='telemetry',
                               title='Telemetry Service Options')
TelemetryGroup = [
    cfg.StrOpt('catalog_type',
               default='metering',
               help="Catalog type of the Telemetry service."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the telemetry service."),
    cfg.BoolOpt('too_slow_to_test',
                default=True,
                deprecated_for_removal=True,
                help="This variable is used as flag to enable "
                     "notification tests")
]
# [telemetry-feature-enabled] — optional Ceilometer features.
telemetry_feature_group = cfg.OptGroup(name='telemetry-feature-enabled',
                                       title='Enabled Ceilometer Features')
TelemetryFeaturesGroup = [
    cfg.BoolOpt('events',
                default=False,
                help="Runs Ceilometer event-related tests"),
]
# [dashboard] — Horizon URLs used by the dashboard scenario tests.
dashboard_group = cfg.OptGroup(name="dashboard",
                               title="Dashboard options")
DashboardGroup = [
    cfg.StrOpt('dashboard_url',
               default='http://localhost/',
               help="Where the dashboard can be found"),
    cfg.StrOpt('login_url',
               default='http://localhost/auth/login/',
               help="Login page for the dashboard",
               deprecated_for_removal=True),
]
# [data_processing] — Sahara catalog entry and endpoint type.
data_processing_group = cfg.OptGroup(name="data_processing",
                                     title="Data Processing options")
DataProcessingGroup = [
    cfg.StrOpt('catalog_type',
               default='data_processing',
               help="Catalog type of the data processing service."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the data processing "
                    "service."),
]
# [data_processing-feature-enabled] — Sahara plugins enabled in the deployment.
data_processing_feature_group = cfg.OptGroup(
    name="data_processing-feature-enabled",
    title="Enabled Data Processing features")
DataProcessingFeaturesGroup = [
    cfg.ListOpt('plugins',
                default=["vanilla", "hdp"],
                help="List of enabled data processing plugins")
]
boto_group = cfg.OptGroup(name='boto',
title='EC2/S3 options')
BotoGroup = [
cfg.StrOpt('ec2_url',
default="http://localhost:8773/services/Cloud",
help="EC2 URL"),
cfg.StrOpt('s3_url',
default="http://localhost:8080",
help="S3 URL"),
cfg.StrOpt('aws_secret',
help="AWS Secret Key",
secret=True),
cfg.StrOpt('aws_access',
help="AWS Access Key"),
cfg.StrOpt('aws_zone',
default="nova",
help="AWS Zone for EC2 tests"),
cfg.StrOpt('s3_materials_path',
default="/opt/stack/devstack/files/images/"
"s3-materials/cirros-0.3.0",
help="S3 Materials Path"),
cfg.StrOpt('ari_manifest',
default="cirros-0.3.0-x86_64-initrd.manifest.xml",
help="ARI Ramdisk Image manifest"),
cfg.StrOpt('ami_manifest',
default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
help="AMI Machine Image manifest"),
cfg.StrOpt('aki_manifest',
default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
help="AKI Kernel Image manifest"),
cfg.StrOpt('instance_type',
default="m1.tiny",
help="Instance type"),
cfg.IntOpt('http_socket_timeout',
default=3,
help="boto Http socket timeout"),
cfg.IntOpt('num_retries',
default=1,
help="boto num_retries on error"),
cfg.IntOpt('build_timeout',
default=60,
help="Status Change Timeout"),
cfg.IntOpt('build_interval',
default=1,
help="Status Change Test Interval"),
]
stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')
StressGroup = [
cfg.StrOpt('nova_logdir',
help='Directory containing log files on the compute nodes'),
cfg.IntOpt('max_instances',
default=16,
help='Maximum number of instances to create during test.'),
cfg.StrOpt('controller',
help='Controller host.'),
# new stress options
cfg.StrOpt('target_controller',
help='Controller host.'),
cfg.StrOpt('target_ssh_user',
help='ssh user.'),
cfg.StrOpt('target_private_key_path',
help='Path to private key.'),
cfg.StrOpt('target_logfiles',
help='regexp for list of log files.'),
cfg.IntOpt('log_check_interval',
default=60,
help='time (in seconds) between log file error checks.'),
cfg.IntOpt('default_thread_number_per_action',
default=4,
help='The number of threads created while stress test.'),
cfg.BoolOpt('leave_dirty_stack',
default=False,
help='Prevent the cleaning (tearDownClass()) between'
' each stress test run if an exception occurs'
' during this run.'),
cfg.BoolOpt('full_clean_stack',
default=False,
help='Allows a full cleaning process after a stress test.'
' Caution : this cleanup will remove every objects of'
' every tenant.')
]
scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
ScenarioGroup = [
cfg.StrOpt('img_dir',
default='/opt/stack/new/devstack/files/images/'
'cirros-0.3.1-x86_64-uec',
help='Directory containing image files'),
cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
default='cirros-0.3.1-x86_64-disk.img',
help='Image file name'),
cfg.StrOpt('img_disk_format',
default='qcow2',
help='Image disk format'),
cfg.StrOpt('img_container_format',
default='bare',
help='Image container format'),
cfg.DictOpt('img_properties', help='Glance image properties. '
'Use for custom images which require them'),
cfg.StrOpt('ami_img_file',
default='cirros-0.3.1-x86_64-blank.img',
help='AMI image file name'),
cfg.StrOpt('ari_img_file',
default='cirros-0.3.1-x86_64-initrd',
help='ARI image file name'),
cfg.StrOpt('aki_img_file',
default='cirros-0.3.1-x86_64-vmlinuz',
help='AKI image file name'),
cfg.StrOpt('ssh_user',
default='cirros',
help='ssh username for the image file'),
cfg.IntOpt(
'large_ops_number',
default=0,
help="specifies how many resources to request at once. Used "
"for large operations testing."),
# TODO(yfried): add support for dhcpcd
cfg.StrOpt('dhcp_client',
default='udhcpc',
choices=["udhcpc", "dhclient"],
help='DHCP client used by images to renew DCHP lease. '
'If left empty, update operation will be skipped. '
'Supported clients: "udhcpc", "dhclient"')
]
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
ServiceAvailableGroup = [
cfg.BoolOpt('cinder',
default=True,
help="Whether or not cinder is expected to be available"),
cfg.BoolOpt('neutron',
default=False,
help="Whether or not neutron is expected to be available"),
cfg.BoolOpt('glance',
default=True,
help="Whether or not glance is expected to be available"),
cfg.BoolOpt('swift',
default=True,
help="Whether or not swift is expected to be available"),
cfg.BoolOpt('nova',
default=True,
help="Whether or not nova is expected to be available"),
cfg.BoolOpt('heat',
default=False,
help="Whether or not Heat is expected to be available"),
cfg.BoolOpt('ceilometer',
default=True,
help="Whether or not Ceilometer is expected to be available"),
cfg.BoolOpt('horizon',
default=True,
help="Whether or not Horizon is expected to be available"),
cfg.BoolOpt('sahara',
default=False,
help="Whether or not Sahara is expected to be available"),
cfg.BoolOpt('ironic',
default=False,
help="Whether or not Ironic is expected to be available"),
cfg.BoolOpt('trove',
default=False,
help="Whether or not Trove is expected to be available"),
cfg.BoolOpt('zaqar',
default=False,
help="Whether or not Zaqar is expected to be available"),
]
debug_group = cfg.OptGroup(name="debug",
title="Debug System")
DebugGroup = [
cfg.StrOpt('trace_requests',
default='',
help="""A regex to determine which requests should be traced.
This is a regex to match the caller for rest client requests to be able to
selectively trace calls out of specific classes and methods. It largely
exists for test development, and is not expected to be used in a real deploy
of tempest. This will be matched against the discovered ClassName:method
in the test environment.
Expected values for this field are:
* ClassName:test_method_name - traces one test_method
* ClassName:setUp(Class) - traces specific setup functions
* ClassName:tearDown(Class) - traces specific teardown functions
* ClassName:_run_cleanups - traces the cleanup functions
If nothing is specified, this feature is not enabled. To trace everything
specify .* as the regex.
""")
]
input_scenario_group = cfg.OptGroup(name="input-scenario",
title="Filters and values for"
" input scenarios")
InputScenarioGroup = [
cfg.StrOpt('image_regex',
default='^cirros-0.3.1-x86_64-uec$',
help="Matching images become parameters for scenario tests"),
cfg.StrOpt('flavor_regex',
default='^m1.nano$',
help="Matching flavors become parameters for scenario tests"),
cfg.StrOpt('non_ssh_image_regex',
default='^.*[Ww]in.*$',
help="SSH verification in tests is skipped"
"for matching images"),
cfg.StrOpt('ssh_user_regex',
default="[[\"^.*[Cc]irros.*$\", \"cirros\"]]",
help="List of user mapped to regex "
"to matching image names."),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal provisioning service options',
help='When enabling baremetal tests, Nova '
'must be configured to use the Ironic '
'driver. The following paremeters for the '
'[compute] section must be disabled: '
'console_output, interface_attach, '
'live_migration, pause, rescue, resize '
'shelve, snapshot, and suspend')
BaremetalGroup = [
cfg.StrOpt('catalog_type',
default='baremetal',
help="Catalog type of the baremetal provisioning service"),
cfg.BoolOpt('driver_enabled',
default=False,
help="Whether the Ironic nova-compute driver is enabled"),
cfg.StrOpt('driver',
default='fake',
help="Driver name which Ironic uses"),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the baremetal provisioning "
"service"),
cfg.IntOpt('active_timeout',
default=300,
help="Timeout for Ironic node to completely provision"),
cfg.IntOpt('association_timeout',
default=30,
help="Timeout for association of Nova instance and Ironic "
"node"),
cfg.IntOpt('power_timeout',
default=60,
help="Timeout for Ironic power transitions."),
cfg.IntOpt('unprovision_timeout',
default=300,
help="Timeout for unprovisioning an Ironic node. "
"Takes longer since Kilo as Ironic performs an extra "
"step in Node cleaning.")
]
negative_group = cfg.OptGroup(name='negative', title="Negative Test Options")
NegativeGroup = [
cfg.StrOpt('test_generator',
default='tempest.common.' +
'generator.negative_generator.NegativeTestGenerator',
help="Test generator class for all negative tests"),
]
DefaultGroup = [
cfg.StrOpt('resources_prefix',
default='tempest',
help="Prefix to be added when generating the name for "
"test resources. It can be used to discover all "
"resources associated with a specific test run when "
"running tempest on a real-life cloud"),
]
_opts = [
(auth_group, AuthGroup),
(compute_group, ComputeGroup),
(compute_features_group, ComputeFeaturesGroup),
(identity_group, IdentityGroup),
(identity_feature_group, IdentityFeatureGroup),
(image_group, ImageGroup),
(image_feature_group, ImageFeaturesGroup),
(network_group, NetworkGroup),
(network_feature_group, NetworkFeaturesGroup),
(messaging_group, MessagingGroup),
(validation_group, ValidationGroup),
(volume_group, VolumeGroup),
(volume_feature_group, VolumeFeaturesGroup),
(object_storage_group, ObjectStoreGroup),
(object_storage_feature_group, ObjectStoreFeaturesGroup),
(database_group, DatabaseGroup),
(orchestration_group, OrchestrationGroup),
(telemetry_group, TelemetryGroup),
(telemetry_feature_group, TelemetryFeaturesGroup),
(dashboard_group, DashboardGroup),
(data_processing_group, DataProcessingGroup),
(data_processing_feature_group, DataProcessingFeaturesGroup),
(boto_group, BotoGroup),
(stress_group, StressGroup),
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
(baremetal_group, BaremetalGroup),
(input_scenario_group, InputScenarioGroup),
(negative_group, NegativeGroup),
(None, DefaultGroup)
]
def register_opts():
    """Register every known option group with the global config object.

    Covers the in-tree (group, options) pairs from _opts first, then any
    options contributed by externally installed tempest test plugins.
    """
    plugin_mgr = plugins.TempestTestPluginManager()
    # In-tree tempest config options.
    for group, options in _opts:
        register_opt_group(_CONF, group, options)
    # Options provided by external plugins.
    plugin_mgr.register_plugin_opts(_CONF)
def list_opts():
    """Return a list of oslo.config options available.

    The purpose of this is to allow tools like the Oslo sample config file
    generator to discover the options exposed to users.
    """
    plugin_mgr = plugins.TempestTestPluginManager()
    result = []
    for group, options in _opts:
        # A bare None group means the DEFAULT section, which has no name.
        result.append((getattr(group, 'name', None), options))
    result.extend(plugin_mgr.get_plugin_options_list())
    return result
# this should never be called outside of this class
class TempestConfigPrivate(object):
    """Provides OpenStack configuration information."""
    # Default config location: the "etc" directory next to this package.
    DEFAULT_CONFIG_DIR = os.path.join(
        os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
        "etc")
    DEFAULT_CONFIG_FILE = "tempest.conf"
    def __getattr__(self, attr):
        # Handles config options from the default group
        return getattr(_CONF, attr)
    def _set_attrs(self):
        # Expose each registered option group as a Python-friendly
        # attribute; groups whose oslo.config name contains '-' (not a
        # valid identifier) are fetched via item access instead.
        self.auth = _CONF.auth
        self.compute = _CONF.compute
        self.compute_feature_enabled = _CONF['compute-feature-enabled']
        self.identity = _CONF.identity
        self.identity_feature_enabled = _CONF['identity-feature-enabled']
        self.image = _CONF.image
        self.image_feature_enabled = _CONF['image-feature-enabled']
        self.network = _CONF.network
        self.network_feature_enabled = _CONF['network-feature-enabled']
        self.validation = _CONF.validation
        self.volume = _CONF.volume
        self.volume_feature_enabled = _CONF['volume-feature-enabled']
        self.object_storage = _CONF['object-storage']
        self.object_storage_feature_enabled = _CONF[
            'object-storage-feature-enabled']
        self.database = _CONF.database
        self.orchestration = _CONF.orchestration
        self.messaging = _CONF.messaging
        self.telemetry = _CONF.telemetry
        self.telemetry_feature_enabled = _CONF['telemetry-feature-enabled']
        self.dashboard = _CONF.dashboard
        self.data_processing = _CONF.data_processing
        self.data_processing_feature_enabled = _CONF[
            'data_processing-feature-enabled']
        self.boto = _CONF.boto
        self.stress = _CONF.stress
        self.scenario = _CONF.scenario
        self.service_available = _CONF.service_available
        self.debug = _CONF.debug
        self.baremetal = _CONF.baremetal
        self.input_scenario = _CONF['input-scenario']
        self.negative = _CONF.negative
        # The identity domain names default to the credentials domain
        # configured in the [auth] section unless explicitly set.
        _CONF.set_default('domain_name',
                          self.auth.default_credentials_domain_name,
                          group='identity')
        _CONF.set_default('alt_domain_name',
                          self.auth.default_credentials_domain_name,
                          group='identity')
    def __init__(self, parse_conf=True, config_path=None):
        """Initialize a configuration from a conf directory and conf file.

        Resolution order for the config file: the explicit ``config_path``
        argument, then $TEMPEST_CONFIG_DIR/$TEMPEST_CONFIG, then the
        failsafe /etc/tempest/tempest.conf.
        """
        super(TempestConfigPrivate, self).__init__()
        config_files = []
        failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
        if config_path:
            path = config_path
        else:
            # Environment variables override defaults...
            conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
                                      self.DEFAULT_CONFIG_DIR)
            conf_file = os.environ.get('TEMPEST_CONFIG',
                                       self.DEFAULT_CONFIG_FILE)
            path = os.path.join(conf_dir, conf_file)
        if not os.path.isfile(path):
            path = failsafe_path
        # only parse the config file if we expect one to exist. This is needed
        # to remove an issue with the config file up to date checker.
        if parse_conf:
            config_files.append(path)
        logging.register_options(_CONF)
        if os.path.isfile(path):
            _CONF([], project='tempest', default_config_files=config_files)
        else:
            _CONF([], project='tempest')
        logging.setup(_CONF, 'tempest')
        LOG = logging.getLogger('tempest')
        LOG.info("Using tempest config file %s" % path)
        register_opts()
        self._set_attrs()
        if parse_conf:
            _CONF.log_opt_values(LOG, std_logging.DEBUG)
class TempestConfigProxy(object):
    """Lazy proxy that defers building the real config object.

    The heavyweight TempestConfigPrivate instance is only constructed on
    the first attribute access, after optionally recording an explicit
    config file path via set_config_path().
    """
    _config = None
    _path = None
    _extra_log_defaults = [
        ('paramiko.transport', std_logging.INFO),
        ('requests.packages.urllib3.connectionpool', std_logging.WARN),
    ]
    def _fix_log_levels(self):
        """Tweak the oslo log defaults."""
        for logger_name, log_level in self._extra_log_defaults:
            std_logging.getLogger(logger_name).setLevel(log_level)
    def __getattr__(self, attr):
        # Materialize the private config object on the first lookup that
        # the proxy itself cannot satisfy.
        if self._config is None:
            self._fix_log_levels()
            self._config = TempestConfigPrivate(config_path=self._path)
        return getattr(self._config, attr)
    def set_config_path(self, path):
        """Record an explicit config file path for lazy initialization."""
        self._path = path
CONF = TempestConfigProxy()
| {
"content_hash": "cf3b46712079dccb6710461dba1dfdf3",
"timestamp": "",
"source": "github",
"line_count": 1355,
"max_line_length": 79,
"avg_line_length": 43.16531365313653,
"alnum_prop": 0.5570791088922703,
"repo_name": "flyingfish007/tempest",
"id": "7655c75b91af728e487981642eca4df2c70eb243",
"size": "59125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2740403"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
import copy
import functools
import inspect
import os
import time
from testtools import testcase
from oslotest import base
from oslo_log import log as logging
from oslo_config import cfg
from oslo_config import fixture as cfg_fixture
from oslo_messaging import conffixture as messaging_fixture
from designate import policy
from designate import utils
from designate import exceptions
from designate import objects
from designate import storage
from designate.context import DesignateContext
from designate.tests import fixtures
from designate.tests import resources
from designate.manage import database as manage_database
LOG = logging.getLogger(__name__)
# Pull in option definitions that live in other designate modules so their
# values can be read (and overridden by tests) without importing the
# modules that declare them.
cfg.CONF.import_opt('storage_driver', 'designate.central',
                    group='service:central')
cfg.CONF.import_opt('auth_strategy', 'designate.api',
                    group='service:api')
cfg.CONF.import_opt('connection', 'designate.storage.impl_sqlalchemy',
                    group='storage:sqlalchemy')
cfg.CONF.import_opt('cache_driver', 'designate.pool_manager',
                    group='service:pool_manager')
cfg.CONF.import_opt('connection',
                    'designate.pool_manager.cache.impl_sqlalchemy',
                    group='pool_manager_cache:sqlalchemy')
default_pool_id = cfg.CONF['service:central'].default_pool_id
# Lower-cased strings treated as "true" when read from environment
# variables such as DESIGNATE_SQL_DEBUG.
_TRUE_VALUES = ('true', '1', 'yes', 'y')
class TestTimeoutError(Exception):
    """Raised by TestCase.wait_for_condition when the timeout elapses."""
class TestCase(base.BaseTestCase):
quota_fixtures = [{
'resource': 'zones',
'hard_limit': 5,
}, {
'resource': 'records',
'hard_limit': 50,
}]
server_fixtures = [{
'name': 'ns1.example.org.',
}, {
'name': 'ns2.example.org.',
}, {
'name': 'ns2.example.org.',
}]
# The last tld is invalid
tld_fixtures = [{
'name': 'com',
}, {
'name': 'co.uk',
}, {
'name': 'com.',
}]
default_tld_fixtures = [{
'name': 'com',
}, {
'name': 'org',
}, {
'name': 'net',
}]
tsigkey_fixtures = [{
'name': 'test-key-one',
'algorithm': 'hmac-md5',
'secret': 'SomeSecretKey',
'scope': 'POOL',
'resource_id': '6ca6baef-3305-4ad0-a52b-a82df5752b62',
}, {
'name': 'test-key-two',
'algorithm': 'hmac-sha256',
'secret': 'AnotherSecretKey',
'scope': 'ZONE',
'resource_id': '7fbb6304-5e74-4691-bd80-cef3cff5fe2f',
}]
# The last zone is invalid
zone_fixtures = {
'PRIMARY': [
{
'name': 'example.com.',
'type': 'PRIMARY',
'email': 'example@example.com',
}, {
'name': 'example.net.',
'type': 'PRIMARY',
'email': 'example@example.net',
}, {
'name': 'example.org.',
'type': 'PRIMARY',
'email': 'example@example.org',
}, {
'name': 'invalid.com.....',
'type': 'PRIMARY',
'email': 'example@invalid.com',
}
],
'SECONDARY': [
{
'name': 'example.com.',
'type': 'SECONDARY',
}, {
'name': 'example.net.',
'type': 'SECONDARY',
}, {
'name': 'example.org.',
'type': 'SECONDARY',
}, {
'name': 'invalid.com.....',
'type': 'SECONDARY',
}
]
}
recordset_fixtures = {
'A': [
{'name': 'mail.%s', 'type': 'A'},
{'name': 'www.%s', 'type': 'A'},
],
'MX': [
{'name': 'mail.%s', 'type': 'MX'},
],
'SRV': [
{'name': '_sip._tcp.%s', 'type': 'SRV'},
{'name': '_sip._udp.%s', 'type': 'SRV'},
],
'TXT': [
{'name': 'text.%s', 'type': 'TXT'},
],
'CNAME': [
{'name': 'www.%s', 'type': 'CNAME'},
{'name': 'sub1.%s', 'type': 'CNAME'},
]
}
record_fixtures = {
'A': [
{'data': '192.0.2.1'},
{'data': '192.0.2.2'}
],
'MX': [
{'data': '5 mail.example.org.'},
{'data': '10 mail.example.com.'},
],
'SRV': [
{'data': '5 0 5060 server1.example.org.'},
{'data': '10 1 5060 server2.example.org.'},
],
'CNAME': [
{'data': 'www.somezone.org.'},
{'data': 'www.someotherzone.com.'},
],
'TXT': [
{'data': 'footxtdata'}
]
}
ptr_fixtures = [
{'ptrdname': 'srv1.example.com.'},
{'ptrdname': 'srv1.example.net.'}
]
blacklist_fixtures = [{
'pattern': 'blacklisted.com.',
'description': u'This is a comment',
}, {
'pattern': 'blacklisted.net.'
}, {
'pattern': 'blacklisted.org.'
}]
pool_fixtures = [
{'name': 'Pool-One',
'description': u'Pool-One description',
'attributes': [{'key': 'scope', 'value': 'public'}],
'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'},
{'priority': 2, 'hostname': 'ns2.example.org.'}]},
{'name': 'Pool-Two',
'description': u'Pool-Two description',
'attributes': [{'key': 'scope', 'value': 'public'}],
'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}]},
]
pool_attribute_fixtures = [
{'scope': 'public'},
{'scope': 'private'},
{'scope': 'unknown'}
]
pool_attributes_fixtures = [
{'pool_id': default_pool_id,
'key': 'continent',
'value': 'NA'},
{'pool_id': default_pool_id,
'key': 'scope',
'value': 'public'}
]
pool_manager_status_fixtures = [{
'server_id': '1d7a26e6-e604-4aa0-bbc5-d01081bf1f45',
'status': 'SUCCESS',
'serial_number': 1,
'action': 'CREATE',
}, {
'server_id': '1d7a26e6-e604-4aa0-bbc5-d01081bf1f45',
'status': 'ERROR',
'serial_number': 2,
'action': 'DELETE'
}]
zone_transfers_request_fixtures = [{
"description": "Test Transfer",
}, {
"description": "Test Transfer 2 - with target",
"target_tenant_id": "target_tenant_id"
}]
zone_import_fixtures = [{
'status': 'PENDING',
'zone_id': None,
'message': None,
'task_type': 'IMPORT'
}, {
'status': 'ERROR',
'zone_id': None,
'message': None,
'task_type': 'IMPORT'
}, {
'status': 'COMPLETE',
'zone_id': '6ca6baef-3305-4ad0-a52b-a82df5752b62',
'message': None,
'task_type': 'IMPORT'
}]
def setUp(self):
super(TestCase, self).setUp()
self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
self.messaging_conf = messaging_fixture.ConfFixture(cfg.CONF)
self.messaging_conf.transport_driver = 'fake'
self.messaging_conf.response_timeout = 5
self.useFixture(self.messaging_conf)
self.config(notification_driver='test')
self.useFixture(fixtures.RPCFixture(cfg.CONF))
self.config(
storage_driver='sqlalchemy',
group='service:central'
)
self.config(
auth_strategy='noauth',
group='service:api'
)
# The database fixture needs to be set up here (as opposed to isolated
# in a storage test case) because many tests end up using storage.
REPOSITORY = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'storage',
'impl_sqlalchemy',
'migrate_repo'))
self.db_fixture = self.useFixture(
fixtures.DatabaseFixture.get_fixture(
REPOSITORY, manage_database.INIT_VERSION))
if os.getenv('DESIGNATE_SQL_DEBUG', "True").lower() in _TRUE_VALUES:
connection_debug = 50
else:
connection_debug = 0
self.config(
connection=self.db_fixture.url,
connection_debug=connection_debug,
group='storage:sqlalchemy'
)
self._setup_pool_manager_cache()
self.config(network_api='fake')
# "Read" Configuration
self.CONF([], project='designate')
utils.register_plugin_opts()
self.useFixture(fixtures.PolicyFixture())
self.network_api = fixtures.NetworkAPIFixture()
self.useFixture(self.network_api)
self.central_service = self.start_service('central')
self.admin_context = self.get_admin_context()
storage_driver = cfg.CONF['service:central'].storage_driver
self.storage = storage.get_storage(storage_driver)
# Setup the Default Pool with some useful settings
self._setup_default_pool()
def _setup_pool_manager_cache(self):
self.config(
cache_driver='sqlalchemy',
group='service:pool_manager')
repository = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..',
'pool_manager',
'cache',
'impl_sqlalchemy',
'migrate_repo'))
db_fixture = self.useFixture(
fixtures.DatabaseFixture.get_fixture(repository))
if os.getenv('DESIGNATE_SQL_DEBUG', "True").lower() in _TRUE_VALUES:
connection_debug = 50
else:
connection_debug = 0
self.config(
connection=db_fixture.url,
connection_debug=connection_debug,
group='pool_manager_cache:sqlalchemy')
def _setup_default_pool(self):
# Fetch the default pool
pool = self.storage.get_pool(self.admin_context, default_pool_id)
# Add a NS record to it
pool.ns_records.append(
objects.PoolNsRecord(priority=0, hostname='ns1.example.org.'))
# Save the default pool
self.storage.update_pool(self.admin_context, pool)
# Config Methods
def config(self, **kwargs):
group = kwargs.pop('group', None)
for k, v in kwargs.items():
cfg.CONF.set_override(k, v, group)
def policy(self, rules, default_rule='allow', overwrite=True):
# Inject an allow and deny rule
rules['allow'] = '@'
rules['deny'] = '!'
# Set the rules
policy.set_rules(rules, default_rule, overwrite)
def start_service(self, svc_name, *args, **kw):
"""
Convenience method for starting a service!
"""
fixture = fixtures.ServiceFixture(svc_name, *args, **kw)
self.useFixture(fixture)
return fixture.svc
# Context Methods
def get_context(self, **kwargs):
return DesignateContext(**kwargs)
def get_admin_context(self):
return DesignateContext.get_admin_context(
tenant=utils.generate_uuid(),
user=utils.generate_uuid())
# Fixture methods
def get_quota_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.quota_fixtures[fixture])
_values.update(values)
return _values
def get_server_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.server_fixtures[fixture])
_values.update(values)
return _values
def get_tld_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.tld_fixtures[fixture])
_values.update(values)
return _values
def get_default_tld_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.default_tld_fixtures[fixture])
_values.update(values)
return _values
def get_tsigkey_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.tsigkey_fixtures[fixture])
_values.update(values)
return _values
def get_zone_fixture(self, zone_type=None, fixture=0, values=None):
zone_type = zone_type or 'PRIMARY'
_values = copy.copy(self.zone_fixtures[zone_type][fixture])
if values:
_values.update(values)
return _values
def get_recordset_fixture(self, zone_name, type='A', fixture=0,
values=None):
values = values or {}
_values = copy.copy(self.recordset_fixtures[type][fixture])
_values.update(values)
try:
_values['name'] = _values['name'] % zone_name
except TypeError:
pass
return _values
def get_record_fixture(self, recordset_type, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.record_fixtures[recordset_type][fixture])
_values.update(values)
return _values
def get_ptr_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.ptr_fixtures[fixture])
_values.update(values)
return objects.FloatingIP().from_dict(_values)
def get_zonefile_fixture(self, variant=None):
if variant is None:
f = 'example.com.zone'
else:
f = '%s_example.com.zone' % variant
path = os.path.join(resources.path, 'zonefiles', f)
with open(path) as zonefile:
return zonefile.read()
def get_blacklist_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.blacklist_fixtures[fixture])
_values.update(values)
return _values
def get_pool_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.pool_fixtures[fixture])
_values.update(values)
return _values
def get_pool_attribute_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.pool_attribute_fixtures[fixture])
_values.update(values)
return _values
def get_pool_attributes_fixture(self, fixture=0, values=None):
# TODO(kiall): Remove this method, in favor of the
# get_pool_attribute_fixture method above.
values = values or {}
_values = copy.copy(self.pool_attributes_fixtures[fixture])
_values.update(values)
return _values
def get_pool_manager_status_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.pool_manager_status_fixtures[fixture])
_values.update(values)
return _values
def get_zone_transfer_request_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.zone_transfers_request_fixtures[fixture])
_values.update(values)
return _values
def get_zone_transfer_accept_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.zone_transfers_accept_fixtures[fixture])
_values.update(values)
return _values
def get_zone_import_fixture(self, fixture=0, values=None):
values = values or {}
_values = copy.copy(self.zone_import_fixtures[fixture])
_values.update(values)
return _values
def create_tld(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_tld_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_tld(
context, objects.Tld.from_dict(values))
def create_default_tld(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_default_tld_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_tld(
context, objects.Tld.from_dict(values))
def create_default_tlds(self):
for index in range(len(self.default_tld_fixtures)):
try:
self.create_default_tld(fixture=index)
except exceptions.DuplicateTld:
pass
def create_tsigkey(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_tsigkey_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_tsigkey(
context, objects.TsigKey.from_dict(values))
def create_zone(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
zone_type = kwargs.pop('type', None)
values = self.get_zone_fixture(zone_type=zone_type,
fixture=fixture, values=kwargs)
if 'tenant_id' not in values:
values['tenant_id'] = context.tenant
return self.central_service.create_zone(
context, objects.Zone.from_dict(values))
def create_recordset(self, zone, type='A', increment_serial=True,
**kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_recordset_fixture(zone['name'], type=type,
fixture=fixture,
values=kwargs)
return self.central_service.create_recordset(
context, zone['id'], objects.RecordSet.from_dict(values),
increment_serial=increment_serial)
def create_record(self, zone, recordset, increment_serial=True,
**kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_record_fixture(recordset['type'], fixture=fixture,
values=kwargs)
return self.central_service.create_record(
context, zone['id'], recordset['id'],
objects.Record.from_dict(values),
increment_serial=increment_serial)
def create_blacklist(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_blacklist_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_blacklist(
context, objects.Blacklist.from_dict(values))
def create_pool(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_pool_fixture(fixture=fixture, values=kwargs)
if 'tenant_id' not in values:
values['tenant_id'] = context.tenant
return self.central_service.create_pool(
context, objects.Pool.from_dict(values))
def create_pool_attribute(self, **kwargs):
# TODO(kiall): This method should require a "pool" be passed in,
# rather than hardcoding the default pool ID into the
# fixture.
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_pool_attributes_fixture(fixture=fixture,
values=kwargs)
# TODO(kiall): We shouldn't be assuming the default_pool_id here
return self.storage.create_pool_attribute(
context, default_pool_id,
objects.PoolAttribute.from_dict(values))
def create_zone_transfer_request(self, zone, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_zone_transfer_request_fixture(
fixture=fixture, values=kwargs)
if 'zone_id' not in values:
values['zone_id'] = zone.id
return self.central_service.create_zone_transfer_request(
context, objects.ZoneTransferRequest.from_dict(values))
def create_zone_transfer_accept(self, zone_transfer_request, **kwargs):
context = kwargs.pop('context', self.admin_context)
values = {}
if 'tenant_id' not in values:
values['tenant_id'] = context.tenant
if 'zone_transfer_request_id' not in values:
values['zone_transfer_request_id'] = zone_transfer_request.id
if 'zone_id' not in values:
values['zone_id'] = zone_transfer_request.zone_id
if 'key' not in values:
values['key'] = zone_transfer_request.key
return self.central_service.create_zone_transfer_accept(
context, objects.ZoneTransferAccept.from_dict(values))
def create_zone_import(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
zone_import = self.get_zone_import_fixture(fixture=fixture,
values=kwargs)
return self.storage.create_zone_import(
context, objects.ZoneImport.from_dict(zone_import))
def wait_for_import(self, zone_import_id, errorok=False):
"""
Zone imports spawn a thread to parse the zone file and
insert the data. This waits for this process before continuing
"""
attempts = 0
while attempts < 20:
# Give the import a half second to complete
time.sleep(.5)
# Retrieve it, and ensure it's the same
zone_import = self.central_service.get_zone_import(
self.admin_context, zone_import_id)
# If the import is done, we're done
if zone_import.status == 'COMPLETE':
break
# If errors are allowed, just make sure that something completed
if errorok:
if zone_import.status != 'PENDING':
break
attempts += 1
if not errorok:
self.assertEqual('COMPLETE', zone_import.status)
def _ensure_interface(self, interface, implementation):
for name in interface.__abstractmethods__:
in_arginfo = inspect.getargspec(getattr(interface, name))
im_arginfo = inspect.getargspec(getattr(implementation, name))
self.assertEqual(
in_arginfo, im_arginfo,
"Method Signature for '%s' mismatched" % name)
def wait_for_condition(self, condition, interval=0.3, timeout=2):
    """Poll until *condition* is truthy, else raise TestTimeoutError.

    ``condition`` may be a plain value or a callable invoked on every
    poll. (Caution: some mocks behave both as values and callables.)
    Polls every *interval* seconds for at most *timeout* seconds.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = condition() if callable(condition) else condition
        if result:
            return result
        time.sleep(interval)
    raise TestTimeoutError
def _skip_decorator(func):
@functools.wraps(func)
def skip_if_not_implemented(*args, **kwargs):
try:
return func(*args, **kwargs)
except NotImplementedError as e:
raise testcase.TestSkipped(str(e))
except Exception as e:
if 'not implemented' in str(e):
raise testcase.TestSkipped(str(e))
raise
return skip_if_not_implemented
class SkipNotImplementedMeta(type):
    """Metaclass wrapping test methods so NotImplementedError skips them."""

    def __new__(cls, name, bases, local):
        for attr, value in list(local.items()):
            # Only instrument test methods and setUp; leave helpers alone.
            is_test_hook = attr.startswith('test_') or attr == 'setUp'
            if callable(value) and is_test_hook:
                local[attr] = _skip_decorator(value)
        return type.__new__(cls, name, bases, local)
| {
"content_hash": "cf877f70594f6a3d0e436d6237e35baa",
"timestamp": "",
"source": "github",
"line_count": 761,
"max_line_length": 78,
"avg_line_length": 31.921156373193167,
"alnum_prop": 0.5521570887534991,
"repo_name": "grahamhayes/designate",
"id": "68762e25bb2cbe5901134bb27621979e3b7a9643",
"size": "24919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2292184"
},
{
"name": "Ruby",
"bytes": "3933"
},
{
"name": "Shell",
"bytes": "25961"
}
],
"symlink_target": ""
} |
import re
# syntax of poly files;
# name
# number
# indented list of longitude, latitude
# end
# possibly another number
# another end
class PolyfileParser(object):
    """Recursive-descent parser for polygon boundary (.poly) files.

    Grammar: a name line, then one or more sections -- an identifier
    line followed by whitespace-indented "longitude latitude" pairs and
    a closing END -- and a final END terminating the whole file.
    """

    newline = re.compile(r'\s*\n')
    whitespace = re.compile(r'\s+')
    end = re.compile(r'END')
    word = re.compile(r'\w+')
    number = re.compile(r'-?\d\.\d+E[+-]\d+')
    identifier = re.compile(r'!?\d+')

    class Error(Exception):
        pass

    def parse(self, buf):
        """Parse *buf*; return ``(name, {identifier: [(lon, lat), ...]})``."""
        self.buf = buf
        self.position = 0
        name = self.read(self.word)
        self.read(self.newline)
        sections = {}
        while not self.peek(self.end):
            identifier, sequence = self._parse_section()
            sections[identifier] = sequence
        self.read(self.end)
        if self.peek(self.newline):
            self.read(self.newline)
        return name, sections

    def _parse_section(self):
        """Parse one identifier-headed block of coordinate pairs."""
        identifier = self.read(self.identifier)
        self.read(self.newline)
        sequence = []
        while not self.peek(self.end):
            self.read(self.whitespace)
            longitude = float(self.read(self.number))
            self.read(self.whitespace)
            latitude = float(self.read(self.number))
            sequence.append((longitude, latitude))
            self.read(self.newline)
        self.read(self.end)
        self.read(self.newline)
        return identifier, sequence

    def peek(self, expect):
        """Return True when *expect* matches at the current position."""
        return expect.match(self.buf, self.position) is not None

    def read(self, expect):
        """Consume and return a match for *expect*; raise Error otherwise."""
        match = expect.match(self.buf, self.position)
        if match is None:
            raise self.Error("%s was not matched (got %s...)" % (expect.pattern, self.buf[self.position:self.position+10]))
        self.position = match.end()
        return match.group()
| {
"content_hash": "8d5782394d47bafa9c9575f5a8cc99cf",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 123,
"avg_line_length": 31.862068965517242,
"alnum_prop": 0.5551948051948052,
"repo_name": "bdw/GridKit",
"id": "5ee5bd2b24996f563ab8161fb2ef747fab9d7b97",
"size": "1848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/polyfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "103536"
},
{
"name": "Python",
"bytes": "49777"
},
{
"name": "R",
"bytes": "790"
},
{
"name": "Shell",
"bytes": "4216"
}
],
"symlink_target": ""
} |
"""Dataset snapshot and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
# Compression schemes accepted by the snapshot op; None disables compression.
COMPRESSION_GZIP = "GZIP"
COMPRESSION_SNAPPY = "SNAPPY"
COMPRESSION_NONE = None
class _SnapshotDataset(dataset_ops.UnaryUnchangedStructureDataset):
    """A Dataset that captures a snapshot or reads from a snapshot."""

    def __init__(self,
                 input_dataset,
                 path,
                 compression=None,
                 reader_path_prefix=None,
                 writer_path_prefix=None,
                 shard_size_bytes=None,
                 pending_snapshot_expiry_seconds=None,
                 num_reader_threads=None,
                 reader_buffer_size=None,
                 num_writer_threads=None,
                 writer_buffer_size=None,
                 shuffle_on_read=None,
                 shuffle_seed=None,
                 mode=None,
                 snapshot_name=None):

        def _or_default(value, default):
            # The op expects sentinel defaults ("", -1, False, "auto")
            # rather than None for unset arguments.
            return default if value is None else value

        self._compression = _or_default(compression, "")
        self._reader_path_prefix = _or_default(reader_path_prefix, "")
        self._writer_path_prefix = _or_default(writer_path_prefix, "")
        self._shard_size_bytes = _or_default(shard_size_bytes, -1)
        self._pending_snapshot_expiry_seconds = _or_default(
            pending_snapshot_expiry_seconds, -1)
        self._num_reader_threads = _or_default(num_reader_threads, -1)
        self._reader_buffer_size = _or_default(reader_buffer_size, -1)
        self._num_writer_threads = _or_default(num_writer_threads, -1)
        self._writer_buffer_size = _or_default(writer_buffer_size, -1)
        self._shuffle_on_read = _or_default(shuffle_on_read, False)
        self._mode = _or_default(mode, "auto")
        self._snapshot_name = _or_default(snapshot_name, "")

        self._seed, self._seed2 = random_seed.get_seed(shuffle_seed)

        self._input_dataset = input_dataset
        self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path")

        variant_tensor = ged_ops.snapshot_dataset(
            self._input_dataset._variant_tensor,  # pylint: disable=protected-access
            path=self._path,
            compression=self._compression,
            reader_path_prefix=self._reader_path_prefix,
            writer_path_prefix=self._writer_path_prefix,
            shard_size_bytes=self._shard_size_bytes,
            pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
            num_reader_threads=self._num_reader_threads,
            reader_buffer_size=self._reader_buffer_size,
            num_writer_threads=self._num_writer_threads,
            writer_buffer_size=self._writer_buffer_size,
            shuffle_on_read=self._shuffle_on_read,
            seed=self._seed,
            seed2=self._seed2,
            mode=self._mode,
            snapshot_name=self._snapshot_name,
            **self._flat_structure)

        super(_SnapshotDataset, self).__init__(input_dataset, variant_tensor)
def snapshot(path,
             compression=None,
             reader_path_prefix=None,
             writer_path_prefix=None,
             shard_size_bytes=None,
             pending_snapshot_expiry_seconds=None,
             num_reader_threads=None,
             reader_buffer_size=None,
             num_writer_threads=None,
             writer_buffer_size=None,
             shuffle_on_read=None,
             shuffle_seed=None,
             mode=None,
             snapshot_name=None):
    """Writes to/reads from a snapshot of a dataset.

    This function attempts to determine whether a valid snapshot exists at
    the `path`, and reads from the snapshot if so. If not, it will run the
    preprocessing pipeline as usual, and write out a snapshot of the data
    processed for future use.

    Args:
      path: A directory where we want to save our snapshots and/or read from
        a previously saved snapshot.
      compression: The type of compression to apply to the Dataset. Currently
        supports "GZIP" or None. Defaults to None (no compression).
      reader_path_prefix: A prefix to add to the path when reading from
        snapshots. Defaults to None.
      writer_path_prefix: A prefix to add to the path when writing to
        snapshots. Defaults to None.
      shard_size_bytes: The size of each shard to be written by the snapshot
        dataset op. Defaults to 10 GiB.
      pending_snapshot_expiry_seconds: How long to wait (in seconds) before
        the snapshot op considers a previously unfinished snapshot to be
        stale.
      num_reader_threads: Number of threads to parallelize reading from
        snapshot. Especially useful if compression is turned on since the
        decompression operation tends to be intensive. Defaults to 1. If > 1,
        then this might introduce non-determinism i.e. the order in which the
        elements are read from the snapshot are different from the order
        they're written.
      reader_buffer_size: Maximum number of elements we can prefetch reading
        from the snapshot. Defaults to 1. Increasing this might improve
        performance but will increase memory consumption.
      num_writer_threads: Number of threads to parallelize writing from
        snapshot. We'll open up `num_writer_threads` files and write to them
        in parallel. Especially useful if compression is turned on since the
        compression operation tends to be intensive. Defaults to 1. If > 1,
        then this might introduce non-determinism i.e. the order in which the
        elements are read from the upstream iterator are different from the
        order they're written.
      writer_buffer_size: Maximum number of pipeline elements to fill up the
        buffer before writing them out using `num_writer_threads`.
      shuffle_on_read: If this is True, then the order in which examples are
        produced when reading from a snapshot will be random. Defaults to
        False.
      shuffle_seed: Optional. If shuffle_seed is set, the random number
        generator used for shuffling (when shuffle_on_read is turned on) is
        seeded by the given seed. Otherwise, it is seeded by a random seed
        that differs for every run.
      mode: The mode at which snapshot should operate. Valid options are
        "auto", "read", "write", and "passthrough". The default mode is
        "auto", where the snapshot op will automatically determine what mode
        to operate in.
      snapshot_name: If set, use the supplied string as a named snapshot name
        instead of introspecting the data pipeline and automatically
        generating a unique identifier for the snapshot.

    Returns:
      A `Dataset` transformation function, which can be passed to
      `tf.data.Dataset.apply`.
    """
    # Capture everything once so the closure below stays trivial.
    snapshot_kwargs = dict(
        path=path,
        compression=compression,
        reader_path_prefix=reader_path_prefix,
        writer_path_prefix=writer_path_prefix,
        shard_size_bytes=shard_size_bytes,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
        num_reader_threads=num_reader_threads,
        reader_buffer_size=reader_buffer_size,
        num_writer_threads=num_writer_threads,
        writer_buffer_size=writer_buffer_size,
        shuffle_on_read=shuffle_on_read,
        shuffle_seed=shuffle_seed,
        mode=mode,
        snapshot_name=snapshot_name)

    def _apply_fn(dataset):
        return _SnapshotDataset(input_dataset=dataset, **snapshot_kwargs)

    return _apply_fn
| {
"content_hash": "b8b4c24581c447d14e343d2a1695dde8",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 80,
"avg_line_length": 45.02873563218391,
"alnum_prop": 0.6816847479259732,
"repo_name": "gunan/tensorflow",
"id": "942aec712c3ad926c802046b936c590c5b6c8376",
"size": "8524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/ops/snapshot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import unittest
from utils import CensysTestCase
from censys.certificates import CensysCertificates
class CensysCertificatesTests(CensysTestCase):
    """Integration tests for the CensysCertificates API wrapper."""

    CERT_SHA = "fce621c0dc1c666d03d660472f636ce91e66e96460545f0da7eb1a24873e2f70"

    @classmethod
    def setUpClass(cls):
        cls._api = CensysCertificates()

    def test_view(self):
        record = self._api.view(self.CERT_SHA)
        self.assertIsInstance(record, dict)
        self.assertEqual(record["parsed"]["fingerprint_sha256"], self.CERT_SHA)

    def test_search(self):
        hits = list(
            self._api.search(
                self.CERT_SHA,
                fields=["parsed.subject_dn", "parsed.fingerprint_sha256"],
                max_records=1,
            )
        )
        self.assertEqual(len(hits), 1)
        for field in ("parsed.subject_dn", "parsed.fingerprint_sha256"):
            self.assertIn(field, hits[0])

    def test_bulk(self):
        certs = self._api.bulk([self.CERT_SHA])
        self.assertEqual(len(certs.keys()), 1)
        self.assertIn(self.CERT_SHA, certs)

    def test_report(self):
        report = self._api.report(
            "*", "parsed.issuer.organizational_unit", buckets=10
        )
        self.assertEqual(len(report.get("results")), 10)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "8d7b6b8b293513d61c6e47a3fa6ca5b4",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 81,
"avg_line_length": 27.97872340425532,
"alnum_prop": 0.6083650190114068,
"repo_name": "Censys/censys-python",
"id": "13dccd31d39725c0378f51e51785a9cf8f1dbef6",
"size": "1315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_certificates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31496"
}
],
"symlink_target": ""
} |
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: widens ``username`` to 150 characters
    # and attaches ASCIIUsernameValidator, matching the Django 1.10+ default
    # user model. NOTE(review): applied migrations should not be edited
    # retroactively -- keep this file byte-stable.

    dependencies = [("users", "0002_auto_20160606_1320")]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="username",
            field=models.CharField(
                error_messages={"unique": "A user with that username already exists."},
                help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
                max_length=150,
                unique=True,
                validators=[django.contrib.auth.validators.ASCIIUsernameValidator()],
                verbose_name="username",
            ),
        )
    ]
| {
"content_hash": "47db446121b7a9e632cde226578c49a9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 99,
"avg_line_length": 33.31818181818182,
"alnum_prop": 0.5743519781718963,
"repo_name": "mimischi/django-clock",
"id": "5ba2079d8e433875ac23fb2442133f02fb74ec40",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "clock/users/migrations/0003_auto_20170513_1059.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11956"
},
{
"name": "HTML",
"bytes": "83017"
},
{
"name": "JavaScript",
"bytes": "4192"
},
{
"name": "Makefile",
"bytes": "787"
},
{
"name": "Python",
"bytes": "184513"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
# lfc imports
import lfc.utils.registration
from lfc.utils import import_module
from lfc.models import Application
from lfc.models import Portal
# lfc_page imports
from lfc_page.models import Page
class CopyTestCase(TestCase):
    """Tests cut/copy & paste of content objects through the LFC manage views.

    Fixture layout built in setUp: a portal with two top-level pages,
    where page-1 has one child (page-1-1).
    """
    fixtures = ["superuser.xml"]

    def setUp(self):
        from lfc.utils.initialize import initialize
        initialize()
        import_module("lfc_page").install()
        # NOTE(review): ``create`` never raises DoesNotExist; this guard
        # looks vestigial -- confirm the intended exception type.
        try:
            Application.objects.create(name="lfc_page")
        except Application.DoesNotExist:
            pass
        Portal.objects.create(id=1)
        self.p1 = Page.objects.create(id=1, title="Page 1", slug="page-1")
        self.p11 = Page.objects.create(id=11, title="Page 1-1", slug="page-1-1", parent=self.p1)
        self.p2 = Page.objects.create(id=2, title="Page 2", slug="page-2")
        self.client = Client()
        self.client.login(username="admin", password="admin")

    def test_cut(self):
        """Tests general cut and paste of objects.
        """
        # P1 has no parent
        p1 = lfc.utils.get_content_object(pk=1)
        self.assertEqual(self.p1.parent, None)
        # Cut
        self.client.get(reverse("lfc_cut", kwargs={"id": 1}))
        # Paste
        result = self.client.get(reverse("lfc_paste", kwargs={"id": 2}))
        # P1 has now p2 as parent
        p1 = lfc.utils.get_content_object(pk=1)
        self.assertEqual(p1.parent.id, 2)
        # Portal has only p2 as child
        portal = lfc.utils.get_portal()
        self.assertEqual(len(portal.get_children()), 1)
        self.assertEqual(portal.get_children()[0].id, 2)

    def test_copy(self):
        """Tests general copy and paste of objects.
        """
        p1 = lfc.utils.get_content_object(pk=1)
        self.assertEqual(self.p1.parent, None)
        # Copy
        self.client.get(reverse("lfc_copy", kwargs={"id": 1}))
        # Paste
        self.client.get(reverse("lfc_paste", kwargs={"id": 2}))
        # p2 has now a child
        self.assertEqual(len(self.p2.children.all()), 1)
        self.assertEqual(self.p2.children.all()[0].title, "Page 1")
        # Paste again
        result = self.client.get(reverse("lfc_paste", kwargs={"id": 2}))
        # p2 has now a two children
        self.assertEqual(len(self.p2.children.all()), 2)
        self.assertEqual(self.p2.children.all()[0].slug, "page-1")
        self.assertEqual(self.p2.children.all()[1].slug, "page-1-1")
        # The portal has still both objects
        portal = lfc.utils.get_portal()
        self.assertEqual(len(portal.get_children()), 2)
        self.assertEqual(portal.get_children()[0].id, 1)
        self.assertEqual(portal.get_children()[1].id, 2)

    def test_paste_dissallowed_type(self):
        """Tests copy and paste of a disallowed content type.
        """
        # Remove all allowed subtypes so no content can be pasted into pages.
        ctr = lfc.utils.registration.get_info("page")
        ctr.subtypes = []
        ctr.save()
        # Cut
        self.client.get(reverse("lfc_cut", kwargs={"id": 1}))
        # Paste
        self.client.get(reverse("lfc_paste", kwargs={"id": 2}))
        # p2 has no children
        self.assertEqual(len(self.p2.children.all()), 0)
        # Cut
        self.client.get(reverse("lfc_cut", kwargs={"id": 1}))
        # Paste
        self.client.get(reverse("lfc_paste", kwargs={"id": 2}))
        # p2 has no children
        self.assertEqual(len(self.p2.children.all()), 0)

    def test_cut_and_paste_to_itself(self):
        """Cut and paste to itself is disallowed.
        """
        # Cut
        self.client.get(reverse("lfc_cut", kwargs={"id": 2}))
        # Paste to itself
        self.client.get(reverse("lfc_paste", kwargs={"id": 2}))
        # P2 has children
        self.assertEqual(len(self.p2.children.all()), 0)

    def test_cut_and_paste_to_descendant(self):
        """Cut and paste to a descendant is disallowed.
        """
        # Cut
        self.client.get(reverse("lfc_cut", kwargs={"id": 1}))
        # Paste to descendant
        self.client.get(reverse("lfc_paste", kwargs={"id": 11}))
        # Portal has still 2 children
        portal = lfc.utils.get_portal()
        self.assertEqual(len(portal.get_children()), 2)
        # P1 has still one children
        self.assertEqual(len(self.p1.get_children()), 1)

    def test_copy_and_paste_to_itself(self):
        """Copy and paste to itself is allowed (creates a copy inside).
        """
        # Copy
        self.client.get(reverse("lfc_copy", kwargs={"id": 2}))
        # Paste to itself is allowed
        self.client.get(reverse("lfc_paste", kwargs={"id": 2}))
        # P2 has children
        self.assertEqual(len(self.p2.children.all()), 1)

    def test_copy_and_paste_to_descendant(self):
        """Copy and paste to a descendant is disallowed.
        """
        # Cut
        self.client.get(reverse("lfc_copy", kwargs={"id": 1}))
        # Paste to descendant
        self.client.get(reverse("lfc_paste", kwargs={"id": 11}))
        # Portal has still 2 children
        portal = lfc.utils.get_portal()
        self.assertEqual(len(portal.get_children()), 2)
        # P1 has still one children
        self.assertEqual(len(self.p1.get_children()), 1)
| {
"content_hash": "b2cef8c7a76feaddfb2d5750d2149b7f",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 96,
"avg_line_length": 31.319526627218934,
"alnum_prop": 0.5909692046098621,
"repo_name": "diefenbach/django-lfc",
"id": "bb8a8a529a44905178debb3350f280bbb7048051",
"size": "5310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lfc/tests/copy_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11690"
},
{
"name": "HTML",
"bytes": "157911"
},
{
"name": "JavaScript",
"bytes": "25129"
},
{
"name": "Python",
"bytes": "416191"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class CategoryarrayValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``layout.scene.zaxis.categoryarray`` data array."""

    def __init__(self, plotly_name="categoryarray",
                 parent_name="layout.scene.zaxis", **kwargs):
        # Allow callers to override edit_type; default to "plot".
        edit_type = kwargs.pop("edit_type", "plot")
        super(CategoryarrayValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
"content_hash": "33292f5aade1587e38e0e5aaefa6148b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.6252821670428894,
"repo_name": "plotly/plotly.py",
"id": "e422240b9c831645f39185fc366187c1808346a3",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/scene/zaxis/_categoryarray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class CautoValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean ``barpolar.marker.line.cauto`` attribute."""

    def __init__(self, plotly_name="cauto",
                 parent_name="barpolar.marker.line", **kwargs):
        # Pop overridable defaults before forwarding the rest verbatim.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        role = kwargs.pop("role", "info")
        super(CautoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
| {
"content_hash": "4ac59c6bef89835ec0e2c4ab04aa91c6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 34.8,
"alnum_prop": 0.5881226053639846,
"repo_name": "plotly/python-api",
"id": "476270cabef641808d8ef51ea2d56006ebb2b4b7",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/barpolar/marker/line/_cauto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
def get_int(msg):
    """Prompt with *msg* until the user types a valid integer; return it."""
    while True:
        try:
            return int(input(msg))
        except ValueError as err:
            # Echo the conversion error and re-prompt.
            print(err)
# Read integers until EOF (^D / ^Z), then report count, total and mean.
print("type integers, each followed by <enter>, or ^D or ^Z to finish")

total = 0
count = 0

while True:
    try:
        number = get_int("enter an integer: ")
        total += number
        count += 1
    except EOFError:
        # End of input: the user pressed ^D (Unix) or ^Z (Windows).
        break

if count:
    print("count = ", count, ", total = ", total, ", mean = ", total/count)

# `exit()` is injected by the `site` module and is not guaranteed to exist
# (e.g. under `python -S` or in frozen apps); raising SystemExit is the
# portable equivalent of `exit(0)` -- and drops the stray C-style semicolon.
raise SystemExit(0)
| {
"content_hash": "cf759252eeeab82e9ed12f91b06ccf22",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 19.26923076923077,
"alnum_prop": 0.5169660678642715,
"repo_name": "ombt/analytics",
"id": "85f2ae2450c9f3cb41842722d83c2b944f75bbc7",
"size": "854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/programming_in_python_3/mine/ch1/ex5.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "C#",
"bytes": "167432"
},
{
"name": "CSS",
"bytes": "27541"
},
{
"name": "Dockerfile",
"bytes": "3588"
},
{
"name": "HTML",
"bytes": "1166436"
},
{
"name": "JavaScript",
"bytes": "1217784"
},
{
"name": "Makefile",
"bytes": "2310"
},
{
"name": "PLSQL",
"bytes": "10668"
},
{
"name": "PLpgSQL",
"bytes": "55402"
},
{
"name": "Perl",
"bytes": "33103233"
},
{
"name": "Perl 6",
"bytes": "3349"
},
{
"name": "Python",
"bytes": "748953"
},
{
"name": "R",
"bytes": "1326030"
},
{
"name": "Rebol",
"bytes": "1366"
},
{
"name": "SQLPL",
"bytes": "424169"
},
{
"name": "Shell",
"bytes": "340414"
}
],
"symlink_target": ""
} |
from ironic_lib import metrics_utils
from ironic_lib import utils as il_utils
from oslo_log import log
from oslo_utils import units
import six.moves.urllib_parse as urlparse
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _, _LI, _LW
from ironic.common import images
from ironic.common import raid
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import deploy_utils
LOG = log.getLogger(__name__)

METRICS = metrics_utils.get_metrics_logger(__name__)

REQUIRED_PROPERTIES = {
    'deploy_kernel': _('UUID (from Glance) of the deployment kernel. '
                       'Required.'),
    'deploy_ramdisk': _('UUID (from Glance) of the ramdisk with agent that is '
                        'used at deploy time. Required.'),
}

OPTIONAL_PROPERTIES = {
    'image_http_proxy': _('URL of a proxy server for HTTP connections. '
                          'Optional.'),
    'image_https_proxy': _('URL of a proxy server for HTTPS connections. '
                           'Optional.'),
    # Typo fix: "doman" -> "domain" in the user-facing help text below.
    'image_no_proxy': _('A comma-separated list of host names, IP addresses '
                        'and domain names (with optional :port) that will be '
                        'excluded from proxying. To denote a domain name, use '
                        'a dot to prefix the domain name. This value will be '
                        'ignored if ``image_http_proxy`` and '
                        '``image_https_proxy`` are not specified. Optional.'),
}

# Driver properties exposed to operators: required + optional + the agent
# vendor passthru properties.
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
COMMON_PROPERTIES.update(agent_base_vendor.VENDOR_PROPERTIES)

# instance_info keys forwarded to the agent when deploying partition images.
PARTITION_IMAGE_LABELS = ('kernel', 'ramdisk', 'root_gb', 'root_mb', 'swap_mb',
                          'ephemeral_mb', 'ephemeral_format', 'configdrive',
                          'preserve_ephemeral', 'image_type',
                          'deploy_boot_mode')
@METRICS.timer('check_image_size')
def check_image_size(task, image_source):
    """Check if the requested image is larger than the ram size.

    :param task: a TaskManager instance containing the node to act on.
    :param image_source: href of the image.
    :raises: InvalidParameterValue if size of the image is greater than
        the available ram size.
    """
    node = task.node
    props = node.properties
    if 'memory_mb' not in props:
        # Without a known RAM size there is nothing to compare against.
        LOG.warning(_LW('Skip the image size check as memory_mb is not '
                        'defined in properties on node %s.'), node.uuid)
        return

    image_show = images.image_show(task.context, image_source)
    if CONF.agent.stream_raw_images and image_show.get('disk_format') == 'raw':
        LOG.debug('Skip the image size check since the image is going to be '
                  'streamed directly onto the disk for node %s', node.uuid)
        return

    memory_size = int(props.get('memory_mb'))
    image_size = int(image_show['size'])
    reserved_size = CONF.agent.memory_consumed_by_agent
    required_bytes = image_size + (reserved_size * units.Mi)
    if required_bytes > (memory_size * units.Mi):
        msg = (_('Memory size is too small for requested image, if it is '
                 'less than (image size + reserved RAM size), will break '
                 'the IPA deployments. Image size: %(image_size)d MiB, '
                 'Memory size: %(memory_size)d MiB, Reserved size: '
                 '%(reserved_size)d MiB.')
               % {'image_size': image_size / units.Mi,
                  'memory_size': memory_size,
                  'reserved_size': reserved_size})
        raise exception.InvalidParameterValue(msg)
@METRICS.timer('validate_image_proxies')
def validate_image_proxies(node):
    """Check that the provided proxy parameters are valid.

    :param node: an Ironic node.
    :raises: InvalidParameterValue if any of the provided proxy parameters
        are incorrect.
    """
    errors = []
    invalid_proxies = {}
    for scheme in ('http', 'https'):
        proxy_param = 'image_%s_proxy' % scheme
        proxy = node.driver_info.get(proxy_param)
        if not proxy:
            continue
        # NOTE(vdrok) A proxy URL without a scheme is still valid, and the
        # proxy's scheme need not match the image URL's scheme (an https
        # proxy may download an http image and vice versa).
        if urlparse.urlparse(proxy).scheme not in ('', 'http', 'https'):
            invalid_proxies[proxy_param] = proxy
    if invalid_proxies:
        errors.append(_("Proxy URL should either have HTTP(S) scheme "
                        "or no scheme at all, the following URLs are "
                        "invalid: %s.") % invalid_proxies)
    no_proxy = node.driver_info.get('image_no_proxy')
    if no_proxy is not None and not utils.is_valid_no_proxy(no_proxy):
        errors.append(_(
            "image_no_proxy should be a list of host names, IP addresses "
            "or domain names to exclude from proxying, the specified list "
            "%s is incorrect. To denote a domain name, prefix it with a dot "
            "(instead of e.g. '.*').") % no_proxy)
    if errors:
        raise exception.InvalidParameterValue(''.join(errors))
class AgentDeployMixin(agent_base_vendor.AgentDeployMixin):
@METRICS.timer('AgentDeployMixin.deploy_has_started')
def deploy_has_started(self, task):
commands = self._client.get_commands_status(task.node)
for command in commands:
if command['command_name'] == 'prepare_image':
# deploy did start at some point
return True
return False
@METRICS.timer('AgentDeployMixin.deploy_is_done')
def deploy_is_done(self, task):
commands = self._client.get_commands_status(task.node)
if not commands:
return False
last_command = commands[-1]
if last_command['command_name'] != 'prepare_image':
# catches race condition where prepare_image is still processing
# so deploy hasn't started yet
return False
if last_command['command_status'] != 'RUNNING':
return True
return False
@METRICS.timer('AgentDeployMixin.continue_deploy')
@task_manager.require_exclusive_lock
def continue_deploy(self, task):
task.process_event('resume')
node = task.node
image_source = node.instance_info.get('image_source')
LOG.debug('Continuing deploy for node %(node)s with image %(img)s',
{'node': node.uuid, 'img': image_source})
image_info = {
'id': image_source.split('/')[-1],
'urls': [node.instance_info['image_url']],
'checksum': node.instance_info['image_checksum'],
# NOTE(comstud): Older versions of ironic do not set
# 'disk_format' nor 'container_format', so we use .get()
# to maintain backwards compatibility in case code was
# upgraded in the middle of a build request.
'disk_format': node.instance_info.get('image_disk_format'),
'container_format': node.instance_info.get(
'image_container_format'),
'stream_raw_images': CONF.agent.stream_raw_images,
}
proxies = {}
for scheme in ('http', 'https'):
proxy_param = 'image_%s_proxy' % scheme
proxy = node.driver_info.get(proxy_param)
if proxy:
proxies[scheme] = proxy
if proxies:
image_info['proxies'] = proxies
no_proxy = node.driver_info.get('image_no_proxy')
if no_proxy is not None:
image_info['no_proxy'] = no_proxy
image_info['node_uuid'] = node.uuid
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if not iwdi:
for label in PARTITION_IMAGE_LABELS:
image_info[label] = node.instance_info.get(label)
boot_option = deploy_utils.get_boot_option(node)
boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
if boot_mode:
image_info['deploy_boot_mode'] = boot_mode
else:
image_info['deploy_boot_mode'] = 'bios'
image_info['boot_option'] = boot_option
disk_label = deploy_utils.get_disk_label(node)
if disk_label is not None:
image_info['disk_label'] = disk_label
# Tell the client to download and write the image with the given args
self._client.prepare_image(node, image_info)
task.process_event('wait')
def _get_uuid_from_result(self, task, type_uuid):
command = self._client.get_commands_status(task.node)[-1]
if command['command_result'] is not None:
words = command['command_result']['result'].split()
for word in words:
if type_uuid in word:
result = word.split('=')[1]
if not result:
msg = (_('Command result did not return %(type_uuid)s '
'for node %(node)s. The version of the IPA '
'ramdisk used in the deployment might not '
'have support for provisioning of '
'partition images.') %
{'type_uuid': type_uuid,
'node': task.node.uuid})
LOG.error(msg)
deploy_utils.set_failed_state(task, msg)
return
return result
@METRICS.timer('AgentDeployMixin.check_deploy_success')
def check_deploy_success(self, node):
    """Return the agent's error message if the prepare_image command failed.

    Should only ever be called after we've validated that the
    prepare_image command is complete; inspects the most recent command.

    :param node: the node being deployed.
    :returns: the command's error string on failure, otherwise None.
    """
    last_command = self._client.get_commands_status(node)[-1]
    if last_command['command_status'] != 'FAILED':
        return None
    return last_command['command_error']
@METRICS.timer('AgentDeployMixin.reboot_to_instance')
def reboot_to_instance(self, task):
    """Finish the deployment: record UUIDs, set boot device, reboot.

    Called once the agent reports the image has been written.  Checks the
    agent command status, persists the root/EFI partition UUIDs for
    partition images, then reboots the node into the freshly written
    instance.

    :param task: a TaskManager instance with an exclusive lock on the node.
    """
    task.process_event('resume')
    node = task.node
    iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
    error = self.check_deploy_success(node)
    if error is not None:
        # TODO(jimrollenhagen) power off if using neutron dhcp to
        # align with pxe driver?
        msg = (_('node %(node)s command status errored: %(error)s') %
               {'node': node.uuid, 'error': error})
        LOG.error(msg)
        deploy_utils.set_failed_state(task, msg)
        return
    if not iwdi:
        # Partition image: the bootloader needs the partition UUIDs that
        # the ramdisk reported back after writing the image.
        root_uuid = self._get_uuid_from_result(task, 'root_uuid')
        if deploy_utils.get_boot_mode_for_deploy(node) == 'uefi':
            efi_sys_uuid = (
                self._get_uuid_from_result(task,
                                           'efi_system_partition_uuid'))
        else:
            efi_sys_uuid = None
        driver_internal_info = task.node.driver_internal_info
        driver_internal_info['root_uuid_or_disk_id'] = root_uuid
        task.node.driver_internal_info = driver_internal_info
        task.node.save()
        self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid)
    LOG.info(_LI('Image successfully written to node %s'), node.uuid)
    LOG.debug('Rebooting node %s to instance', node.uuid)
    if iwdi:
        # Whole-disk image: boot straight from disk from now on.
        manager_utils.node_set_boot_device(task, 'disk', persistent=True)
    self.reboot_and_finish_deploy(task)
    # NOTE(TheJulia): If we deployed a whole disk image, we
    # should expect a whole disk image and clean-up the tftp files
    # on-disk incase the node is disregarding the boot preference.
    # TODO(rameshg87): Not all in-tree drivers using reboot_to_instance
    # have a boot interface. So include a check for now. Remove this
    # check once all in-tree drivers have a boot interface.
    if task.driver.boot and iwdi:
        task.driver.boot.clean_up_ramdisk(task)
class AgentDeploy(AgentDeployMixin, base.DeployInterface):
    """Interface for deploy-related actions."""

    def get_properties(self):
        """Return the properties of the interface.

        :returns: dictionary of <property name>:<property description> entries.
        """
        return COMMON_PROPERTIES

    @METRICS.timer('AgentDeploy.validate')
    def validate(self, task):
        """Validate the driver-specific Node deployment info.

        This method validates whether the properties of the supplied node
        contain the required information for this driver to deploy images to
        the node.

        :param task: a TaskManager instance
        :raises: MissingParameterValue, if any of the required parameters are
            missing.
        :raises: InvalidParameterValue, if any of the parameters have invalid
            value.
        """
        if CONF.agent.manage_agent_boot:
            task.driver.boot.validate(task)
        node = task.node
        params = {}
        image_source = node.instance_info.get('image_source')
        params['instance_info.image_source'] = image_source
        error_msg = _('Node %s failed to validate deploy image info. Some '
                      'parameters were missing') % node.uuid
        deploy_utils.check_for_missing_params(params, error_msg)
        # Non-Glance (e.g. HTTP) image sources must carry a checksum so the
        # agent can verify the downloaded image.
        if not service_utils.is_glance_image(image_source):
            if not node.instance_info.get('image_checksum'):
                raise exception.MissingParameterValue(_(
                    "image_source's image_checksum must be provided in "
                    "instance_info for node %s") % node.uuid)
        check_image_size(task, image_source)
        # Validate the root device hints
        try:
            root_device = node.properties.get('root_device')
            il_utils.parse_root_device_hints(root_device)
        except ValueError as e:
            raise exception.InvalidParameterValue(
                _('Failed to validate the root device hints for node '
                  '%(node)s. Error: %(error)s') % {'node': node.uuid,
                                                   'error': e})
        # Validate node capabilities
        deploy_utils.validate_capabilities(node)
        validate_image_proxies(node)

    @METRICS.timer('AgentDeploy.deploy')
    @task_manager.require_exclusive_lock
    def deploy(self, task):
        """Perform a deployment to a node.

        Perform the necessary work to deploy an image onto the specified node.
        This method will be called after prepare(), which may have already
        performed any preparatory steps, such as pre-caching some data for the
        node.

        :param task: a TaskManager instance.
        :returns: status of the deploy. One of ironic.common.states.
        """
        # Rebooting into the agent ramdisk kicks off the actual deploy; the
        # rest happens asynchronously via agent heartbeats.
        manager_utils.node_power_action(task, states.REBOOT)
        return states.DEPLOYWAIT

    @METRICS.timer('AgentDeploy.tear_down')
    @task_manager.require_exclusive_lock
    def tear_down(self, task):
        """Tear down a previous deployment on the task's node.

        :param task: a TaskManager instance.
        :returns: status of the deploy. One of ironic.common.states.
        :raises: NetworkError if the cleaning ports cannot be removed.
        :raises: InvalidParameterValue when the wrong power state is specified
            or the wrong driver info is specified for power management.
        :raises: other exceptions by the node's power driver if something
            wrong occurred during the power action.
        """
        manager_utils.node_power_action(task, states.POWER_OFF)
        task.driver.network.unconfigure_tenant_networks(task)
        return states.DELETED

    @METRICS.timer('AgentDeploy.prepare')
    @task_manager.require_exclusive_lock
    def prepare(self, task):
        """Prepare the deployment environment for this node.

        :param task: a TaskManager instance.
        :raises: NetworkError: if the previous cleaning ports cannot be removed
            or if new cleaning ports cannot be created.
        :raises: InvalidParameterValue when the wrong power state is specified
            or the wrong driver info is specified for power management.
        :raises: other exceptions by the node's power driver if something
            wrong occurred during the power action.
        :raises: exception.ImageRefValidationFailed if image_source is not
            Glance href and is not HTTP(S) URL.
        :raises: any boot interface's prepare_ramdisk exceptions.
        """
        node = task.node
        if node.provision_state == states.DEPLOYING:
            # Adding the node to provisioning network so that the dhcp
            # options get added for the provisioning port.
            manager_utils.node_power_action(task, states.POWER_OFF)
            # NOTE(vdrok): in case of rebuild, we have tenant network already
            # configured, unbind tenant ports if present
            task.driver.network.unconfigure_tenant_networks(task)
            task.driver.network.add_provisioning_network(task)
        if node.provision_state == states.ACTIVE:
            # Take-over of an already-deployed node: only instance boot
            # config needs to be (re)created.
            task.driver.boot.prepare_instance(task)
        elif node.provision_state != states.ADOPTING:
            node.instance_info = deploy_utils.build_instance_info_for_deploy(
                task)
            node.save()
            if CONF.agent.manage_agent_boot:
                deploy_opts = deploy_utils.build_agent_options(node)
                task.driver.boot.prepare_ramdisk(task, deploy_opts)

    @METRICS.timer('AgentDeploy.clean_up')
    @task_manager.require_exclusive_lock
    def clean_up(self, task):
        """Clean up the deployment environment for this node.

        If preparation of the deployment environment ahead of time is possible,
        this method should be implemented by the driver. It should erase
        anything cached by the `prepare` method.

        If implemented, this method must be idempotent. It may be called
        multiple times for the same node on the same conductor, and it may be
        called by multiple conductors in parallel. Therefore, it must not
        require an exclusive lock.

        This method is called before `tear_down`.

        :param task: a TaskManager instance.
        """
        if CONF.agent.manage_agent_boot:
            task.driver.boot.clean_up_ramdisk(task)
        task.driver.boot.clean_up_instance(task)
        provider = dhcp_factory.DHCPFactory()
        provider.clean_dhcp(task)

    def take_over(self, task):
        """Take over management of this node from a dead conductor.

        :param task: a TaskManager instance.
        """
        pass

    @METRICS.timer('AgentDeploy.get_clean_steps')
    def get_clean_steps(self, task):
        """Get the list of clean steps from the agent.

        :param task: a TaskManager object containing the node
        :raises NodeCleaningFailure: if the clean steps are not yet
            available (cached), for example, when a node has just been
            enrolled and has not been cleaned yet.
        :returns: A list of clean step dictionaries
        """
        # Operator-configured priorities override the agent's defaults for
        # the erase steps.
        new_priorities = {
            'erase_devices': CONF.deploy.erase_devices_priority,
            'erase_devices_metadata':
                CONF.deploy.erase_devices_metadata_priority,
        }
        return deploy_utils.agent_get_clean_steps(
            task, interface='deploy',
            override_priorities=new_priorities)

    @METRICS.timer('AgentDeploy.execute_clean_step')
    def execute_clean_step(self, task, step):
        """Execute a clean step asynchronously on the agent.

        :param task: a TaskManager object containing the node
        :param step: a clean step dictionary to execute
        :raises: NodeCleaningFailure if the agent does not return a command
            status
        :returns: states.CLEANWAIT to signify the step will be completed async
        """
        return deploy_utils.agent_execute_clean_step(task, step)

    @METRICS.timer('AgentDeploy.prepare_cleaning')
    def prepare_cleaning(self, task):
        """Boot into the agent to prepare for cleaning.

        :param task: a TaskManager object containing the node
        :raises: NodeCleaningFailure, NetworkError if the previous cleaning
            ports cannot be removed or if new cleaning ports cannot be created.
        :raises: InvalidParameterValue if cleaning network UUID config option
            has an invalid value.
        :returns: states.CLEANWAIT to signify an asynchronous prepare
        """
        return deploy_utils.prepare_inband_cleaning(
            task, manage_boot=CONF.agent.manage_agent_boot)

    @METRICS.timer('AgentDeploy.tear_down_cleaning')
    def tear_down_cleaning(self, task):
        """Clean up the PXE and DHCP files after cleaning.

        :param task: a TaskManager object containing the node
        :raises: NodeCleaningFailure, NetworkError if the cleaning ports cannot
            be removed
        """
        deploy_utils.tear_down_inband_cleaning(
            task, manage_boot=CONF.agent.manage_agent_boot)
class AgentRAID(base.RAIDInterface):
    """Implementation of RAIDInterface which uses agent ramdisk."""

    def get_properties(self):
        """Return the properties of the interface."""
        return {}

    @METRICS.timer('AgentRAID.create_configuration')
    @base.clean_step(priority=0)
    def create_configuration(self, task,
                             create_root_volume=True,
                             create_nonroot_volumes=True):
        """Create a RAID configuration on a bare metal using agent ramdisk.

        This method creates a RAID configuration on the given node.

        :param task: a TaskManager instance.
        :param create_root_volume: If True, a root volume is created
            during RAID configuration. Otherwise, no root volume is
            created. Default is True.
        :param create_nonroot_volumes: If True, non-root volumes are
            created. If False, no non-root volumes are created. Default
            is True.
        :returns: states.CLEANWAIT if operation was successfully invoked.
        :raises: MissingParameterValue, if node.target_raid_config is missing
            or was found to be empty after skipping root volume and/or non-root
            volumes.
        """
        node = task.node
        LOG.debug("Agent RAID create_configuration invoked for node %(node)s "
                  "with create_root_volume=%(create_root_volume)s and "
                  "create_nonroot_volumes=%(create_nonroot_volumes)s with the "
                  "following target_raid_config: %(target_raid_config)s.",
                  {'node': node.uuid,
                   'create_root_volume': create_root_volume,
                   'create_nonroot_volumes': create_nonroot_volumes,
                   'target_raid_config': node.target_raid_config})
        if not node.target_raid_config:
            raise exception.MissingParameterValue(
                _("Node %s has no target RAID configuration.") % node.uuid)
        # Work on a copy so filtering below does not mutate the node field.
        target_raid_config = node.target_raid_config.copy()
        error_msg_list = []
        if not create_root_volume:
            target_raid_config['logical_disks'] = [
                x for x in target_raid_config['logical_disks']
                if not x.get('is_root_volume')]
            error_msg_list.append(_("skipping root volume"))
        if not create_nonroot_volumes:
            error_msg_list.append(_("skipping non-root volumes"))
            target_raid_config['logical_disks'] = [
                x for x in target_raid_config['logical_disks']
                if x.get('is_root_volume')]
        if not target_raid_config['logical_disks']:
            error_msg = _(' and ').join(error_msg_list)
            raise exception.MissingParameterValue(
                _("Node %(node)s has empty target RAID configuration "
                  "after %(msg)s.") % {'node': node.uuid, 'msg': error_msg})
        # Rewrite it back to the node object, but no need to save it as
        # we need to just send this to the agent ramdisk.
        node.driver_internal_info['target_raid_config'] = target_raid_config
        LOG.debug("Calling agent RAID create_configuration for node %(node)s "
                  "with the following target RAID configuration: %(target)s",
                  {'node': node.uuid, 'target': target_raid_config})
        step = node.clean_step
        return deploy_utils.agent_execute_clean_step(task, step)

    @staticmethod
    @agent_base_vendor.post_clean_step_hook(
        interface='raid', step='create_configuration')
    def _create_configuration_final(task, command):
        """Clean step hook after a RAID configuration was created.

        This method is invoked as a post clean step hook by the Ironic
        conductor once a create raid configuration is completed successfully.
        The node (properties, capabilities, RAID information) will be updated
        to reflect the actual RAID configuration that was created.

        :param task: a TaskManager instance.
        :param command: A command result structure of the RAID operation
            returned from agent ramdisk on query of the status of command(s).
        :raises: InvalidParameterValue, if 'current_raid_config' has more than
            one root volume or if node.properties['capabilities'] is malformed.
        :raises: IronicException, if clean_result couldn't be found within
            the 'command' argument passed.
        """
        try:
            clean_result = command['command_result']['clean_result']
        except KeyError:
            raise exception.IronicException(
                _("Agent ramdisk didn't return a proper command result while "
                  "cleaning %(node)s. It returned '%(result)s' after command "
                  "execution.") % {'node': task.node.uuid,
                                   'result': command})
        raid.update_raid_info(task.node, clean_result)

    @METRICS.timer('AgentRAID.delete_configuration')
    @base.clean_step(priority=0)
    def delete_configuration(self, task):
        """Deletes RAID configuration on the given node.

        :param task: a TaskManager instance.
        :returns: states.CLEANWAIT if operation was successfully invoked
        """
        LOG.debug("Agent RAID delete_configuration invoked for node %s.",
                  task.node.uuid)
        step = task.node.clean_step
        return deploy_utils.agent_execute_clean_step(task, step)

    @staticmethod
    @agent_base_vendor.post_clean_step_hook(
        interface='raid', step='delete_configuration')
    def _delete_configuration_final(task, command):
        """Clean step hook after RAID configuration was deleted.

        This method is invoked as a post clean step hook by the Ironic
        conductor once a delete raid configuration is completed successfully.
        It sets node.raid_config to empty dictionary.

        :param task: a TaskManager instance.
        :param command: A command result structure of the RAID operation
            returned from agent ramdisk on query of the status of command(s).
        :returns: None
        """
        task.node.raid_config = {}
        task.node.save()
| {
"content_hash": "5e79bcf291f6471c869ce6154c3dee8d",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 79,
"avg_line_length": 43.546875,
"alnum_prop": 0.6151417294581988,
"repo_name": "NaohiroTamura/ironic",
"id": "e07753fefa154bbebebef4032fe81821f9349cbf",
"size": "28449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/drivers/modules/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5077786"
},
{
"name": "Shell",
"bytes": "107935"
}
],
"symlink_target": ""
} |
from random import randint
from time import sleep
import tweepy
import config
old_tweets = []
def get_tweets(name, api):
    """Fetch recent original tweets from `name`, filtered and normalized.

    Fix: the original encoded ``status.text`` to bytes and then called
    ``.endswith('.')`` / ``'t.co' in ...`` with str arguments, which raises
    TypeError on Python 3. Work on the text directly instead; the output is
    now a list of str on both Python versions.

    :param name: screen name (or user id) to fetch the timeline of.
    :param api: an authenticated tweepy API object.
    :returns: lowercased tweet texts, trailing period stripped, excluding
        tweets that contain links ('t.co') or mentions ('@').
    """
    tweets = []
    statuses = api.user_timeline(id=name, count=500, include_rts=False)
    for status in statuses:
        text = status.text.lower()
        if text.endswith('.'):
            text = text[:-1]
        # Skip tweets carrying links or mentions; they don't stand alone.
        if 't.co' not in text and '@' not in text:
            tweets.append(text)
    return tweets
def get_old_tweets(api):
    """Seed the module-level ``old_tweets`` cache with the bot's own tweets.

    Fix: the original assigned to a *local* variable named ``old_tweets``,
    so the module-level list was never populated and get_random_tweet()
    could repeat already-posted tweets. Declare the name global so the
    assignment is visible module-wide.

    :param api: an authenticated tweepy API object.
    """
    global old_tweets
    # NOTE(review): the handle 'pcoudanilo' differs from the names used
    # elsewhere ('pcordanilo') — possibly a typo; confirm before changing.
    old_tweets = get_tweets('pcoudanilo', api)
def save_tweet(tweet):
    """Remember a posted tweet so future picks can avoid repeating it."""
    old_tweets.append(tweet)
def get_random_tweet(fst_lst, snd_lst):
    """Pick one random, not-yet-posted tweet from either candidate list.

    Fix: the original indexed with ``randint(0, len(lst))`` — randint's
    upper bound is *inclusive*, so this raised IndexError whenever the
    maximum was drawn. Use ``len(lst) - 1`` as the bound. The retry loop is
    also tightened: the original re-drew inside the `if` without
    re-checking, then drew again at the top of the loop.

    :param fst_lst: first candidate tweet list (must be non-empty).
    :param snd_lst: second candidate tweet list (must be non-empty).
    :returns: a tweet not present in the module-level ``old_tweets``.

    .. note:: like the original, this loops forever if every candidate has
       already been posted.
    """
    while True:
        fst_tweet = fst_lst[randint(0, len(fst_lst) - 1)]
        snd_tweet = snd_lst[randint(0, len(snd_lst) - 1)]
        if fst_tweet not in old_tweets and snd_tweet not in old_tweets:
            break
    # Flip a coin between the two fresh candidates.
    if randint(0, 1) == 0:
        return fst_tweet
    return snd_tweet
def main():
    """Entry point: authenticate with Twitter, then post forever.

    Seeds the already-posted cache, then every cycle fetches both source
    timelines, posts one random unseen tweet, records it, and sleeps.
    """
    auth = tweepy.OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)
    auth.set_access_token(config.ACCESS_TOKEN_KEY, config.ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth)
    get_old_tweets(api)
    while True:
        fst_lst = []  # pecesiqueira tweets
        snd_lst = []  # danilo tweets
        fst_lst = get_tweets('pecesiqueira', api)
        snd_lst = get_tweets('danilordgss', api)
        tweet = get_random_tweet(fst_lst, snd_lst)
        api.update_status(tweet)
        save_tweet(tweet)
        # 21600 seconds == 6 hours between posts.
        sleep(21600)
# Run the bot only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| {
"content_hash": "85b7e3a6442e13fdb7d8e39dac1e2704",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 26.220588235294116,
"alnum_prop": 0.6006730229949523,
"repo_name": "leonardolima/pcordanilo",
"id": "ad55f8f92bf9a3ee588638811456b40450354404",
"size": "1783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pcordanilo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2069"
}
],
"symlink_target": ""
} |
from setuptools import setup
# http://bugs.python.org/issue15881#msg170215 workaround:
# importing multiprocessing up front prevents a spurious error at
# interpreter shutdown when running `setup.py test` on affected Pythons.
try:
    import multiprocessing
except ImportError:
    pass
# Release version string for the Carcade distribution.
VERSION = '0.1.1'

setup(
    name='Carcade',
    version=VERSION,
    description='Static site generator powered by Jinja2.',
    # Read at build time, so setup.py must be run from the project root.
    long_description=open('README.rst').read(),
    url='https://github.com/aromanovich/carcade',
    author='Anton Romanovich',
    author_email='anthony.romanovich@gmail.com',
    packages=['carcade'],
    package_dir={'carcade': 'carcade'},
    # Ship the template directory skeleton (kept in git via .gitkeep files).
    package_data={'carcade': ['template/*/.gitkeep']},
    entry_points={
        'console_scripts': [
            'carcade = carcade.cli:main',
        ],
    },
    install_requires=[
        'argh>=0.21.2',
        'argcomplete>=0.3.7',
        'Jinja2>=2.6',
        'polib>=1.0.2',
        'webassets>=0.8',
        'Markdown>=2.2.1',
        'PyYAML>=3.10',
        'watchdog>=0.6.0',
    ],
    license='BSD',
    tests_require=['nose'],
    test_suite='nose.collector',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: BSD License',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Code Generators',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Operating System :: POSIX',
        'Operating System :: Unix',
    ],
)
| {
"content_hash": "1b2f5cd15f0babd9da03c3a7ccfe2ec9",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 59,
"avg_line_length": 29.305084745762713,
"alnum_prop": 0.5795257374204743,
"repo_name": "aromanovich/carcade",
"id": "8d9e386c28158c8d299ffa494c9d703e8f89559e",
"size": "1729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "0"
},
{
"name": "Python",
"bytes": "41275"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os
# Check dex so file exists in dex directory.
# Fail fast before packaging: package_data below ships libDex.so, so a
# missing build would otherwise produce a broken wheel/sdist silently.
so_file = "libDex.so"
dex_dir = os.path.join(os.path.dirname(__file__), 'dex')
if not os.path.exists(os.path.join(dex_dir, so_file)):
    raise FileNotFoundError(f"{so_file} not found in dex/, "
                            f"please run `make build-ffis`")

setup(
    name='dex',
    version='0.0.1',
    description='A research language for typed, functional array processing',
    license='BSD',
    author='Adam Paszke',
    author_email='apaszke@google.com',
    packages=find_packages(),
    # Bundle the prebuilt native library inside the `dex` package.
    package_data={'dex': ['libDex.so']},
    install_requires=['numpy'],
)
| {
"content_hash": "e23e55adafc47e942ce6e8ecfe3e7cdc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 30.761904761904763,
"alnum_prop": 0.6671826625386997,
"repo_name": "google-research/dex-lang",
"id": "67250e49da73cb912b7f85526176c49d939908cd",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "665"
},
{
"name": "C++",
"bytes": "12300"
},
{
"name": "CSS",
"bytes": "1734"
},
{
"name": "Emacs Lisp",
"bytes": "1773"
},
{
"name": "HTML",
"bytes": "1261"
},
{
"name": "Haskell",
"bytes": "1673331"
},
{
"name": "JavaScript",
"bytes": "6560"
},
{
"name": "Julia",
"bytes": "25339"
},
{
"name": "Makefile",
"bytes": "15034"
},
{
"name": "Nix",
"bytes": "3324"
},
{
"name": "Python",
"bytes": "101678"
},
{
"name": "Shell",
"bytes": "2729"
}
],
"symlink_target": ""
} |
from .exceptions import TerminationNotice
__all__ = ['Job', 'SuicideJob', 'SimpleJob']
class Job(object):
    """Interface for a Job object.

    Concrete jobs subclass this and override :meth:`run` with the work
    to perform.
    """

    def __init__(self):
        """No-op constructor; subclasses may extend it."""
        pass

    def run(self):
        """The actual task for the job should be implemented here."""
        pass
class SuicideJob(Job):
    "A worker receiving this job will commit suicide."

    def run(self, **kw):
        # Raising here signals the consuming worker loop to terminate.
        raise TerminationNotice()
class SimpleJob(Job):
    """
    Given a `result` queue, a `method` pointer, and an `args` dictionary or
    list, the method will execute r = method(*args) or r = method(**args),
    depending on args' type, and perform result.put(r).
    """

    def __init__(self, result, method, args=()):
        # Fix: the original default was a mutable `args=[]`, the classic
        # shared-mutable-default pitfall. An empty tuple is equivalent for
        # every caller (it is unpacked with *args) and immutable.
        self.result = result
        self.method = method
        self.args = args

    def run(self):
        """Invoke ``method`` with ``args`` and enqueue the return value."""
        if isinstance(self.args, (list, tuple)):
            r = self.method(*self.args)
        elif isinstance(self.args, dict):
            r = self.method(**self.args)
        else:
            # Fix: the original fell through and crashed with
            # UnboundLocalError on `r`; raise a clear error instead.
            raise TypeError('args must be a list, tuple or dict, got %r'
                            % type(self.args))
        self._return(r)

    def _return(self, r):
        "Handle return value by appending to the ``self.result`` queue."
        self.result.put(r)
| {
"content_hash": "da30c9e39a858e626335019a05c6bfd0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 29.76923076923077,
"alnum_prop": 0.6037898363479759,
"repo_name": "Codeusa/Shrinkwrap-worker",
"id": "e4f24a8a85c68ca5cc18de137fa80b0f7095f46b",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shrinkwrap-worker/jobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8485"
}
],
"symlink_target": ""
} |
from models import AbstractTemporalModel
| {
"content_hash": "07acce3acc14e30c80e8484084c78026",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 40,
"avg_line_length": 41,
"alnum_prop": 0.9024390243902439,
"repo_name": "shriramrseee/django-temporal-models",
"id": "35c566e37a0d4c0af8ff04f086320d9194a7662f",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "temporal_models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6872"
}
],
"symlink_target": ""
} |
"""CLI options."""
import click
from common import chronicle_auth
verbose_option = click.option(
"--verbose", is_flag=True, help="Prints verbose output to the console.")
credential_file_option = click.option(
"-c",
"--credential_file",
help=f"Path of Service Account JSON. Default: {chronicle_auth.default_cred_file_path}"
)
region_option = click.option(
"--region",
type=click.Choice(["US", "ASIA-SOUTHEAST1", "EUROPE"],
case_sensitive=False),
default="US",
help="Select region")
url_option = click.option("--url", help="Base URL to be used for API calls")
env_option = click.option(
"--env",
type=click.Choice(["prod", "test"], case_sensitive=False),
default="prod",
help="""Optionally specify
the environment for API calls""")
export_option = click.option(
"--export", help="Export output to specified file path")
| {
"content_hash": "c0c9d0266238a04b23e3c287389c2739",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 90,
"avg_line_length": 27.96969696969697,
"alnum_prop": 0.6359696641386782,
"repo_name": "chronicle/cli",
"id": "4e03ce504a63f2ca52534e049ab9de15cc62e12a",
"size": "1499",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "common/options.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "303761"
}
],
"symlink_target": ""
} |
"""Gradients for operators defined in array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Pack")
def _PackGrad(op, grad):
  """Gradient for pack op: unstack the incoming gradient along the pack axis."""
  num_inputs = op.get_attr("N")
  pack_axis = op.get_attr("axis")
  return array_ops.unstack(grad, num=num_inputs, axis=pack_axis)
@ops.RegisterGradient("Unpack")
def _UnpackGrad(op, *grads):
  """Gradient for unpack op: stack the per-output gradients back together."""
  unpack_axis = op.get_attr("axis")
  return array_ops.stack(grads, axis=unpack_axis)
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
  """Gradient for concat op.

  The gradient of concat is the incoming gradient split back into the
  per-input pieces along the concat dimension. Shared by the Concat (v1)
  and ConcatV2 gradient registrations, which differ only in where the
  dim/axis tensor sits among op.inputs.

  Args:
    op: An operation.
    grad: `Tensor` or `IndexedSlices` representing the gradients with respect
      to each output of the op.
    start_value_index: An integer index of the first value in the op.inputs.
    end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.

  Returns:
    Tensors representing the partial gradients with respect to each input
    of the op.

  Raises:
    ValueError: if concat_dim/axis is not statically known.
  """

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[0])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat([
        array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1],
        array_ops.fill(shape_of_shape - concat_dim - 1, 0)
    ], 0)
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  def _ExtractInputShapes(inputs):
    """Extract the shapes of a set of input tensors."""
    if context.executing_eagerly():
      return array_ops.shape_n(inputs)
    sizes = []
    fully_known = True
    for x in inputs:
      input_shape = array_ops.shape(x)
      # Only use the cheap per-tensor shape constants when every one of
      # them is a Const; otherwise fall back to a single shape_n op.
      if not isinstance(input_shape,
                        ops.Tensor) or input_shape.op.type != "Const":
        fully_known = False
        break
      sizes.append(input_shape)
    if fully_known:
      return sizes
    else:
      return array_ops.shape_n(inputs)

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return grad + [None] if end_value_index <= dim_index else [None] + grad

  concat_dim = op.inputs[dim_index]
  input_values = op.inputs[start_value_index:end_value_index]

  out_grads = []
  if isinstance(grad, ops.Tensor):
    if context.executing_eagerly():
      # Using mod here for convenience since concat_dim is already verified
      # in concat implementation to be within the allowed [-rank, rank) range.
      non_neg_concat_dim = (
          concat_dim._numpy().item(0) % input_values[0]._rank())  # pylint: disable=protected-access
      # All inputs are guaranteed to be EagerTensors in eager mode
      sizes = pywrap_tensorflow.TFE_Py_TensorShapeSlice(input_values,
                                                        non_neg_concat_dim)
      out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
    else:
      if constant_op.is_constant(concat_dim):
        # If concat_dim is a constant defined in a different context,
        # then we duplicate it in the current context to avoid passing it
        # through an Enter node.
        # This is a small optimization in general, but it is required when
        # compiling with XLA, as XLA needs the concat input to be folded into a
        # constant.
        grad_context = control_flow_util.GetOutputContext(grad.op)
        dim_context = control_flow_util.GetOutputContext(concat_dim.op)
        if dim_context != grad_context:
          value = tensor_util.constant_value(concat_dim)
          concat_dim = constant_op.constant(value=value, dtype=concat_dim.dtype)

      # Using mod here for convenience since concat_dim is already verified
      # in concat implementation to be within the allowed [-rank, rank) range.
      non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])

      # Get the inputs' tensor shapes
      sizes = _ExtractInputShapes(input_values)
      # The magic number of 16 was found through benchmarking a range of sizes
      # on CPUs and a Maxwell TitanX. A speedup was seen in a large majority of
      # cases when switching implementations at N=16, but it is possible that
      # there will be a small number of performance regressions.
      if len(sizes) > 16:
        # extract the size of each input along the concat dimension
        sizes = array_ops.squeeze(
            array_ops.slice(
                array_ops.stack(sizes, axis=1), [non_neg_concat_dim, 0],
                [1, -1]))
        out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
      else:
        offset = gen_array_ops.concat_offset(non_neg_concat_dim, sizes)
        for (begin, size) in zip(offset, sizes):
          out_grads.append(array_ops.slice(grad, begin, size))
  elif isinstance(grad, ops.IndexedSlices):
    # Using mod here for convenience since concat_dim is already verified
    # in concat implementation to be within the allowed [-rank, rank) range.
    non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    if concat_dim_static < 0:
      rank = tensor_util.constant_value(array_ops.rank(input_values[0]))
      if rank is None:
        raise ValueError("Can only compute IndexedSlices gradient with "
                         "negative concat_dim when first value rank is "
                         "statically-known.")
      concat_dim_static %= rank
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in input_values]
    if concat_dim_static > 0:
      # IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices
      # gradients with all the indices, but with grad.values sliced accordingly.
      # This is like the Tensor case, except shape(grad.values)[0] is not equal
      # to shape(sizes[i])[0], since only a subset of the dim-0 values are
      # stored.
      mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim)
      for size in sizes:
        new_values = array_ops.slice(
            grad.values, begin,
            array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0))
        out_grads.append(ops.IndexedSlices(new_values, grad.indices, size))
        # Lint complains begin = begin + ...
        begin = math_ops.add(begin, size * mask)
    else:
      # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
      # only for the relevant indices.
      start = constant_op.constant(0, dtype=grad.indices.dtype)
      for size in sizes:
        size_concat_dim = array_ops.gather(size, non_neg_concat_dim)
        if size_concat_dim.dtype != grad.indices.dtype:
          size_concat_dim = math_ops.cast(
              size_concat_dim, dtype=grad.indices.dtype)
        end = start + size_concat_dim
        # Compute the 1-D Tensor of indices relevant for this input.
        indices_to_select = array_ops.squeeze(
            array_ops.where(
                math_ops.logical_and(grad.indices >= start,
                                     grad.indices < end)),
            axis=[1])
        new_indices = array_ops.gather(grad.indices, indices_to_select) - start
        new_values = array_ops.gather(grad.values, indices_to_select)
        out_grads.append(ops.IndexedSlices(new_values, new_indices, size))
        start = end
  else:
    raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))

  return (out_grads + [None]
          if end_value_index <= dim_index else [None] + out_grads)
@ops.RegisterGradient("Concat")
def _ConcatGrad(op, grad):
  """Gradient for the v1 Concat op, whose concat_dim is op.inputs[0]."""
  # Layout is [concat_dim, value0, value1, ...], so values start at index 1.
  return _ConcatGradHelper(op, grad, start_value_index=1,
                           end_value_index=len(op.inputs), dim_index=0)
@ops.RegisterGradient("ConcatV2")
def _ConcatGradV2(op, grad):
  """Gradient for ConcatV2, whose axis is the last input."""
  # Layout is [value0, value1, ..., axis]; negative indices select the axis.
  return _ConcatGradHelper(op, grad, start_value_index=0,
                           end_value_index=-1, dim_index=-1)
ops.NotDifferentiable("ConcatOffset")
@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
  """Gradient for Slice op: zero-pad the gradient back to the input shape."""
  # Build an Nx2 paddings tensor: column 0 is how many zeros to prepend per
  # dimension (the slice's begin offsets), column 1 how many to append
  # (input extent minus slice extent minus begin). Padding the incoming
  # gradient with those zeros places it exactly over the sliced region.
  original_input = op.inputs[0]
  begin_vec = op.inputs[1]
  input_rank = array_ops.rank(original_input)
  sliced_shape = array_ops.shape(op.outputs[0])
  column_shape = array_ops.stack([input_rank, 1])
  pad_before = array_ops.reshape(begin_vec, column_shape)
  pad_after = array_ops.reshape(
      array_ops.shape(original_input) - sliced_shape - begin_vec,
      column_shape)
  paddings = array_ops.concat([pad_before, pad_after], 1)
  # No gradient flows to the begin/size arguments.
  return array_ops.pad(grad, paddings), None, None
@ops.RegisterGradient("StridedSlice")
def _StridedSliceGrad(op, grad):
"""Gradient for StridedSlice op."""
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
# StridedSliceGrad requires `x`, `begin`, `end` and `strides` to be of the
# same dtype so we build a shape of the same type as other args.
# Note that the choice of `begin` for specifying `out_type` is arbitrary.
# We could choose any of {begin|end|strides}.dtype since they are required to
# be the same.
x = array_ops.shape(op.inputs[0], out_type=begin.dtype)
return array_ops.strided_slice_grad(
x,
begin,
end,
strides,
grad,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask")), None, None, None
@ops.RegisterGradient("StridedSliceGrad")
def _StridedSliceGradGrad(op, grad):
"""Gradient for StridedSliceGrad op."""
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return None, None, None, None, array_ops.strided_slice(
grad,
begin,
end,
strides,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask"))
@ops.RegisterGradient("Split")
def _SplitGrad(op, *grads):
return None, array_ops.concat(list(grads), op.inputs[0])
@ops.RegisterGradient("SplitV")
def _SplitVGrad(op, *grads):
returnval = array_ops.concat(list(grads), op.inputs[2])
returnval = [returnval] + [
None,
] * (
len(op.inputs) - 1)
return returnval
ops.NotDifferentiable("Const")
@ops.RegisterGradient("Diag")
def _DiagGrad(_, grad):
return array_ops.diag_part(grad)
@ops.RegisterGradient("DiagPart")
def _DiagPartGrad(_, grad):
return array_ops.diag(grad)
@ops.RegisterGradient("MatrixDiag")
def _MatrixDiagGrad(_, grad):
return array_ops.matrix_diag_part(grad)
@ops.RegisterGradient("MatrixDiagPart")
def _MatrixDiagPartGrad(op, grad):
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined() and matrix_shape[0] == matrix_shape[1]:
return array_ops.matrix_diag(grad)
else:
return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad)
@ops.RegisterGradient("MatrixSetDiag")
def _MatrixSetDiagGrad(op, grad):
"""Gradient for MatrixSetDiag."""
input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
diag_shape = op.inputs[1].get_shape()
batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
matrix_shape = input_shape[-2:]
if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
else:
with ops.colocate_with(grad):
grad_shape = array_ops.shape(grad)
grad_rank = array_ops.rank(grad)
batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
min_dim = math_ops.reduce_min(matrix_shape)
diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
grad_input = array_ops.matrix_set_diag(grad,
array_ops.zeros(
diag_shape, dtype=grad.dtype))
grad_diag = array_ops.matrix_diag_part(grad)
return (grad_input, grad_diag)
@ops.RegisterGradient("MatrixBandPart")
def _MatrixBandPartGrad(op, grad):
num_lower = op.inputs[1]
num_upper = op.inputs[2]
return (array_ops.matrix_band_part(grad, num_lower, num_upper), None, None)
# Edit Distance has no gradient (but can be used to eval seq2seq or CTC).
ops.NotDifferentiable("EditDistance")
@ops.RegisterGradient("Fill")
def _FillGrad(_, grad):
return None, math_ops.reduce_sum(grad)
ops.NotDifferentiable("ZerosLike")
ops.NotDifferentiable("OnesLike")
@ops.RegisterGradient("PreventGradient")
def _PreventGradientGrad(op, _):
raise LookupError(
"Gradient explicitly disabled. Reason: %s" % op.get_attr("message"))
@ops.RegisterGradient("Gather")
def _GatherGrad(op, grad):
"""Gradient for Gather op."""
# params can be large, so colocate the shape calculation with it.
#
# params can be very large for sparse model, array_ops.shape raises
# exception on the Windows platform when any dimension is larger than
# int32. params_shape is not used in optimizer apply_sparse gradients,
# so it's fine to convert it back to int32 regardless of truncation.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.to_int32(params_shape)
# Build appropriately shaped IndexedSlices
indices = op.inputs[1]
size = array_ops.expand_dims(array_ops.size(indices), 0)
values_shape = array_ops.concat([size, params_shape[1:]], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, size)
return [ops.IndexedSlices(values, indices, params_shape), None]
@ops.RegisterGradient("GatherV2")
def _GatherV2Grad(op, grad):
"""Gradient for GatherV2 op."""
# params can be large, so colocate the shape calculation with it.
#
# params can be very large for sparse model, array_ops.shape raises
# exception on the Windows platform when any dimension is larger than
# int32. params_shape is not used in optimizer apply_sparse gradients,
# so it's fine to convert it back to int32 regardless of truncation.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.to_int32(params_shape)
indices = op.inputs[1]
indices_size = array_ops.expand_dims(array_ops.size(indices), 0)
axis = op.inputs[2]
axis_static = tensor_util.constant_value(axis)
# For axis 0 gathers, build an appropriately shaped IndexedSlices.
if axis_static == 0:
if context.executing_eagerly():
params_tail_shape = params_shape.cpu()[1:]
else:
params_tail_shape = params_shape[1:]
values_shape = array_ops.concat([indices_size, params_tail_shape], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, indices_size)
return [ops.IndexedSlices(values, indices, params_shape), None, None]
outer_shape = params_shape[:axis]
outer_dims = array_ops.size(outer_shape)
inner_shape = params_shape[axis:][1:]
inner_dims = array_ops.size(inner_shape)
outer_axes_indices = math_ops.range(outer_dims)
inner_axes_indices = math_ops.range(outer_dims + 1,
outer_dims + 1 + inner_dims)
values_shape = array_ops.concat([outer_shape, indices_size, inner_shape], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, indices_size)
# We need to sum up every slice `values[..., i, ....]` corresponding to
# `params[..., indices[i], ...]`. Since `unsorted_segment_sum` does not
# support an axis parameter, we transpose the gather dimension to the front,
# then use `unsorted_segment_sum` to build a
# [gather_axis, outer_axes, inner_axes] tensor with all the gradients
# affecting each index in `gather_axis` summed up.
transpose_dims = array_ops.concat(
[[outer_dims], outer_axes_indices, inner_axes_indices], 0)
values_transpose = array_ops.transpose(values, transpose_dims)
num_segments = params_shape[axis]
params_grad = math_ops.unsorted_segment_sum(values_transpose, indices,
num_segments)
# Inverts the above transpose by moving dimension 0 back to its original
# position.
invert_transpose_dims = array_ops.concat(
[outer_axes_indices + 1, [0], inner_axes_indices], 0)
params_grad = array_ops.transpose(params_grad, invert_transpose_dims)
return [params_grad, None, None]
@ops.RegisterGradient("GatherNd")
def _GatherNdGrad(op, grad):
ref = op.inputs[0]
indices = op.inputs[1]
ref_shape = array_ops.shape(ref, out_type=indices.dtype)
if indices.shape.ndims == 2 and indices.shape.dims[-1].value == 1:
ref_grad = ops.IndexedSlices(grad, array_ops.squeeze(indices, axis=-1),
ref_shape)
else:
ref_grad = array_ops.scatter_nd(indices, grad, ref_shape)
return [ref_grad, None]
@ops.RegisterGradient("CheckNumerics")
def _CheckNumericsGrad(op, grad):
"""Gradient for check_numerics op."""
return array_ops.check_numerics(
grad,
"Not a number (NaN) or infinity (Inf) values detected in gradient. %s" %
op.get_attr("message"))
@ops.RegisterGradient("PlaceholderWithDefault")
@ops.RegisterGradient("Identity")
def _IdGrad(_, grad):
return grad
@ops.RegisterGradient("RefIdentity")
def _RefIdGrad(_, grad):
return grad
@ops.RegisterGradient("IdentityN")
def _IdNGrad(_, *grad):
return grad
ops.NotDifferentiable("StopGradient")
@ops.RegisterGradient("Reshape")
def _ReshapeGrad(op, grad):
return [array_ops.reshape(grad, array_ops.shape(op.inputs[0])), None]
ops.NotDifferentiable("InvertPermutation")
def _ReshapeToInput(op, grad):
"""Reshapes the gradient to the shape of the original input."""
return array_ops.reshape(grad, array_ops.shape(op.inputs[0]))
@ops.RegisterGradient("ExpandDims")
def _ExpandDimsGrad(op, grad):
return [_ReshapeToInput(op, grad), None]
@ops.RegisterGradient("Squeeze")
def _SqueezeGrad(op, grad):
return _ReshapeToInput(op, grad)
@ops.RegisterGradient("Transpose")
def _TransposeGrad(op, grad):
"""Returns unshuffle(grad)."""
p = op.inputs[1]
return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None]
@ops.RegisterGradient("ConjugateTranspose")
def _ConjugateTransposeGrad(op, grad):
"""Returns conj(unshuffle(grad))."""
p = op.inputs[1]
return [
array_ops.transpose(
grad, array_ops.invert_permutation(p), conjugate=True), None
]
ops.NotDifferentiable("Shape")
ops.NotDifferentiable("ShapeN")
ops.NotDifferentiable("Rank")
ops.NotDifferentiable("Size")
@ops.RegisterGradient("Tile")
def _TileGrad(op, grad):
"""Sum reduces grad along the tiled dimensions."""
input_shape = array_ops.shape(op.inputs[0])
# We interleave multiples and input_shape to get split_shape,
# reshape grad to split_shape, and reduce along all even
# dimensions (the tiled dimensions) to get the result
# with shape input_shape. For example
# input_shape = [20, 30, 40]
# multiples = [2, 3, 4]
# split_shape = [2, 20, 3, 30, 4, 40]
# axes = [0, 2, 4]
split_shape = array_ops.reshape(
array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
axes = math_ops.range(0, array_ops.size(split_shape), 2)
# Sum reduces grad along the first dimension for IndexedSlices
if isinstance(grad, ops.IndexedSlices):
grad = math_ops.unsorted_segment_sum(
grad.values,
math_ops.mod(grad.indices, input_shape[0]),
input_shape[0])
split_shape = array_ops.concat([[1], split_shape[1:]], axis=0)
input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
# Fix shape inference
if not context.executing_eagerly():
input_grad.set_shape(op.inputs[0].get_shape())
return [input_grad, None]
ops.NotDifferentiable("BroadcastGradientArgs")
def _PadGrad(op, grad):
"""Gradient for Pad."""
# Pad introduces values around the original tensor, so the gradient function
# slices the original shape out of the gradient."""
x = op.inputs[0]
a = op.inputs[1] # [Rank(x), 2]
# Takes a slice of a. The 1st column. [Rank(x), 1].
pad_before = array_ops.slice(a, [0, 0],
array_ops.stack([array_ops.rank(x), 1]))
# Make it a 1-D tensor.
begin = array_ops.reshape(pad_before, [-1])
sizes = array_ops.shape(x)
x_grad = array_ops.slice(grad, begin, sizes)
if len(op.inputs) == 3:
return x_grad, None, None
else:
return x_grad, None
ops.RegisterGradient("Pad")(_PadGrad)
ops.RegisterGradient("PadV2")(_PadGrad)
# ReverseSequence is just a permutation. The gradient permutes back.
@ops.RegisterGradient("ReverseSequence")
def _ReverseSequenceGrad(op, grad):
seq_lengths = op.inputs[1]
return [
array_ops.reverse_sequence(
grad,
batch_axis=op.get_attr("batch_dim"),
seq_axis=op.get_attr("seq_dim"),
seq_lengths=seq_lengths), None
]
@ops.RegisterGradient("Reverse")
def _ReverseGrad(op, grad):
reverse_dims = op.inputs[1]
return gen_array_ops.reverse(grad, reverse_dims), None
@ops.RegisterGradient("ReverseV2")
def _ReverseV2Grad(op, grad):
axis = op.inputs[1]
return array_ops.reverse_v2(grad, axis), None
@ops.RegisterGradient("SpaceToBatch")
def _SpaceToBatchGrad(op, grad):
# Its gradient is the opposite op: BatchToSpace.
block_size = op.get_attr("block_size")
return [
array_ops.batch_to_space(grad, op.inputs[1], block_size=block_size), None
]
@ops.RegisterGradient("SpaceToBatchND")
def _SpaceToBatchNDGrad(op, grad):
# Its gradient is the opposite op: BatchToSpaceND.
return [
array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]), None, None
]
@ops.RegisterGradient("BatchToSpace")
def _BatchToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatch.
block_size = op.get_attr("block_size")
return [
array_ops.space_to_batch(grad, op.inputs[1], block_size=block_size), None
]
@ops.RegisterGradient("BatchToSpaceND")
def _BatchToSpaceNDGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatchND.
return [
array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]), None, None
]
@ops.RegisterGradient("SpaceToDepth")
def _SpaceToDepthGrad(op, grad):
# Its gradient is the opposite op: DepthToSpace.
block_size = op.get_attr("block_size")
data_format = op.get_attr("data_format")
if data_format == "NCHW_VECT_C":
raise ValueError("Cannot compute SpaceToDepth gradient with NCHW_VECT_C. "
"NCHW_VECT_C requires qint8 data type.")
return array_ops.depth_to_space(grad, block_size, data_format=data_format)
@ops.RegisterGradient("DepthToSpace")
def _DepthToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToDepth.
block_size = op.get_attr("block_size")
data_format = op.get_attr("data_format")
if data_format == "NCHW_VECT_C":
raise ValueError("Cannot compute DepthToSpace gradient with NCHW_VECT_C. "
"NCHW_VECT_C requires qint8 data type.")
return array_ops.space_to_depth(grad, block_size, data_format=data_format)
ops.NotDifferentiable("OneHot")
@ops.RegisterGradient("MirrorPad")
def _MirrorPadGrad(op, grad):
mode = op.get_attr("mode")
return [gen_array_ops.mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
@ops.RegisterGradient("MirrorPadGrad")
def _MirrorPadGradGrad(op, grad):
mode = op.get_attr("mode")
return [gen_array_ops.mirror_pad(grad, op.inputs[1], mode=mode), None]
@ops.RegisterGradient("QuantizeAndDequantize")
def _QuantizeAndDequantizeGrad(_, grad):
return grad
@ops.RegisterGradient("QuantizeAndDequantizeV2")
def _QuantizeAndDequantizeV2Grad(_, grad):
return [grad, None, None]
@ops.RegisterGradient("QuantizeAndDequantizeV3")
def _QuantizeAndDequantizeV3Grad(_, grad):
# Only propagate the gradient for the unquantized input.
return [grad, None, None, None]
@ops.RegisterGradient("ExtractImagePatches")
def _ExtractImagePatchesGrad(op, grad):
batch_size, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].shape.dims
]
input_bhwc = array_ops.shape(op.inputs[0])
batch_size = input_bhwc[0]
channels = input_bhwc[3]
# Create indices matrix for input tensor.
# Note that 0 is preserved for padding location,
# so indices for input start from 1 to 1 + rows_in * cols_in.
input_indices_num = 1 + rows_in * cols_in
input_idx = array_ops.reshape(math_ops.range(1, input_indices_num,
dtype=ops.dtypes.int64),
(1, rows_in, cols_in, 1))
input_idx_patched = gen_array_ops.extract_image_patches(
input_idx,
op.get_attr("ksizes"),
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))
# Create indices matrix for output tensor.
_, rows_out, cols_out, _ = [dim.value for dim in op.outputs[0].shape.dims]
_, ksize_r, ksize_c, _ = op.get_attr("ksizes")
# Indices for output start from 0.
output_indices_num = rows_out * cols_out * ksize_r * ksize_c
output_idx = array_ops.reshape(math_ops.range(output_indices_num,
dtype=ops.dtypes.int64),
(1, rows_out, cols_out, ksize_r * ksize_c))
# Construct mapping table for indices: (input -> output).
idx_matrix = array_ops.concat(
[array_ops.expand_dims(input_idx_patched, axis=-1),
array_ops.expand_dims(output_idx, axis=-1)],
axis=-1)
idx_map = array_ops.reshape(idx_matrix, (-1, 2))
sp_shape = (input_indices_num, output_indices_num)
sp_mat_full = sparse_tensor.SparseTensor(
idx_map,
array_ops.ones([output_indices_num], dtype=grad.dtype),
sp_shape)
# Remove all padding locations [0, :].
sp_mat = sparse_ops.sparse_slice(sp_mat_full,
(1, 0),
(input_indices_num - 1, output_indices_num))
grad_expanded = array_ops.transpose(
array_ops.reshape(
grad, (batch_size, rows_out, cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5))
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(jac, (rows_in, cols_in, batch_size, channels))
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out]
@ops.RegisterGradient("ExtractVolumePatches")
def _ExtractVolumePatchesGrad(op, grad):
batch_size, planes_in, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].shape.dims
]
input_bphwc = array_ops.shape(op.inputs[0])
batch_size = input_bphwc[0]
channels = input_bphwc[4]
# Create indices matrix for input tensor.
# Note that 0 is preserved for padding location,
# so indices for input start from 1 to 1 + rows_in * cols_in.
input_indices_num = 1 + planes_in * rows_in * cols_in
input_idx = array_ops.reshape(
math_ops.range(1, input_indices_num, dtype=ops.dtypes.int64),
(1, planes_in, rows_in, cols_in, 1))
input_idx_patched = gen_array_ops.extract_volume_patches(
input_idx, op.get_attr("ksizes"), op.get_attr("strides"),
op.get_attr("padding"))
# Create indices matrix for output tensor.
_, planes_out, rows_out, cols_out, _ = [
dim.value for dim in op.outputs[0].shape.dims
]
_, ksize_p, ksize_r, ksize_c, _ = op.get_attr("ksizes")
# Indices for output start from 0.
prc_indices_num = planes_out * rows_out * cols_out
output_indices_num = prc_indices_num * ksize_p * ksize_r * ksize_c
output_idx = array_ops.reshape(
math_ops.range(output_indices_num, dtype=ops.dtypes.int64),
(1, planes_out, rows_out, cols_out, ksize_p * ksize_r * ksize_c))
# Construct mapping table for indices: (input -> output).
idx_matrix = array_ops.concat([
array_ops.expand_dims(input_idx_patched, axis=-1),
array_ops.expand_dims(output_idx, axis=-1)
],
axis=-1)
idx_map = array_ops.reshape(idx_matrix, (-1, 2))
sp_shape = (input_indices_num, output_indices_num)
sp_mat_full = sparse_tensor.SparseTensor(
idx_map, array_ops.ones([output_indices_num], dtype=grad.dtype), sp_shape)
# Remove all padding locations [0, :].
sp_mat = sparse_ops.sparse_slice(sp_mat_full, (1, 0),
(input_indices_num - 1, output_indices_num))
grad_expanded = array_ops.transpose(
array_ops.reshape(grad, (batch_size, planes_out, rows_out, cols_out,
ksize_p, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 5, 6, 0, 7))
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(
jac, (planes_in, rows_in, cols_in, batch_size, channels))
grad_out = array_ops.transpose(grad_out, (3, 0, 1, 2, 4))
return [grad_out]
@ops.RegisterGradient("ScatterNd")
def _ScatterNdGrad(op, grad):
indices = op.inputs[0]
updates_grad = array_ops.gather_nd(grad, indices)
return [None, updates_grad, None]
@ops.RegisterGradient("TensorScatterUpdate")
def _TensorScatterUpdateGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
tensor_grad = array_ops.tensor_scatter_update(
array_ops.identity(grad), indices,
array_ops.zeros_like(op.inputs[2], dtype=grad.dtype))
return [tensor_grad, None, updates_grad]
@ops.RegisterGradient("TensorScatterAdd")
def _TensorScatterAddGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
tensor_grad = array_ops.identity(grad)
return [tensor_grad, None, updates_grad]
@ops.RegisterGradient("TensorScatterSub")
def _TensorScatterSubGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
tensor_grad = array_ops.identity(grad)
return [tensor_grad, None, -updates_grad]
@ops.RegisterGradient("ScatterNdNonAliasingAdd")
def _ScatterNdNonAliasingAddGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
return [grad, None, updates_grad]
@ops.RegisterGradient("BroadcastTo")
def _BroadcastToGrad(op, grad):
input_value = op.inputs[0]
broadcast_shape = op.inputs[1]
input_value_shape = array_ops.shape(input_value)
_, reduction_axes = gen_array_ops.broadcast_gradient_args(broadcast_shape,
input_value_shape)
updates_grad_reshaped = math_ops.reduce_sum(grad,
axis=reduction_axes,
keepdims=True)
updates_grad = array_ops.reshape(updates_grad_reshaped, input_value_shape)
return [updates_grad, None]
| {
"content_hash": "541812f3a2f923f82a2feeb4125da147",
"timestamp": "",
"source": "github",
"line_count": 897,
"max_line_length": 100,
"avg_line_length": 36.04905239687849,
"alnum_prop": 0.6689448292924295,
"repo_name": "ageron/tensorflow",
"id": "337cf2ff5592b52e8c3e3f1c8eae2c61075e4c74",
"size": "33025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/array_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
import struct
from pydoop.mapreduce.pipes import run_task, Factory
from pydoop.mapreduce.api import Mapper, Reducer
class WordCountMapper(Mapper):
def map(self, context):
for w in context.value.split():
context.emit(w, 1)
class WordCountReducer(Reducer):
def reduce(self, context):
s = sum(context.values)
context.emit(context.key.encode("utf-8"), struct.pack(">i", s))
def __main__():
factory = Factory(WordCountMapper, WordCountReducer)
run_task(factory, auto_serialize=False)
| {
"content_hash": "ff1d47c2237a867c418cc8f7f0d764df",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 23.47826086956522,
"alnum_prop": 0.6814814814814815,
"repo_name": "simleo/pydoop",
"id": "c848ba80678b84826db84c4cae8eca7680aad17f",
"size": "1174",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/sequence_file/bin/wordcount.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "202110"
},
{
"name": "C++",
"bytes": "101371"
},
{
"name": "Dockerfile",
"bytes": "9590"
},
{
"name": "Emacs Lisp",
"bytes": "153"
},
{
"name": "Java",
"bytes": "177920"
},
{
"name": "Python",
"bytes": "400609"
},
{
"name": "Shell",
"bytes": "29222"
}
],
"symlink_target": ""
} |
import logging
import re
import sys
import traceback
import mwparserfromhell as mwp
from .extractor import TemplateExtractor
logger = logging.getLogger(__name__)
PROJECT_NAME = "vikiproje"
def from_template(template):
template_name = str(template.name).lower().strip()
if template_name[:9] == "vikiproje" and (template.has_param('Sınıf') or template.has_param('sınıf')):
try:
if template.has_param('Sınıf'):
label = normalize_label(template.get('Sınıf').value)
else:
label = normalize_label(template.get('sınıf').value)
project_name = None
if template.has_param('Proje'):
project_name = template.get('Proje').value.strip().lower()
elif template.has_param('proje'):
project_name = template.get('proje').value.strip().lower()
else:
project_name = template_name[9:].strip()
project_name = project_name or PROJECT_NAME
if label is not None:
return project_name, label
else:
logger.debug("Class '{0}' not in possible classes."
.format(label))
pass # not a quality assessment class
except ValueError as e:
logger.warning(traceback.format_exc())
pass # no assessment class in template
LABEL_MATCHES = [
("sm", re.compile(r"\bsm\b", re.I)), # featured article
("km", re.compile(r"\bkm\b", re.I)), # good article
("b", re.compile(r"\bb\b", re.I)), # B class
("c", re.compile(r"\bc\b", re.I)), # C class
("baslagıç", re.compile(r"\bbaşlan[gğ]ıç\b", re.I)), # start class
("taslak", re.compile(r"\btaslak\b", re.I)) # stub class
]
def normalize_label(value):
value = str(value.strip_code()).lower().replace("_", " ").strip()
for label, regex in LABEL_MATCHES:
if regex.match(value):
return label
return None
sys.modules[__name__] = TemplateExtractor(
__name__,
doc="""
wikiclass.extractors.trwiki
+++++++++++++++++++++++++++
This extractor looks for instances of the "VikiProje" template on article talk
pages (namespace = 1) with a parameter called "Sınıf". All `project` s are
hard-coded to "VikiProje"
""",
namespaces={1},
from_template=from_template)
| {
"content_hash": "06a88c928d02172f5377ca87b4a7aaa0",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 105,
"avg_line_length": 31.77027027027027,
"alnum_prop": 0.584857507443641,
"repo_name": "nettrom/Wiki-Class",
"id": "de197b650c5d36a0a0a1eeaac35db990d82c6622",
"size": "2369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikiclass/extractors/trwiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "16156"
},
{
"name": "Python",
"bytes": "102524"
}
],
"symlink_target": ""
} |
import unittest
import sys
from androguard.core.bytecodes import dvm
class AnnotationTest(unittest.TestCase):
def testAnnotation(self):
with open("examples/android/TestsAnnotation/classes.dex", "rb") as f:
d = dvm.DalvikVMFormat(f.read())
clazz = d.get_class('Landroid/support/v4/widget/SlidingPaneLayout$SlidingPanelLayoutImplJB;')
annotations = clazz._get_annotation_type_ids()
self.assertIn('Landroid/support/annotation/RequiresApi;', [clazz.CM.get_type(annotation.type_idx) for annotation in annotations])
self.assertIn('Landroid/support/annotation/RequiresApi;', clazz.get_annotations())
| {
"content_hash": "a22bb1868fc7c42a875fcf2d154cdda8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 137,
"avg_line_length": 40.875,
"alnum_prop": 0.7293577981651376,
"repo_name": "androguard/androguard",
"id": "d03d0e00d406aa8a7e3677981b4d70488a0c1e16",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_annotations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "118146"
},
{
"name": "Python",
"bytes": "1120041"
}
],
"symlink_target": ""
} |
"""
Some methods which are useful but didn't quite fit into the graph classes.
"""
import logging
import numpy
import scipy.sparse
class GraphUtils(object):
def __init__(self):
pass
@staticmethod
def vertexLabelPairs(graph, edgeArray):
"""
Create an array of pairs of vertex labels for each edge in the graph. Returns
a tuple of this array and a vector of corresponding edge weights, for the
indices of the full edge index array as given by getAllEdges.
:param edgeArray: A numpy array with 2 columns, and values corresponding to vertex indices.
:type edgeArray: :class:`numpy.ndarray`
"""
#Parameter.checkList(edgeArray, Parameter.checkIndex, (0, self.getNumVertices()))
numFeatures = graph.getVertexList().getNumFeatures()
X = numpy.zeros((edgeArray.shape[0], numFeatures*2))
for i in range(edgeArray.shape[0]):
X[i, 0:numFeatures] = graph.getVertex(edgeArray[i, 0])
X[i, numFeatures:numFeatures*2] = graph.getVertex(edgeArray[i, 1])
return X
@staticmethod
def vertexLabelExamples(graph):
"""
Return a set of examples with pairs of vertex labels connected by an
edge. For undircted graphs there exists an example (v_i, v_j) for
every (v_j, v_i). Also, there is a set of negative examples where the
edge does not exist.
"""
numFeatures = graph.getVertexList().getNumFeatures()
numEdges = graph.getNumEdges()
#Also add non-edges
logging.info("Computing graph complement")
cGraph = graph.complement()
logging.info("Done with " + str(cGraph.getNumEdges()) + " edges.")
perm = numpy.random.permutation(cGraph.getNumEdges())[0:numEdges]
X = GraphUtils.vertexLabelPairs(graph, graph.getAllEdges())
Xc = GraphUtils.vertexLabelPairs(cGraph, cGraph.getAllEdges()[perm, :])
X = numpy.r_[X, Xc]
y = numpy.ones(numEdges*2)
y[numEdges:numEdges*2] = -1
logging.debug(y)
#If the graph is undirected add reverse edges
if graph.isUndirected():
X2 = numpy.zeros((numEdges*2, numFeatures*2))
X2[:, 0:numFeatures] = X[:, numFeatures:numFeatures*2]
X2[:, numFeatures:numFeatures*2] = X[:, 0:numFeatures]
X = numpy.r_[X, X2]
y = numpy.r_[y, y]
return X, y
@staticmethod
def treeRoot(treeGraph):
"""
Find the root of the given tree
"""
inDegSeq = treeGraph.inDegreeSequence()
root = numpy.nonzero(inDegSeq==0)[0][0]
return root
@staticmethod
def treeDepth(treeGraph):
"""
Find the depth of the given tree.
"""
if treeGraph.getNumVertices()==0:
return 0
if not treeGraph.isTree():
raise ValueError("Input graph is not a tree")
root = GraphUtils.treeRoot(treeGraph)
distances = treeGraph.dijkstrasAlgorithm(root)
return numpy.max(distances[distances!=float('inf')])
@staticmethod
def modularity(W, clustering):
"""
Give a symmetric weight matrix W and a clustering array "clustering", compute the
modularity of Newman and Girvan. The input matrix W can be either an
ndarray or a scipy.sparse matrix.
"""
numVertices = W.shape[0]
clusterIds = numpy.unique(clustering)
if type(W) == numpy.ndarray:
degSequence = numpy.sum(W, 0)
else:
degSequence = numpy.array(W.sum(0)).ravel()
numEdges = numpy.sum(degSequence)/2.0
Q = 0
for i in clusterIds:
inds = numpy.arange(numVertices)[i==clustering]
subW = W[inds, :][:, inds]
Q += subW.sum()
Q -= degSequence[inds].sum()**2/(2.0*numEdges)
Q = Q/(2*numEdges)
return Q
@staticmethod
def kwayNormalisedCut(W, clustering):
"""
Do k-way normalised cut. Each cluster should have at least 1 edge. The input
matrix W can be either an ndarray or a scipy.sparse matrix.
"""
numVertices = W.shape[0]
clusterIds = numpy.unique(clustering)
Q = 0
for i in clusterIds:
inds = numpy.arange(numVertices)[i==clustering]
invInds = numpy.arange(numVertices)[i!=clustering]
numClustEdges = float((W[inds, :]).sum())
if (len(invInds) != 0) and (numClustEdges != 0):
Q += (W[inds, :][:, invInds]).sum()/numClustEdges
Q = Q/clusterIds.shape[0]
return Q
@staticmethod
def shiftLaplacian(W):
"""
Give a scipy sparse csr matrix W, compute the shifted Laplacian matrix,
which is defined as I + D^-0.5 W D^-0.5 where D is a diagonal matrix of
degrees. For vertices of degree zero, the corresponding row/col of the
Laplacian is zero with a 0 at the diagonal. The eigenvalues of the shift
Laplacian are between 0 and 2.
"""
if not scipy.sparse.isspmatrix_csr(W):
raise ValueError("W is not a csr matrix")
W = scipy.sparse.csr_matrix(W, dtype=numpy.float)
d = numpy.array(W.sum(0)).ravel()
d[d!=0] = d[d!=0]**-0.5
D = scipy.sparse.spdiags(d, 0, d.shape[0], d.shape[0], format='csr')
i = numpy.zeros(W.shape[0])
i[d!=0] = 1
I = scipy.sparse.spdiags(i, 0, i.shape[0], i.shape[0], format='csr')
Lhat = I + D.dot(W).dot(D)
return Lhat
@staticmethod
def normalisedLaplacianSym(W):
"""
Give a scipy sparse csr matrix W, compute the normalised Laplacian matrix,
which is defined as I - D^-0.5 W D^-0.5 where D is a diagonal matrix of
degrees. For vertices of degree zero, the corresponding row/col of the
Laplacian is zero with a 0 at the diagonal. The eigenvalues of the
Laplacian are between 0 and 2.
"""
if not scipy.sparse.isspmatrix_csr(W):
raise ValueError("W is not a csr matrix")
W = scipy.sparse.csr_matrix(W, dtype=numpy.float)
d = numpy.array(W.sum(0)).ravel()
d[d!=0] = d[d!=0]**-0.5
D = scipy.sparse.spdiags(d, 0, d.shape[0], d.shape[0], format='csr')
i = numpy.zeros(W.shape[0])
i[d!=0] = 1
I = scipy.sparse.spdiags(i, 0, i.shape[0], i.shape[0], format='csr')
Lhat = I - D.dot(W).dot(D)
return Lhat
@staticmethod
def normalisedLaplacianRw(W):
"""
Compute the random walk Laplacian matrix given by D^-1 L where L is the
unnormalised Laplacian.
"""
if not scipy.sparse.isspmatrix_csr(W):
raise ValueError("W is not a csr matrix")
d = numpy.array(W.sum(0)).ravel()
d[d!=0] = d[d!=0]**-1
D = scipy.sparse.spdiags(d, 0, d.shape[0], d.shape[0], format='csr')
i = numpy.zeros(W.shape[0])
i[d!=0] = 1
I = scipy.sparse.spdiags(i, 0, i.shape[0], i.shape[0], format='csr')
Lhat = I - D.dot(W)
return Lhat
@staticmethod
def modularityMatrix(W):
"""
Compute the modularity matrix from a weight matrix W.
"""
if not scipy.sparse.isspmatrix_csr(W):
raise ValueError("W is not a csr matrix")
d = numpy.array(W.sum(0)).ravel()
m = W.getnnz()/2
B = W - numpy.outer(d, d)/(2*m)
return B
@staticmethod
def randIndex(clustering1, clustering2):
"""
Compute the rand index for 2 clusterings given in arrays v1 and v2.
"""
numVertices = clustering1.shape[0]
error = 0
for i in range(numVertices):
same_cl = clustering1[i] == clustering1
same_learned_cl = clustering2[i] == clustering2
error += (same_cl != same_learned_cl).sum()
return float(error)/(numVertices*(numVertices-1))
| {
"content_hash": "ad114f0aa25a8aea572d1275274e2761",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 99,
"avg_line_length": 33.93801652892562,
"alnum_prop": 0.5680019481310118,
"repo_name": "charanpald/APGL",
"id": "cb7949cdadda4db1fff84fd646679731dd0116a8",
"size": "8213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apgl/graph/GraphUtils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "532692"
}
],
"symlink_target": ""
} |
from cmd3.shell import function_command
from cloudmesh.shell.cm_list import shell_command_list
class cm_shell_list:
    """cmd3 plugin exposing the cloudmesh "list" shell command."""

    def activate_cm_shell_list(self):
        # Register the "list" command under the "cloud" topic so the shell
        # can group it in its help/topic listings.
        self.register_command_topic('cloud', 'list')

    @function_command(shell_command_list)
    def do_list(self, args, arguments):
        # Delegate directly to the cloudmesh implementation; the
        # function_command decorator wires up usage/docs from it.
        shell_command_list(arguments)
| {
"content_hash": "0ee3814ac74c28af65fcc02f52301650",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 54,
"avg_line_length": 25.53846153846154,
"alnum_prop": 0.713855421686747,
"repo_name": "rajpushkar83/cloudmesh",
"id": "47fe4a66a9da9b5b15509f78249b2b75f5ad32d2",
"size": "332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudmesh_cmd3/plugins/cm_shell_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
} |
import csv
class SimpleSaleInfo:
    """Minimal record of a single sale: the city it occurred in and the item.

    Only the two columns needed for the unique-sales report are kept from
    the five-column source rows.
    """

    def __init__(self, city, item):
        self.city = city
        self.item = item

    def __repr__(self):
        # Added for debuggability; instances previously printed as opaque
        # object addresses.
        return 'SimpleSaleInfo(city={!r}, item={!r})'.format(
            self.city, self.item)
def _read_sales(sales_file_path):
    """Parse the sales CSV file into a list of SimpleSaleInfo records.

    Blank rows are skipped; any non-empty row that does not have exactly
    five columns raises ValueError.
    """
    parsed = []
    with open(sales_file_path, encoding='utf-8') as sales_file:
        for row in csv.reader(sales_file):
            if not any(row):
                continue
            if len(row) != 5:
                raise ValueError('Invalid data in file: {}'.format(row))
            # Column 0 is the item name, column 2 the city.
            parsed.append(SimpleSaleInfo(row[2], row[0]))
    return parsed
def _get_cities_with_unique_item_sales(sales):
result = []
items_set = set([sale.item for sale in sales])
cities_by_item = {}
for item in items_set:
if item not in cities_by_item:
cities_by_item[item] = set()
for sale in sales:
if sale.item == item and sale.city not in cities_by_item[item]:
cities_by_item[item].add(sale.city)
sorted_cities_set = sorted(set([sale.city for sale in sales]))
for city in sorted_cities_set:
unique_sale_items_in_city = sorted(
[item for _, item in enumerate(cities_by_item)
if len(cities_by_item[item]) == 1 and city in cities_by_item[item]])
if unique_sale_items_in_city:
result.append('{},{}'.format(city, ','.join(unique_sale_items_in_city)))
return result
def main():
    """Read a sales file path from stdin and print the unique-sales report."""
    try:
        sales = _read_sales(input())
        if not sales:
            print('INVALID INPUT')
            return

        report_lines = _get_cities_with_unique_item_sales(sales)
        if not report_lines:
            print('NO UNIQUE SALES')
        else:
            for line in report_lines:
                print(line)
    # Any failure (missing file, malformed row, ...) is reported uniformly.
    except Exception:
        print('INVALID INPUT')


if __name__ == '__main__':
    main()
| {
"content_hash": "4f829072033bd1af13d5ceb4ae69c593",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 84,
"avg_line_length": 25.931506849315067,
"alnum_prop": 0.560486001056524,
"repo_name": "vladislav-karamfilov/Python-Playground",
"id": "cbb4e67897ac4ffc7a2fe4ac53c10a0e96a3ad34",
"size": "1893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SoftUni-Python-Programming-Course/Exam/unique_sales_by_cities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59465"
}
],
"symlink_target": ""
} |
__author__ = 'Mind'
import cv2
import numpy as np
import glob
import matplotlib.pyplot as plt
print('Loading training data...')
e0 = cv2.getTickCount()
# load training data
# image_array/label_array start with one dummy zero row so np.vstack can
# append below; the dummy row is sliced off with [1:, :] afterwards.
# 38400 is the flattened input image size; 11 is the number of classes.
image_array = np.zeros((1, 38400))
label_array = np.zeros((1, 11), 'float')
training_data = glob.glob('training_data_temptest0407.npz')
for single_npz in training_data:
    with np.load(single_npz) as data:
        print(data.files)
        train_temp = data['train']
        train_labels_temp = data['train_labels']
        print(train_temp.shape)
        print(train_labels_temp.shape)
        image_array = np.vstack((image_array, train_temp))
        label_array = np.vstack((label_array, train_labels_temp))
train = image_array[1:, :]
train_labels = label_array[1:, :]
print(train.shape)
print(train_labels.shape)
# The second dataset (0408) is loaded the same way and used as the test set.
# NOTE(review): this duplicates the loader above; a helper function would
# remove the copy/paste.
image_array = np.zeros((1, 38400))
label_array = np.zeros((1, 11), 'float')
training_data = glob.glob('training_data_temptest0408.npz')
for single_npz in training_data:
    with np.load(single_npz) as data:
        print(data.files)
        train_temp = data['train']
        train_labels_temp = data['train_labels']
        print(train_temp.shape)
        print(train_labels_temp.shape)
        image_array = np.vstack((image_array, train_temp))
        label_array = np.vstack((label_array, train_labels_temp))
test = image_array[1:, :]
test_labels = label_array[1:, :]
print(test.shape)
print(test_labels.shape)
e00 = cv2.getTickCount()
time0 = (e00 - e0)/ cv2.getTickFrequency()
print('Loading image duration:%f'%time0)
# set start time
e1 = cv2.getTickCount()
# create MLP
# Single hidden layer of 50 units; the trailing comment records accuracy
# observed for other hidden-layer sizes.
layer_sizes = np.int32([38400, 50, 11]) #250:92.40 200:86.38 50:81.93
model = cv2.ml.ANN_MLP_create()
model.setLayerSizes(layer_sizes)
model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
model.setBackpropMomentumScale(0.01)
model.setBackpropWeightScale(0.1)
# Stop after 50 iterations or once the error change drops below 0.001.
model.setTermCriteria((cv2.TERM_CRITERIA_COUNT| cv2.TERM_CRITERIA_EPS, 50, 0.001))
model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 0.2, 1)
#model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 0.5, 1)
print('Training MLP ...')
num_iter = model.train(np.float32(train), cv2.ml.ROW_SAMPLE, np.float32(train_labels))
# set end time
e2 = cv2.getTickCount()
time = (e2 - e1)/cv2.getTickFrequency()
print('Training duration:%f'% time)
# save param
model.save('mlp_mlp2.xml')
#print('Ran for %d iterations'% num_iter)
# Evaluate on the training set. A prediction off by exactly one class index
# is also counted (rate1/rate2 test against labels shifted by +/-1).
ret, resp = model.predict(train)
prediction = resp.argmax(-1)
#print('Prediction:', prediction)
true_labels = train_labels.argmax(-1)
#print('True labels:', true_labels)
number = prediction.shape[0]
print('Testing...')
train_rate0 = np.mean(prediction == true_labels)
train_rate1 = np.mean(prediction == true_labels+1)
train_rate2 = np.mean(prediction == true_labels-1)
train_rate = train_rate0 + train_rate1 + train_rate2
print('Train rate: %f:' % (train_rate0*100))
print('Train rate: %f:' % (train_rate*100))
plt.plot(range(number), prediction, 'b')
plt.plot(range(number), true_labels, 'r')
# Evaluate on the held-out (0408) set with the same +/-1 tolerance.
ret2, resp2 = model.predict(test)
prediction2 = resp2.argmax(-1)
#print('Prediction:', prediction)
true_test_labels = test_labels.argmax(-1)
#print('True labels:', true_labels)
number = prediction2.shape[0]
print('Testing...')
test_rate0 = np.mean(prediction2 == true_test_labels)
test_rate1 = np.mean(prediction2 == true_test_labels+1)
test_rate2 = np.mean(prediction2 == true_test_labels-1)
# NOTE(review): test_rate2 is overwritten here (the -1 component is folded
# into the total), and the prints below say 'Train rate' although they
# report test-set rates. Print text is runtime output, so it is left
# untouched in this documentation-only pass -- worth fixing separately.
test_rate2 = test_rate0 + test_rate1 + test_rate2
print('Train rate: %f:' % (test_rate0*100))
print('Train rate: %f:' % (test_rate2*100))
plt.show()
"content_hash": "2c1a337fd60a371c0ff354a0fa0e6b08",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 86,
"avg_line_length": 30.831858407079647,
"alnum_prop": 0.6977611940298507,
"repo_name": "lurenlym/BPnetwork_smartcar",
"id": "8012810ce954fe9f87bcd8d907287e0c9c498da1",
"size": "3490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "computer/computer_mlp_training.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6784"
},
{
"name": "C++",
"bytes": "1013"
},
{
"name": "Python",
"bytes": "14549"
}
],
"symlink_target": ""
} |
from django import forms
from .models import Message
class MessageForm(forms.ModelForm):
    """ModelForm that creates a Message from name/email/message input."""

    class Meta:
        model = Message
        fields = ('name', 'email', 'message')

    # Example per-field validation hook (clean_<fieldname>).
    def clean_name(self):
        data = self.cleaned_data['name']
        # Demo rule: the name must contain the letter 'A'. The error message
        # is user-facing Polish text ("You must have a name starting with
        # 'A'!") and is deliberately left untranslated.
        if "A" not in data:
            raise forms.ValidationError("Musisz mieć imię zaczynające się na 'A'!")
        return data
"content_hash": "de49b35a0b57af20c1ad248466753965",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6414634146341464,
"repo_name": "adrianborkowski/kurs_django",
"id": "3a1e7e050ae0077dd8f1c84ca5a94b35afa20925",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "contact/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2632"
},
{
"name": "Python",
"bytes": "24816"
}
],
"symlink_target": ""
} |
import functools
def Memoize(f):
  """Decorator to cache return values of function.

  The cache key is built from the repr of the positional arguments plus the
  *sorted* keyword arguments, so calls passing the same keyword arguments in
  a different order share one cache entry (the previous key used the raw
  kwargs dict repr, which is insertion-order sensitive). The cache is
  unbounded and lives for the lifetime of the decorated function.
  """
  memoize_dict = {}

  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    # Sort kwargs so keyword order does not affect the cache key.
    key = repr((args, sorted(kwargs.items())))
    if key not in memoize_dict:
      memoize_dict[key] = f(*args, **kwargs)
    return memoize_dict[key]
  return wrapper
| {
"content_hash": "aef162b5411bc2d04b32f599abc8c17d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 24.384615384615383,
"alnum_prop": 0.6498422712933754,
"repo_name": "catapult-project/catapult",
"id": "5d2861077dab480ed87fdd155007ea4580bfa723",
"size": "480",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "devil/devil/utils/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
from django.core.paginator import Paginator, InvalidPage, Page
from rest_framework.response import Response
from rest_framework import status
import six
# Maps the 'status' value returned by manager methods to the HTTP status
# code sent back to the client.
STATUS_CODES = {
    'created': status.HTTP_201_CREATED,
    'updated': status.HTTP_200_OK,
    'accepted': status.HTTP_202_ACCEPTED
}
class MultipleObjectMixin(object):
    """SQLAlchemy analog to Django's MultipleObjectMixin."""
    allow_empty = True
    query_object = None
    paginate_by = None
    paginator_class = Paginator

    def filter_query_object(self, query_object):
        """Generic filtering hook.

        This is a stub and has yet to be implemented.
        """
        return query_object

    def paginate_query_object(self, query_object, page_size):
        """Paginate the query object.

        Returns a (paginator, page, object_list, has_other_pages) tuple.
        """
        paginator = self.get_paginator(
            query_object, page_size,
            allow_empty_first_page=self.get_allow_empty())

        # The requested page comes from the URL kwargs, then the query
        # string, then defaults to the first page.
        requested_page = (
            self.kwargs.get('page') or self.request.GET.get('page') or 1)
        try:
            page_number = int(requested_page)
        except ValueError:
            if requested_page != 'last':
                raise InvalidPage("Page is not 'last', "
                                  "nor can it be converted to an int.")
            page_number = paginator.num_pages

        # DB2 fix for invalid 0 literal.
        # Generates FETCH 0 rows if not done this way
        if paginator.count == 0:
            return (paginator, Page([], 1, paginator), [], False)
        page = paginator.page(page_number)
        return (paginator, page, page.object_list, page.has_other_pages())

    def get_paginate_by(self, query_object):
        """Get the number of items to paginate by. None for no pagination."""
        return self.paginate_by

    def get_paginator(self, query_object, per_page, orphans=0,
                      allow_empty_first_page=True):
        """Get a paginator instance.

        The class used is overridable by setting the paginator_class
        attribute.
        """
        return self.paginator_class(
            query_object, per_page, orphans=orphans,
            allow_empty_first_page=allow_empty_first_page)

    def get_allow_empty(self):
        """Returns True to display empty lists, False to 404."""
        return self.allow_empty

    def get_page(self, queryset):
        """Filter and paginate the queryset, returning the page's objects."""
        page_size = self.get_paginate_by(queryset)
        filtered = self.filter_query_object(queryset)
        _, _, object_list, _ = self.paginate_query_object(filtered, page_size)
        return object_list
def make_action_method(name, methods, **kwargs):
    """Build a viewset action method that delegates to the manager.

    :param name: Name of the manager method to invoke.
    :param methods: List of HTTP methods the action is bound to.
    :returns: A function suitable for attaching to a viewset class; the
        router reads its `bind_to_methods` and `kwargs` attributes.
    """
    def func(self, request, pk=None, **kwargs):
        # The original assert messages were accidentally split into separate
        # no-op string statements, so only the first half of each message was
        # ever shown. Parenthesise so the full message attaches to the
        # assert.
        assert hasattr(request, 'DATA'), ('request object must have DATA'
                                          ' attribute')
        assert hasattr(self, 'manager_class'), ('viewset must have'
                                                ' manager_class defined')
        assert hasattr(self, 'manager_factory'), (
            'viewset must provide a'
            ' manager_factory method to insantiate the manager')

        mgr = self.manager_factory(context={'request': request})
        mgr_method = getattr(mgr, name)

        resp = mgr_method(request.DATA, pk, **kwargs)

        # no response returned back, assume everything is fine
        if not resp:
            return Response(resp, status.HTTP_200_OK)
        return Response(resp, STATUS_CODES[resp['status']])

    func.bind_to_methods = methods
    func.kwargs = kwargs
    return func
class ManagerMeta(type):
    """
    Meta class to read action methods from
    manager and attach them to viewset

    This allows us to directly call manager methods
    without writing any action methods on viewsets
    """
    def __new__(cls, name, bases, attrs):
        if 'manager_class' in attrs:
            mgr_class = attrs['manager_class']
            if hasattr(mgr_class, 'action_methods'):
                # dict.items() works on both Python 2 and 3; the previous
                # iteritems() call was Python 2 only and crashes on 3.
                for mname, methods in mgr_class.action_methods.items():
                    attrs[mname] = make_action_method(mname.lower(), methods)
        return super(ManagerMeta, cls).__new__(cls, name, bases, attrs)
class ManagerMixin(six.with_metaclass(ManagerMeta, object)):
    """
    Manager mixin allows to use a manager class
    to provide the actual CRUD implementation in
    addition to providing action methods

    Example::

        class MyManager(AlchemyModelManager):
            action_methods = {'my_method': ['POST']}

            def my_method(self, data, pk=None, **kwargs):
                # data is actual payload
                return {'status': 'created'}

        class MyViewSet(viewset.Viewsets, ManagerMixin):
            manager_class = MyManager
    """
    def manager_factory(self, *args, **kwargs):
        """Instantiate the configured manager class.

        Override this factory method to return your own manager instance.
        """
        assert hasattr(self, 'manager_class'), \
            "manager_class has to be specified"
        return self.manager_class(*args, **kwargs)
| {
"content_hash": "5d1eaacd7831eb7ec8948fa082bbf5e1",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 78,
"avg_line_length": 33.733333333333334,
"alnum_prop": 0.61699604743083,
"repo_name": "pombredanne/djangorest-alchemy",
"id": "b8d2e7a3db202b3287512ed5cec1cb36a0780dcf",
"size": "5085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangorest_alchemy/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "40865"
}
],
"symlink_target": ""
} |
"""Tests for draft upgrade services."""
from __future__ import annotations
from core import feconf
from core import utils
from core.domain import draft_upgrade_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import state_domain
from core.tests import test_utils
from typing import Dict, Final
class DraftUpgradeUnitTests(test_utils.GenericTestBase):
    """Test the draft upgrade services module."""

    EXP_ID: Final = 'exp_id'
    USER_ID: Final = 'user_id'
    # A plain title edit: bumps the exploration version without touching the
    # states schema.
    OTHER_CHANGE_LIST: Final = [exp_domain.ExplorationChange({
        'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
        'property_name': 'title',
        'new_value': 'New title'
    })]
    # Migrates the states schema from version 0 straight to the latest
    # version in one commit.
    EXP_MIGRATION_CHANGE_LIST: Final = [exp_domain.ExplorationChange({
        'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
        'from_version': '0',
        'to_version': str(feconf.CURRENT_STATE_SCHEMA_VERSION)
    })]
    # The draft change list the tests below attempt to upgrade.
    DRAFT_CHANGELIST: Final = [exp_domain.ExplorationChange({
        'cmd': 'edit_exploration_property',
        'property_name': 'title',
        'old_value': None,
        'new_value': 'Updated title'})]

    def setUp(self) -> None:
        super().setUp()
        self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)

    def test_try_upgrade_with_no_version_difference(self) -> None:
        # Nothing to upgrade when draft and exploration versions match.
        self.assertIsNone(
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 1, 1, self.EXP_ID))

    def test_try_upgrade_raises_exception_if_versions_are_invalid(self) -> None:
        # A draft version ahead of the exploration version is invalid.
        with self.assertRaisesRegex(
            utils.InvalidInputException,
            'Current draft version is greater than the exploration version.'):
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 2, 1, self.EXP_ID)
        exp_services.update_exploration(
            self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
            'Changed exploration title.')
        exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.version, 2)
        # The versions are now valid, but the intervening commit is not a
        # schema migration, so no upgrade is performed and None is returned.
        self.assertIsNone(
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))

    def test_try_upgrade_failure_due_to_unsupported_commit_type(self) -> None:
        exp_services.update_exploration(
            self.USER_ID, self.EXP_ID, self.OTHER_CHANGE_LIST,
            'Changed exploration title.')
        exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.version, 2)
        # A title edit is not a states-schema migration commit, so the draft
        # cannot be carried across it.
        self.assertIsNone(
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))

    def test_try_upgrade_failure_due_to_unimplemented_upgrade_methods(
        self
    ) -> None:
        exp_services.update_exploration(
            self.USER_ID, self.EXP_ID, self.EXP_MIGRATION_CHANGE_LIST,
            'Ran Exploration Migration job.')
        exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.version, 2)
        # The v0 -> latest jump has no draft conversion functions registered
        # (presumably -- that is what the test name implies), so the upgrade
        # gives up and returns None.
        self.assertIsNone(
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                self.DRAFT_CHANGELIST, 1, exploration.version, self.EXP_ID))
class DraftUpgradeUtilUnitTests(test_utils.GenericTestBase):
    """Test the DraftUpgradeUtil module."""

    EXP_ID: Final = 'exp_id'
    USER_ID: Final = 'user_id'
    # A representative single-step schema migration change (v36 -> v37).
    EXP_MIGRATION_CHANGE_LIST: Final = [exp_domain.ExplorationChange({
        'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
        'from_version': '36',
        'to_version': '37'
    })]
    def create_and_migrate_new_exploration(
        self,
        current_schema_version: str,
        target_schema_version: str
    ) -> None:
        """Creates an exploration and applies a state schema migration to it.

        Creates an exploration and migrates its state schema from version
        current_schema_version to target_schema_version. Asserts that the
        exploration was successfully migrated.

        Args:
            current_schema_version: string. The current schema version of the
                exploration (eg. '29').
            target_schema_version: string. The schema version to upgrade
                the exploration to (eg. '30').
        """
        # Create an exploration change list with the command that will migrate
        # the schema from current_schema_version to target_schema_version.
        exp_migration_change_list = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
                'from_version': current_schema_version,
                'to_version': target_schema_version
            })
        ]
        # The migration will automatically migrate the exploration to the latest
        # state schema version, so we set the latest schema version to be the
        # target_schema_version.
        with self.swap(
            feconf, 'CURRENT_STATE_SCHEMA_VERSION', int(target_schema_version)):
            # Create and migrate the exploration.
            self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
            exp_services.update_exploration(
                self.USER_ID, self.EXP_ID, exp_migration_change_list,
                'Ran Exploration Migration job.')
            # Assert that the update was applied and that the exploration state
            # schema was successfully updated.
            exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
            # Version 2 == the initial creation commit plus the migration.
            self.assertEqual(exploration.version, 2)
            self.assertEqual(
                str(exploration.states_schema_version), target_schema_version)
    def test_convert_to_latest_schema_version_implemented(self) -> None:
        # Guard test: whenever CURRENT_STATE_SCHEMA_VERSION is bumped, a
        # matching draft conversion function must be added to
        # DraftUpgradeUtil, otherwise this fails with a pointed message.
        state_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION
        conversion_fn_name = '_convert_states_v%s_dict_to_v%s_dict' % (
            state_schema_version - 1, state_schema_version)
        self.assertTrue(
            hasattr(
                draft_upgrade_services.DraftUpgradeUtil, conversion_fn_name),
            msg='Current schema version is %d but DraftUpgradeUtil.%s is '
            'unimplemented.' % (state_schema_version, conversion_fn_name))
    def test_convert_states_v53_dict_to_v54_dict(self) -> None:
        # A draft edit that should pass through the v53 -> v54 conversion
        # unchanged.
        draft_change_list_v53 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'TextInput'
            })
        ]
        # Migrate exploration to state schema version 54.
        self.create_and_migrate_new_exploration('53', '54')
        migrated_draft_change_list_v54 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v53, 1, 2, self.EXP_ID)
        )
        # Ruling out the possibility of None for mypy type checking.
        assert migrated_draft_change_list_v54 is not None
        # Change draft change lists into a list of dicts so that it is
        # easy to compare the whole draft change list.
        # NOTE(review): the local names below say v52/v53 but actually hold
        # the v53 input and v54 output respectively -- misnamed, not wrong.
        draft_change_list_v52_dict_list = [
            change.to_dict() for change in draft_change_list_v53
        ]
        migrated_draft_change_list_v53_dict_list = [
            change.to_dict() for change in migrated_draft_change_list_v54
        ]
        self.assertEqual(
            draft_change_list_v52_dict_list,
            migrated_draft_change_list_v53_dict_list)
    def test_convert_states_v52_dict_to_v53_dict(self) -> None:
        # Answer group with non-None outcome/rule training data.
        ans_group_1 = state_domain.AnswerGroup(
            state_domain.Outcome(
                'state_name', None, state_domain.SubtitledHtml(
                    'feedback_1', '<p>Try again</p>'),
                True, [], 'Not None', None),
            [
                state_domain.RuleSpec(
                    'Contains',
                    {
                        'x': {
                            'contentId': 'rule_input_Equals',
                            'normalizedStrSet': ['Test']
                        }
                    })
            ],
            [],
            'Not None'
        ).to_dict()
        # Answer group with all-None optional fields and no rules.
        ans_group_2 = state_domain.AnswerGroup(
            state_domain.Outcome(
                'state_name',
                None,
                state_domain.SubtitledHtml('feedback_1', '<p>Feedback</p>'),
                False,
                [],
                None,
                None
            ),
            [],
            [],
            None
        ).to_dict()
        interaction_answer_groups = [
            ans_group_1,
            ans_group_2
        ]
        # Case 1: an answer-groups edit.
        draft_change_list_v52_1 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'state_name',
                'property_name': (
                    exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS),
                'new_value': interaction_answer_groups
            })
        ]
        # Case 2: a content edit containing an image tag.
        draft_change_list_v52_2 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': exp_domain.STATE_PROPERTY_CONTENT,
                'state_name': 'New state',
                'old_value': state_domain.SubtitledHtml(
                    'content', '').to_dict(),
                'new_value': state_domain.SubtitledHtml(
                    'content',
                    '<oppia-noninteractive-image filepath-with-value='
                    '&quot;abc.png&quot; caption-with-value=&quot;'
                    '&quot;></oppia-noninteractive-image>'
                ).to_dict()
            })
        ]
        # Case 3: a content edit with nested rich-text (tabs with an image).
        draft_change_list_v52_3 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': exp_domain.STATE_PROPERTY_CONTENT,
                'state_name': 'New state',
                'old_value': state_domain.SubtitledHtml(
                    'content', '').to_dict(),
                'new_value': state_domain.SubtitledHtml(
                    'content', (
                        '<oppia-noninteractive-tabs tab_contents-with-value=\"'
                        '[{&amp;quot;title&amp;quot;:&amp;quot;Title1&amp;'
                        'quot;,&amp;quot;content&amp;quot;:&amp;quot;&amp;lt;p'
                        '&amp;gt;Content1&amp;lt;/p&amp;gt;&amp;quot;},'
                        '{&amp;quot;title&amp;quot;:&amp;quot;Title2&amp;quot;'
                        ',&amp;quot;content&amp;quot;:&amp;quot;&amp;lt;p&amp;'
                        'gt;Content2&amp;lt;/p&amp;gt;&amp;lt;'
                        'oppia-noninteractive-image filepath-with-value=\\'
                        '&amp;quot;&amp;amp;amp;quot;s7TabImage.png&amp;amp;'
                        'amp;quot;\\&amp;quot;&amp;gt;&amp;lt;/'
                        'oppia-noninteractive-image&amp;gt;&amp;quot;}]\">'
                        '</oppia-noninteractive-tabs>')).to_dict()
                }
            )
        ]
        # Case 4: a written-translations edit (html data format).
        draft_change_list_v52_4 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': (
                    exp_domain.STATE_PROPERTY_WRITTEN_TRANSLATIONS),
                'state_name': 'New state',
                'old_value': state_domain.WrittenTranslations({
                    'content': {
                        'en': state_domain.WrittenTranslation(
                            'html', '', False)
                    }
                }).to_dict(),
                'new_value': state_domain.WrittenTranslations({
                    'content': {
                        'en': state_domain.WrittenTranslation(
                            'html',
                            (
                                '<oppia-noninteractive-image '
                                'filepath-with-value=&quot;abc.png&quot; '
                                'caption-with-value=&quot;&quot;>'
                                '</oppia-noninteractive-image>'
                            ),
                            True
                        )
                    }
                }).to_dict()
            })
        ]
        # Case 5: an exploration-level property edit.
        draft_change_list_v52_5 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                'property_name': 'auto_tts_enabled',
                'new_value': True,
            })
        ]
        # Case 6: a written-translations edit with a list-valued translation.
        draft_change_list_v52_6 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'property_name': (
                    exp_domain.STATE_PROPERTY_WRITTEN_TRANSLATIONS),
                'state_name': 'New state',
                'old_value': state_domain.WrittenTranslations({
                    'content': {
                        'en': state_domain.WrittenTranslation(
                            'html', '', False)
                    }
                }).to_dict(),
                'new_value': state_domain.WrittenTranslations({
                    'content': {
                        'en': state_domain.WrittenTranslation(
                            'html', ['content'], True
                        )
                    }
                }).to_dict()
            })
        ]
        self.create_and_migrate_new_exploration('52', '53')
        # Answer-group edits are dropped by the v52 -> v53 conversion
        # (upgrade returns None); content and property edits survive.
        migrated_draft_change_list_v53_1 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v52_1, 1, 2, self.EXP_ID)
        )
        assert migrated_draft_change_list_v53_1 is None
        migrated_draft_change_list_v53_2 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v52_2, 1, 2, self.EXP_ID)
        )
        assert migrated_draft_change_list_v53_2 is not None
        self.assertEqual(
            [change.to_dict() for change in draft_change_list_v52_2],
            [change.to_dict() for change in migrated_draft_change_list_v53_2]
        )
        migrated_draft_change_list_v53_3 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v52_3, 1, 2, self.EXP_ID)
        )
        assert migrated_draft_change_list_v53_3 is not None
        self.assertEqual(
            [change.to_dict() for change in draft_change_list_v52_3],
            [change.to_dict() for change in migrated_draft_change_list_v53_3]
        )
        migrated_draft_change_list_v53_4 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v52_4, 1, 2, self.EXP_ID)
        )
        assert migrated_draft_change_list_v53_4 is not None
        self.assertEqual(
            [change.to_dict() for change in draft_change_list_v52_4],
            [change.to_dict() for change in migrated_draft_change_list_v53_4]
        )
        migrated_draft_change_list_v53_5 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v52_5, 1, 2, self.EXP_ID)
        )
        assert migrated_draft_change_list_v53_5 is not None
        self.assertEqual(
            [change.to_dict() for change in draft_change_list_v52_5],
            [change.to_dict() for change in migrated_draft_change_list_v53_5]
        )
        migrated_draft_change_list_v53_6 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v52_6, 1, 2, self.EXP_ID)
        )
        # List-valued translations also cause the draft to be dropped.
        assert migrated_draft_change_list_v53_6 is None
    def test_convert_states_v51_dict_to_v52_dict(self) -> None:
        # Case 1: a plain content edit -- should survive unchanged.
        draft_change_list_v51_1 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'new value'
            })
        ]
        # Case 2: a next_content_id_index edit -- expected to be dropped.
        draft_change_list_v51_2 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'next_content_id_index',
                'new_value': 'new value'
            })
        ]
        # Case 3: an add-written-translation command -- expected to be
        # dropped.
        draft_change_list_v51_3 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
                'state_name': 'Intro',
                'content_id': 'content_id',
                'language_code': 'en',
                'content_html': 'content',
                'translation_html': 'content',
                'data_format': 'format_1',
            })
        ]
        self.create_and_migrate_new_exploration('51', '52')
        migrated_draft_change_list_v52_1 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v51_1, 1, 2, self.EXP_ID)
        )
        assert migrated_draft_change_list_v52_1 is not None
        self.assertEqual(
            [change.to_dict() for change in draft_change_list_v51_1],
            [change.to_dict() for change in migrated_draft_change_list_v52_1]
        )
        migrated_draft_change_list_v52_2 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v51_2, 1, 2, self.EXP_ID)
        )
        self.assertIsNone(migrated_draft_change_list_v52_2)
        migrated_draft_change_list_v52_3 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v51_3, 1, 2, self.EXP_ID)
        )
        self.assertIsNone(migrated_draft_change_list_v52_3)
    def test_convert_states_v50_dict_to_v51_dict(self) -> None:
        # A simple content edit; the v50 -> v51 conversion should be a
        # pass-through for it.
        draft_change_list_v50 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'new value'
            })
        ]
        # Migrate exploration to state schema version 51.
        self.create_and_migrate_new_exploration('50', '51')
        migrated_draft_change_list_v51 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v50, 1, 2, self.EXP_ID)
        )
        # Change draft change lists into a list of dicts so that it is
        # easy to compare the whole draft change list.
        # Ruling out the possibility of None for mypy type checking.
        assert migrated_draft_change_list_v51 is not None
        draft_change_list_v50_dict_list = [
            change.to_dict() for change in draft_change_list_v50
        ]
        migrated_draft_change_list_v51_dict_list = [
            change.to_dict() for change in migrated_draft_change_list_v51
        ]
        self.assertEqual(
            draft_change_list_v50_dict_list,
            migrated_draft_change_list_v51_dict_list)
    def test_convert_states_v49_dict_to_v50_dict(self) -> None:
        # A simple content edit; the v49 -> v50 conversion should be a
        # pass-through for it.
        draft_change_list_v49 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'new value'
            })
        ]
        # Migrate exploration to state schema version 49.
        self.create_and_migrate_new_exploration('49', '50')
        migrated_draft_change_list_v50 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v49, 1, 2, self.EXP_ID)
        )
        # Ruling out the possibility of None for mypy type checking.
        assert migrated_draft_change_list_v50 is not None
        # Change draft change lists into a list of dicts so that it is
        # easy to compare the whole draft change list.
        draft_change_list_v49_dict_list = [
            change.to_dict() for change in draft_change_list_v49
        ]
        migrated_draft_change_list_v50_dict_list = [
            change.to_dict() for change in migrated_draft_change_list_v50
        ]
        self.assertEqual(
            draft_change_list_v49_dict_list,
            migrated_draft_change_list_v50_dict_list)
    def test_convert_states_v48_dict_to_v49_dict(self) -> None:
        # A simple content edit; the v48 -> v49 conversion should be a
        # pass-through for it.
        draft_change_list_v48 = [
            exp_domain.ExplorationChange({
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': 'Intro',
                'property_name': 'content',
                'new_value': 'NumericInput'
            })
        ]
        # Migrate exploration to state schema version 48.
        self.create_and_migrate_new_exploration('48', '49')
        migrated_draft_change_list_v49 = (
            draft_upgrade_services.try_upgrading_draft_to_exp_version(
                draft_change_list_v48, 1, 2, self.EXP_ID)
        )
        # Ruling out the possibility of None for mypy type checking.
        assert migrated_draft_change_list_v49 is not None
        # Change draft change lists into a list of dicts so that it is
        # easy to compare the whole draft change list.
        draft_change_list_v48_dict_list = [
            change.to_dict() for change in draft_change_list_v48
        ]
        migrated_draft_change_list_v49_dict_list = [
            change.to_dict() for change in migrated_draft_change_list_v49
        ]
        self.assertEqual(
            draft_change_list_v48_dict_list,
            migrated_draft_change_list_v49_dict_list)
def test_convert_states_v47_dict_to_v48_dict(self) -> None:
    """Checks the draft change list upgrade across the v47 -> v48
    state schema migration, using payloads that contain non-ASCII
    characters and emoji.

    NOTE(review): the migrated draft is asserted equal to the
    original, so this payload is presumably already in the form the
    v48 conversion produces -- confirm against the conversion
    function if this test starts failing.
    """
    # One content edit and one customization-args edit, both carrying
    # SubtitledHtml payloads full of non-ASCII text.
    draft_change_list_v47 = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': state_domain.SubtitledHtml(
                'content',
                '<p>àãáâäåæçèéêëìíîïóúûýö÷üÕÇÖÑÓÄÀÜ×ßğīĻıİćęąĀ<p>'
                '<p>İźžśşɛمшصحếở“∉⅘√∈◯–⅖⅔≤€やんもをり北木我是西错õ</p>'
                '<p>üóäüñıīç×÷öóûؤ¡´</p>'
                '<p>😕😊😉🙄🙂😊🙂💡😑😊🔖😉😃🤖📷😂📀💿💯💡</p>'
                '<p>👋😱😑😊🎧🎙🎼📻🤳👌🚦🤗😄👉📡📣📢🔊²</p>'
            ).to_dict()
        }), exp_domain.ExplorationChange({
            'cmd': 'edit_state_property',
            'state_name': 'Intro',
            'property_name': 'widget_customization_args',
            'new_value': {
                'choices': {
                    'value': [
                        state_domain.SubtitledHtml(
                            'ca_choices_0',
                            '<p>àãáâäåæçèéêëìíîïóúûýö÷üÕÇÖÑÓÄÀÜ×ßğīĻıİć<p>'
                            '<p>İźžśşɛمшصحếở“∉⅘√∈◯–⅖⅔≤ęąĀ€やんもをり</p>'
                            '<p>üóäüñıīç×÷öóûؤ¡北木我是西错õ´😕😊😉</p>'
                            '<p>🙄🙂😊🙂💡😑😊🔖😉😃🤖📷😂📀💿💯💡</p>'
                            '<p>👋😱😑😊🎧🎙🎼📻🤳👌🚦🤗😄👉📡📣📢🔊²</p>'
                        ).to_dict()
                    ]
                },
                'showChoicesInShuffledOrder': {'value': True}
            }
        })
    ]
    # Migrate exploration to state schema version 48.
    self.create_and_migrate_new_exploration('47', '48')
    migrated_draft_change_list_v48 = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_change_list_v47, 1, 2, self.EXP_ID)
    )
    # Ruling out the possibility of None for mypy type checking.
    assert migrated_draft_change_list_v48 is not None
    # Change draft change lists into a list of dicts so that it is
    # easy to compare the whole draft change list.
    draft_change_list_v47_dict_list = [
        change.to_dict() for change in draft_change_list_v47
    ]
    migrated_draft_change_list_v48_dict_list = [
        change.to_dict() for change in migrated_draft_change_list_v48
    ]
    self.assertEqual(
        draft_change_list_v47_dict_list,
        migrated_draft_change_list_v48_dict_list)
def test_convert_states_v46_dict_to_v47_dict(self) -> None:
    """Checks the draft change list upgrade across the v46 -> v47
    state schema migration, using payloads containing
    oppia-noninteractive-svgdiagram rich-text components.

    NOTE(review): the migrated draft is asserted equal to the
    original, so the v46 -> v47 draft conversion presumably leaves
    these svgdiagram payloads untouched -- confirm against the
    conversion function if this test starts failing.
    """
    # One content edit and one customization-args edit, each embedding
    # svgdiagram tags (with entity-escaped attribute values).
    draft_change_list_v46 = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': state_domain.SubtitledHtml(
                'content',
                '<oppia-noninteractive-svgdiagram '
                'svg_filename-with-value="&quot;img12.svg&quot;"'
                ' alt-with-value="&quot;Image&quot;">'
                '</oppia-noninteractive-svgdiagram>'
                '<oppia-noninteractive-svgdiagram '
                'svg_filename-with-value="&quot;img2.svg&quot;"'
                ' alt-with-value="&quot;Image123&quot;">'
                '</oppia-noninteractive-svgdiagram>'
                '<oppia-noninteractive-svgdiagram '
                'alt-with-value="&quot;Image12345&quot;"'
                ' svg_filename-with-value="&quot;igage.svg&quot;">'
                '</oppia-noninteractive-svgdiagram>'
            ).to_dict()
        }), exp_domain.ExplorationChange({
            'cmd': 'edit_state_property',
            'state_name': 'Intro',
            'property_name': 'widget_customization_args',
            'new_value': {
                'choices': {
                    'value': [
                        state_domain.SubtitledHtml(
                            'ca_choices_0',
                            '<oppia-noninteractive-svgdiagram '
                            'svg_filename-with-value="&quot;'
                            'img12.svg&quot;" alt-with-value="'
                            '&quot;Image&quot;">'
                            '</oppia-noninteractive-svgdiagram>'
                        ).to_dict()
                    ]
                },
                'showChoicesInShuffledOrder': {'value': True}
            }
        })
    ]
    # Migrate exploration to state schema version 47.
    self.create_and_migrate_new_exploration('46', '47')
    migrated_draft_change_list_v47 = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_change_list_v46, 1, 2, self.EXP_ID)
    )
    # Ruling out the possibility of None for mypy type checking.
    assert migrated_draft_change_list_v47 is not None
    # Change draft change lists into a list of dicts so that it is
    # easy to compare the whole draft change list.
    draft_change_list_v46_dict_list = [
        change.to_dict() for change in draft_change_list_v46
    ]
    migrated_draft_change_list_v47_dict_list = [
        change.to_dict() for change in migrated_draft_change_list_v47
    ]
    self.assertEqual(
        draft_change_list_v46_dict_list,
        migrated_draft_change_list_v47_dict_list)
def test_convert_states_v45_dict_to_v46_dict(self) -> None:
    """Checks that a draft change list passes through the v45 -> v46
    state schema migration unchanged.
    """
    source_draft = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        })
    ]
    # Bring the exploration up to state schema version 46.
    self.create_and_migrate_new_exploration('45', '46')
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            source_draft, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Comparing dict representations verifies the whole change list
    # in a single assertion.
    self.assertEqual(
        [change.to_dict() for change in source_draft],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v44_dict_to_v45_dict(self) -> None:
    """Checks that a draft change list passes through the v44 -> v45
    state schema migration unchanged.
    """
    source_draft = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        })
    ]
    # Bring the exploration up to state schema version 45.
    self.create_and_migrate_new_exploration('44', '45')
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            source_draft, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Comparing dict representations verifies the whole change list
    # in a single assertion.
    self.assertEqual(
        [change.to_dict() for change in source_draft],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v43_dict_to_v44_dict(self) -> None:
    """Checks that a draft change list passes through the v43 -> v44
    state schema migration unchanged.
    """
    source_draft = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Introduction',
            'property_name': 'content',
            'new_value': 'new value'
        })
    ]
    # Bring the exploration up to state schema version 44.
    self.create_and_migrate_new_exploration('43', '44')
    # Version 44 adds the card_is_checkpoint boolean variable to the
    # exploration state, so no changes to the draft change list are
    # expected.
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            source_draft, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Comparing dict representations verifies the whole change list
    # in a single assertion.
    self.assertEqual(
        [change.to_dict() for change in source_draft],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v42_dict_to_v43_dict(self) -> None:
    """Checks the v42 -> v43 draft upgrade: drafts that edit answer
    groups are dropped, while other drafts pass through unchanged.
    """
    empty_value: Dict[str, str] = {}
    draft_with_answer_groups = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'answer_groups',
            'new_value': empty_value
        })
    ]
    draft_without_answer_groups = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        })
    ]
    # Bring the exploration up to state schema version 43.
    self.create_and_migrate_new_exploration('42', '43')
    # Changes which include answer groups cannot be upgraded to v43.
    self.assertIsNone(
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_with_answer_groups, 1, 2, self.EXP_ID))
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_without_answer_groups, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Changes which do not include answer groups are carried over
    # unchanged; compare dict forms to check the whole list at once.
    self.assertEqual(
        [change.to_dict() for change in draft_without_answer_groups],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v41_dict_to_v42_dict(self) -> None:
    """Checks the v41 -> v42 draft upgrade: drafts that edit answer
    groups are dropped, while other drafts pass through unchanged.
    """
    empty_value: Dict[str, str] = {}
    draft_with_answer_groups = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'answer_groups',
            'new_value': empty_value
        })
    ]
    draft_without_answer_groups = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        })
    ]
    # Bring the exploration up to state schema version 42.
    self.create_and_migrate_new_exploration('41', '42')
    # Changes which include answer groups cannot be upgraded to v42.
    self.assertIsNone(
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_with_answer_groups, 1, 2, self.EXP_ID))
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_without_answer_groups, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Changes which do not include answer groups are carried over
    # unchanged; compare dict forms to check the whole list at once.
    self.assertEqual(
        [change.to_dict() for change in draft_without_answer_groups],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v40_dict_to_v41_dict(self) -> None:
    """Checks the v40 -> v41 draft upgrade: drafts that edit answer
    groups are dropped, while other drafts pass through unchanged.
    """
    empty_value: Dict[str, str] = {}
    draft_with_answer_groups = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'answer_groups',
            'new_value': empty_value
        })
    ]
    draft_without_answer_groups = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        })
    ]
    # Bring the exploration up to state schema version 41.
    self.create_and_migrate_new_exploration('40', '41')
    # Changes which include answer groups cannot be upgraded to v41.
    self.assertIsNone(
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_with_answer_groups, 1, 2, self.EXP_ID))
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_without_answer_groups, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Changes which do not include answer groups are carried over
    # unchanged; compare dict forms to check the whole list at once.
    self.assertEqual(
        [change.to_dict() for change in draft_without_answer_groups],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v39_dict_to_v40_dict(self) -> None:
    """Checks the v39 -> v40 draft upgrade: drafts that edit widget
    customization args are dropped, while other drafts pass through
    unchanged.
    """
    empty_value: Dict[str, str] = {}
    draft_with_cust_args = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_customization_args',
            'new_value': empty_value
        })
    ]
    draft_without_cust_args = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        })
    ]
    # Bring the exploration up to state schema version 40.
    self.create_and_migrate_new_exploration('39', '40')
    # Changes which include customization arguments cannot be
    # upgraded to v40.
    self.assertIsNone(
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_with_cust_args, 1, 2, self.EXP_ID))
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_without_cust_args, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Changes which do not include customization arguments are
    # carried over unchanged.
    self.assertEqual(
        [change.to_dict() for change in draft_without_cust_args],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v38_dict_to_v39_dict(self) -> None:
    """Checks that a draft change list passes through the v38 -> v39
    state schema migration unchanged.
    """
    source_draft = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        })
    ]
    # Bring the exploration up to state schema version 39.
    self.create_and_migrate_new_exploration('38', '39')
    # Version 39 adds a customization arg for modifying the
    # placeholder text in the Numeric Expression Input interaction,
    # so no changes to the draft change list are expected.
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            source_draft, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Comparing dict representations verifies the whole change list
    # in a single assertion.
    self.assertEqual(
        [change.to_dict() for change in source_draft],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v37_dict_to_v38_dict(self) -> None:
    """Checks that a draft change list passes through the v37 -> v38
    state schema migration unchanged.
    """
    source_draft = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        })
    ]
    # Bring the exploration up to state schema version 38.
    self.create_and_migrate_new_exploration('37', '38')
    # Version 38 adds a customization arg for the "Add" button text
    # in the SetInput interaction, so no changes to the draft change
    # list are expected.
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            source_draft, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Comparing dict representations verifies the whole change list
    # in a single assertion.
    self.assertEqual(
        [change.to_dict() for change in source_draft],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v36_dict_to_v37_dict(self) -> None:
    """Checks that the v36 -> v37 draft upgrade rewrites
    'CaseSensitiveEquals' rule types to 'Equals' in answer groups,
    leaving everything else intact.
    """
    # Draft as written against state schema v36 (old rule type).
    draft_before_upgrade = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': 'edit_state_property',
            'state_name': 'Intro',
            'property_name': 'answer_groups',
            'new_value': [{
                'rule_specs': [{
                    'rule_type': 'CaseSensitiveEquals',
                    'inputs': {
                        'x': 'test'
                    }
                }],
                'outcome': {
                    'dest': 'Introduction',
                    'feedback': {
                        'content_id': 'feedback',
                        'html': '<p>Content</p>'
                    },
                    'param_changes': [],
                    'labelled_as_correct': False,
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'training_data': [],
                'tagged_skill_misconception_id': None
            }]
        })
    ]
    # The same draft as it should look after upgrading to v37 (rule
    # type renamed to 'Equals').
    expected_draft_after_upgrade = [
        exp_domain.ExplorationChange({
            'cmd': 'edit_state_property',
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': 'edit_state_property',
            'state_name': 'Intro',
            'property_name': 'answer_groups',
            'new_value': [{
                'rule_specs': [{
                    'rule_type': 'Equals',
                    'inputs': {
                        'x': 'test'
                    }
                }],
                'outcome': {
                    'dest': 'Introduction',
                    'feedback': {
                        'content_id': 'feedback',
                        'html': '<p>Content</p>'
                    },
                    'param_changes': [],
                    'labelled_as_correct': False,
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'training_data': [],
                'tagged_skill_misconception_id': None
            }]
        })
    ]
    # Bring the exploration up to state schema version 37.
    self.create_and_migrate_new_exploration('36', '37')
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_before_upgrade, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # Comparing dict representations verifies the whole change list
    # in a single assertion.
    self.assertEqual(
        [change.to_dict() for change in expected_draft_after_upgrade],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v35_dict_to_v36_dict(self) -> None:
    """Checks the v35 -> v36 draft upgrade: drafts that edit widget
    customization args are dropped, while other drafts pass through
    unchanged.
    """
    empty_value: Dict[str, str] = {}
    draft_with_cust_args = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_customization_args',
            'new_value': empty_value
        })
    ]
    draft_without_cust_args = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        })
    ]
    # Bring the exploration up to state schema version 36.
    self.create_and_migrate_new_exploration('35', '36')
    # The draft touching widget_customization_args cannot be upgraded.
    self.assertIsNone(
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_with_cust_args, 1, 2, self.EXP_ID))
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_without_cust_args, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # The draft without customization-arg edits is carried over
    # unchanged; compare dict forms to check the whole list at once.
    self.assertEqual(
        [change.to_dict() for change in draft_without_cust_args],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v34_dict_to_v35_dict(self) -> None:
    """Checks the v34 -> v35 draft upgrade: a draft that edits
    MathExpressionInput answer groups (containing an
    'IsMathematicallyEquivalentTo' rule) is dropped, while a plain
    content edit passes through unchanged.
    """
    draft_with_math_rules = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'widget_id',
            'new_value': 'MathExpressionInput'
        }),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'answer_groups',
            'new_value': [{
                'rule_specs': [{
                    'rule_type': 'IsMathematicallyEquivalentTo',
                    'inputs': {
                        'x': 'x+y/2'
                    }
                }],
                'outcome': {
                    'dest': 'Introduction',
                    'feedback': {
                        'content_id': 'feedback',
                        'html': '<p>Content</p>'
                    },
                    'param_changes': [],
                    'labelled_as_correct': False,
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'training_data': [],
                'tagged_skill_misconception_id': None
            }]
        })
    ]
    draft_with_content_only = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': 'new value'
        })
    ]
    # Bring the exploration up to state schema version 35.
    self.create_and_migrate_new_exploration('34', '35')
    # The draft containing math-rule answer groups cannot be upgraded.
    self.assertIsNone(
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_with_math_rules, 1, 2, self.EXP_ID))
    upgraded_draft = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_with_content_only, 1, 2, self.EXP_ID))
    # Narrow the Optional return type for mypy.
    assert upgraded_draft is not None
    # The content-only draft is carried over unchanged; compare dict
    # forms to check the whole list at once.
    self.assertEqual(
        [change.to_dict() for change in draft_with_content_only],
        [change.to_dict() for change in upgraded_draft])
def test_convert_states_v33_dict_to_v34_dict(self) -> None:
    """Checks that the v33 -> v34 draft upgrade rewrites
    oppia-noninteractive-math tags from the old raw_latex-with-value
    attribute form to the new math_content-with-value form, in every
    draft field that can embed HTML (customization args, answer
    groups, content, written translations, solutions, default
    outcome and hints).

    The final CMD_RENAME_STATE change in the draft carries no HTML
    and is not individually asserted.
    """
    # Old-format math tag embedded in every payload below.
    html_content = (
        '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
        'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
    # The same tag after conversion to the math_content attribute.
    expected_html_content = (
        '<p>Value</p><oppia-noninteractive-math math_content-with-value='
        '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
        'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
        '-noninteractive-math>')
    draft_change_list = [
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'state2',
            'property_name': 'widget_customization_args',
            'new_value': {
                'choices': {
                    'value': [
                        '<p>1</p>',
                        '<p>2</p>',
                        html_content,
                        '<p>4</p>'
                    ]
                },
                'maxAllowableSelectionCount': {
                    'value': 1
                },
                'minAllowableSelectionCount': {
                    'value': 1
                }
            }
        }), exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': 'answer_groups',
            'state_name': 'State 1',
            # One rule spec per rule type that can carry HTML inputs.
            'new_value': [{
                'rule_specs': [{
                    'rule_type': 'Equals',
                    'inputs': {
                        'x': [html_content]
                    }
                }, {
                    'rule_type': 'ContainsAtLeastOneOf',
                    'inputs': {
                        'x': [html_content]
                    }
                }, {
                    'rule_type': 'IsProperSubsetOf',
                    'inputs': {
                        'x': [html_content]
                    }
                }, {
                    'rule_type': 'DoesNotContainAtLeastOneOf',
                    'inputs': {
                        'x': [html_content]
                    }
                }, {
                    'rule_type': 'Equals',
                    'inputs': {
                        'x': 1
                    }
                }, {
                    'rule_type': 'HasElementXAtPositionY',
                    'inputs': {
                        'x': html_content,
                        'y': 2
                    }
                }, {
                    'rule_type': 'IsEqualToOrdering',
                    'inputs': {
                        'x': [[html_content]]
                    }
                }, {
                    'rule_type': 'HasElementXBeforeElementY',
                    'inputs': {
                        'x': html_content,
                        'y': html_content
                    }
                }, {
                    'rule_type': (
                        'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
                    'inputs': {
                        'x': [[html_content]]
                    }
                }],
                'outcome': {
                    'dest': 'Introduction',
                    'feedback': {
                        'content_id': 'feedback',
                        'html': html_content
                    },
                    'param_changes': [],
                    'labelled_as_correct': False,
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'training_data': [],
                'tagged_skill_misconception_id': None
            }]
        }), exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': {
                'content_id': 'content',
                'html': html_content
            }
        }), exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'written_translations',
            'new_value': {
                'translations_mapping': {
                    # Here we use MyPy ignore because we are testing convert
                    # function and in convert function we are working with
                    # previous versions of the domain object and in previous
                    # versions of the domain object there are some fields
                    # (eg: html) that are discontinued in the latest domain
                    # object. So, while defining these old keys MyPy throw
                    # an error. To avoid the error, we used ignore here.
                    'content1': {
                        'en': {  # type: ignore[typeddict-item]
                            'html': html_content,
                            'needs_update': True
                        },
                        # Here we use MyPy ignore because here we are
                        # defining 'html' key that was deprecated from
                        # the latest domain object and causing MyPy to
                        # throw an error. Thus, to silence the error,
                        # we used ignore here.
                        'hi': {  # type: ignore[typeddict-item]
                            'html': 'Hey!',
                            'needs_update': False
                        }
                    },
                    'feedback_1': {
                        # Here we use MyPy ignore because here we are
                        # defining 'html' key that was deprecated from
                        # the latest domain object and causing MyPy to
                        # throw an error. Thus, to silence the error,
                        # we used ignore here.
                        'hi': {  # type: ignore[typeddict-item]
                            'html': html_content,
                            'needs_update': False
                        },
                        # Here we use MyPy ignore because here we are
                        # defining 'html' key that was deprecated from
                        # the latest domain object and causing MyPy to
                        # throw an error. Thus, to silence the error,
                        # we used ignore here.
                        'en': {  # type: ignore[typeddict-item]
                            'html': 'hello!',
                            'needs_update': False
                        }
                    }
                }
            }
        }), exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'solution',
            'new_value': {
                'answer_is_exclusive': False,
                'correct_answer': 'helloworld!',
                'explanation': {
                    'content_id': 'solution',
                    'html': html_content
                },
            }
        }), exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'solution',
            'new_value': {
                'answer_is_exclusive': True,
                'correct_answer': [
                    [html_content],
                    ['<p>2</p>'],
                    ['<p>3</p>'],
                    ['<p>4</p>']
                ],
                'explanation': {
                    'content_id': 'solution',
                    'html': '<p>This is solution for state1</p>'
                }
            }
        }), exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'default_outcome',
            'new_value': {
                'param_changes': [],
                'feedback': {
                    'content_id': 'default_outcome',
                    'html': html_content
                },
                'dest': 'Introduction',
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None,
                'labelled_as_correct': False
            }
        }), exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'hints',
            'new_value': [{
                'hint_content': {
                    'content_id': 'hint1',
                    'html': html_content
                }
            }]
        }), exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_RENAME_STATE,
            'old_state_name': 'Intro',
            'new_state_name': 'Introduction',
        })
    ]
    # Migrate exploration to state schema version 34.
    self.create_and_migrate_new_exploration('33', '34')
    migrated_draft_change_list = (
        draft_upgrade_services.try_upgrading_draft_to_exp_version(
            draft_change_list, 1, 2, self.EXP_ID))
    # Ruling out the possibility of None for mypy type checking.
    assert migrated_draft_change_list is not None
    # Each migrated change is asserted individually against the same
    # change with html_content replaced by expected_html_content.
    self.assertEqual(
        migrated_draft_change_list[0].to_dict(),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'state2',
            'property_name': 'widget_customization_args',
            'new_value': {
                'choices': {
                    'value': [
                        '<p>1</p>',
                        '<p>2</p>',
                        expected_html_content,
                        '<p>4</p>'
                    ]
                },
                'maxAllowableSelectionCount': {
                    'value': 1
                },
                'minAllowableSelectionCount': {
                    'value': 1
                }
            }
        }).to_dict())
    self.assertEqual(
        migrated_draft_change_list[1].to_dict(),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'property_name': 'answer_groups',
            'state_name': 'State 1',
            'new_value': [{
                'rule_specs': [{
                    'rule_type': 'Equals',
                    'inputs': {
                        'x': [expected_html_content]
                    }
                }, {
                    'rule_type': 'ContainsAtLeastOneOf',
                    'inputs': {
                        'x': [expected_html_content]
                    }
                }, {
                    'rule_type': 'IsProperSubsetOf',
                    'inputs': {
                        'x': [expected_html_content]
                    }
                }, {
                    'rule_type': 'DoesNotContainAtLeastOneOf',
                    'inputs': {
                        'x': [expected_html_content]
                    }
                }, {
                    'rule_type': 'Equals',
                    'inputs': {
                        'x': 1
                    }
                }, {
                    'rule_type': 'HasElementXAtPositionY',
                    'inputs': {
                        'x': expected_html_content,
                        'y': 2
                    }
                }, {
                    'rule_type': 'IsEqualToOrdering',
                    'inputs': {
                        'x': [[expected_html_content]]
                    }
                }, {
                    'rule_type': 'HasElementXBeforeElementY',
                    'inputs': {
                        'x': expected_html_content,
                        'y': expected_html_content
                    }
                }, {
                    'rule_type': (
                        'IsEqualToOrderingWithOneItemAtIncorrectPosition'),
                    'inputs': {
                        'x': [[expected_html_content]]
                    }
                }],
                'outcome': {
                    'dest': 'Introduction',
                    'feedback': {
                        'content_id': 'feedback',
                        'html': expected_html_content
                    },
                    'param_changes': [],
                    'labelled_as_correct': False,
                    'refresher_exploration_id': None,
                    'missing_prerequisite_skill_id': None
                },
                'training_data': [],
                'tagged_skill_misconception_id': None
            }]
        }).to_dict())
    self.assertEqual(
        migrated_draft_change_list[2].to_dict(),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'content',
            'new_value': {
                'content_id': 'content',
                'html': expected_html_content
            }
        }).to_dict())
    self.assertEqual(
        migrated_draft_change_list[3].to_dict(),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'written_translations',
            'new_value': {
                'translations_mapping': {
                    # Here we use MyPy ignore because we are testing convert
                    # function and in convert function we are working with
                    # previous versions of the domain object and in previous
                    # versions of the domain object there are some fields
                    # (eg: html) that are discontinued in the latest domain
                    # object. So, while defining these old keys MyPy throw
                    # an error. To avoid the error, we used ignore here.
                    'content1': {
                        'en': {  # type: ignore[typeddict-item]
                            'html': expected_html_content,
                            'needs_update': True
                        },
                        # Here we use MyPy ignore because here we are
                        # defining 'html' key that was deprecated from
                        # the latest domain object and causing MyPy to
                        # throw an error. Thus, to silence the error,
                        # we used ignore here.
                        'hi': {  # type: ignore[typeddict-item]
                            'html': 'Hey!',
                            'needs_update': False
                        }
                    },
                    'feedback_1': {
                        # Here we use MyPy ignore because here we are
                        # defining 'html' key that was deprecated from
                        # the latest domain object and causing MyPy to
                        # throw an error. Thus, to silence the error,
                        # we used ignore here.
                        'hi': {  # type: ignore[typeddict-item]
                            'html': expected_html_content,
                            'needs_update': False
                        },
                        # Here we use MyPy ignore because here we are
                        # defining 'html' key that was deprecated from
                        # the latest domain object and causing MyPy to
                        # throw an error. Thus, to silence the error,
                        # we used ignore here.
                        'en': {  # type: ignore[typeddict-item]
                            'html': 'hello!',
                            'needs_update': False
                        }
                    }
                }
            }
        }).to_dict())
    self.assertEqual(
        migrated_draft_change_list[4].to_dict(),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'solution',
            'new_value': {
                'answer_is_exclusive': False,
                'correct_answer': 'helloworld!',
                'explanation': {
                    'content_id': 'solution',
                    'html': expected_html_content
                },
            }
        }).to_dict())
    self.assertEqual(
        migrated_draft_change_list[5].to_dict(),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'solution',
            'new_value': {
                'answer_is_exclusive': True,
                'correct_answer': [
                    [expected_html_content],
                    ['<p>2</p>'],
                    ['<p>3</p>'],
                    ['<p>4</p>']
                ],
                'explanation': {
                    'content_id': 'solution',
                    'html': '<p>This is solution for state1</p>'
                }
            }
        }).to_dict())
    self.assertEqual(
        migrated_draft_change_list[6].to_dict(),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'default_outcome',
            'new_value': {
                'param_changes': [],
                'feedback': {
                    'content_id': 'default_outcome',
                    'html': expected_html_content
                },
                'dest': 'Introduction',
                'refresher_exploration_id': None,
                'missing_prerequisite_skill_id': None,
                'labelled_as_correct': False
            }
        }).to_dict())
    self.assertEqual(
        migrated_draft_change_list[7].to_dict(),
        exp_domain.ExplorationChange({
            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
            'state_name': 'Intro',
            'property_name': 'hints',
            'new_value': [{
                'hint_content': {
                    'content_id': 'hint1',
                    'html': expected_html_content
                }
            }]
        }).to_dict())
def test_convert_states_v32_dict_to_v33_dict(self) -> None:
draft_change_list_v32 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state1',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
}
}
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
})
]
# Version 33 adds a showChoicesInShuffledOrder bool, which doesn't
# impact the second ExplorationChange because it will only impact
# it if 'choices' is the only key for new_value.
expected_draft_change_list_v33 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state1',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'showChoicesInShuffledOrder': {
'value': False
}
}
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state2',
'property_name': 'widget_customization_args',
'new_value': {
'choices': {
'value': [
'<p>1</p>',
'<p>2</p>',
'<p>3</p>',
'<p>4</p>'
]
},
'maxAllowableSelectionCount': {
'value': 1
},
'minAllowableSelectionCount': {
'value': 1
}
}
})
]
# Migrate exploration to state schema version 33.
self.create_and_migrate_new_exploration('32', '33')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v33 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v32, 1, 2, self.EXP_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert migrated_draft_change_list_v33 is not None
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v33_dict_list = [
change.to_dict() for change in expected_draft_change_list_v33
]
migrated_draft_change_list_v33_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v33
]
self.assertItemsEqual(
expected_draft_change_list_v33_dict_list,
migrated_draft_change_list_v33_dict_list)
def test_convert_states_v31_dict_to_v32_dict(self) -> None:
draft_change_list_v31 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 32.
self.create_and_migrate_new_exploration('31', '32')
# Migrate the draft change list's state schema to the migrated
# exploration's schema. In this case there are no changes to the
# draft change list since version 32 adds a customization arg
# for the "Add" button text in SetInput interaction for the
# exploration, for which there should be no changes to drafts.
migrated_draft_change_list_v32 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v31, 1, 2, self.EXP_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert migrated_draft_change_list_v32 is not None
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v31_dict_list = [
change.to_dict() for change in draft_change_list_v31
]
migrated_draft_change_list_v32_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v32
]
self.assertEqual(
draft_change_list_v31_dict_list,
migrated_draft_change_list_v32_dict_list)
def test_convert_states_v30_dict_to_v31_dict(self) -> None:
draft_change_list_v30 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'recorded_voiceovers',
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'file_size_bytes': 100,
'filename': 'atest.mp3',
'needs_update': False,
'duration_secs': 0.0
}
}
}
}
})
]
# Version 31 adds the duration_secs property.
expected_draft_change_list_v31 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'recorded_voiceovers',
'new_value': {
'voiceovers_mapping': {
'content': {
'en': {
'file_size_bytes': 100,
'filename': 'atest.mp3',
'needs_update': False,
'duration_secs': 0.0
}
}
}
}
})
]
# Migrate exploration to state schema version 31.
self.create_and_migrate_new_exploration('30', '31')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v31 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v30, 1, 2, self.EXP_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert migrated_draft_change_list_v31 is not None
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v31_dict_list = [
change.to_dict() for change in expected_draft_change_list_v31
]
migrated_draft_change_list_v31_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v31
]
self.assertEqual(
expected_draft_change_list_v31_dict_list,
migrated_draft_change_list_v31_dict_list)
def test_convert_states_v29_dict_to_v30_dict(self) -> None:
draft_change_list_v29 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'dest_if_really_stuck': None,
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_misconception_id': None
}]
})
]
# Version 30 replaces the tagged_misconception_id in version 29
# with tagged_skill_misconception_id.
expected_draft_change_list_v30 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'answer_groups',
'state_name': 'State 1',
'new_value': [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'dest_if_really_stuck': None,
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
})
]
# Migrate exploration to state schema version 30.
self.create_and_migrate_new_exploration('29', '30')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v30 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v29, 1, 2, self.EXP_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert migrated_draft_change_list_v30 is not None
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v30_dict_list = [
change.to_dict() for change in expected_draft_change_list_v30
]
migrated_draft_change_list_v30_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v30
]
self.assertEqual(
expected_draft_change_list_v30_dict_list,
migrated_draft_change_list_v30_dict_list)
def test_convert_states_v28_dict_to_v29_dict(self) -> None:
draft_change_list_v28 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'Intro',
'property_name': 'content',
'new_value': 'new value'
})
]
# Migrate exploration to state schema version 29.
self.create_and_migrate_new_exploration('28', '29')
# Migrate the draft change list's state schema to the migrated
# exploration's schema. In this case there are no change to the
# draft change list since version 29 adds the
# solicit_answer_details boolean variable to the exploration
# state, for which there should be no changes to drafts.
migrated_draft_change_list_v29 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v28, 1, 2, self.EXP_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert migrated_draft_change_list_v29 is not None
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
draft_change_list_v28_dict_list = [
change.to_dict() for change in draft_change_list_v28
]
migrated_draft_change_list_v29_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v29
]
self.assertEqual(
draft_change_list_v28_dict_list,
migrated_draft_change_list_v29_dict_list)
def test_convert_states_v27_dict_to_v28_dict(self) -> None:
draft_change_list_v27 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'content_ids_to_audio_translations',
'state_name': 'State B',
'new_value': {
'content': {
'en': {
'file_size_bytes': 100,
'filename': 'atest.mp3',
'needs_update': False,
'duration_secs': 0.0
}
}
},
})
]
# Version 28 adds voiceovers_mapping.
expected_draft_change_list_v28 = [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'recorded_voiceovers',
'state_name': 'State B',
'new_value': {'voiceovers_mapping': {
'content': {
'en': {
'file_size_bytes': 100,
'filename': 'atest.mp3',
'needs_update': False,
'duration_secs': 0.0
}
}
}
}
})
]
# Migrate exploration to state schema version 28.
self.create_and_migrate_new_exploration('27', '28')
# Migrate the draft change list's state schema to the migrated
# exploration's schema.
migrated_draft_change_list_v28 = (
draft_upgrade_services.try_upgrading_draft_to_exp_version(
draft_change_list_v27, 1, 2, self.EXP_ID)
)
# Ruling out the possibility of None for mypy type checking.
assert migrated_draft_change_list_v28 is not None
# Change draft change lists into a list of dicts so that it is
# easy to compare the whole draft change list.
expected_draft_change_list_v28_dict_list = [
change.to_dict() for change in expected_draft_change_list_v28
]
migrated_draft_change_list_v28_dict_list = [
change.to_dict() for change in migrated_draft_change_list_v28
]
self.assertEqual(
expected_draft_change_list_v28_dict_list,
migrated_draft_change_list_v28_dict_list)
| {
"content_hash": "333c08d91056585d4d7445ca0552b125",
"timestamp": "",
"source": "github",
"line_count": 2122,
"max_line_length": 80,
"avg_line_length": 43.433081998114986,
"alnum_prop": 0.4839147181685021,
"repo_name": "oppia/oppia",
"id": "76dcd1bcb020d09a58945edfe51c3b22dc27cd46",
"size": "93252",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/draft_upgrade_services_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.contrib.auth.models import User
class BaseTestCase(TestCase):
    """Shared fixture: logs the client in as user 'bob' and creates a
    second, bystander user 'alice'.
    """

    def setUp(self):
        bob = self.create_user('bob')
        self.me = bob
        self.login(bob.username)
        self.her = self.create_user('alice')

    def create_user(self, username='bob'):
        """Create and return a user; every test user shares the password
        'secret'.
        """
        return User.objects.create_user(username, password='secret')

    def login(self, username):
        """Authenticate the Django test client as *username*."""
        self.client.login(username=username, password='secret')
class BaseImageTest(BaseTestCase):
    """Test base exposing ``testfile``, the path to a 1px GIF fixture."""

    # Compute the fixture path at class-creation time, then delete the
    # imported helpers so they do not linger as class attributes: on
    # Python 2, functions stored on a class become unbound methods, so a
    # stray ``self.join``/``self.dirname`` lookup would silently resolve
    # to os.path functions instead of raising AttributeError.
    from os.path import join, dirname
    testfile = join(dirname(dirname(dirname(__file__))), 'tests', '1px.gif')
    del join, dirname
class TestHomePage(BaseTestCase):
    """Smoke test for the site root."""

    def test_home_page(self):
        # assertContains also verifies the default 200 status code.
        response = self.client.get('/')
        self.assertContains(response, 'Homepage')
class TestTags(BaseTestCase):
    """Smoke test for the tag index page."""

    def test_get_index(self):
        self.assertEqual(self.client.get("/tags/").status_code, 200)
| {
"content_hash": "062ae70293635152898f7d4a196f2644",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.6713513513513514,
"repo_name": "amarandon/smeuhsocial",
"id": "4e9ad93800696bcac21d3a43444b580f3dd001a4",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/smeuhoverride/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "72383"
},
{
"name": "HTML",
"bytes": "205774"
},
{
"name": "JavaScript",
"bytes": "18844"
},
{
"name": "Makefile",
"bytes": "781"
},
{
"name": "Python",
"bytes": "604547"
}
],
"symlink_target": ""
} |
import cv2
cascade_path = "./haarcascade_frontalface_alt.xml"
color = (255, 255, 255)  # color of rectangle for face detection

cam = cv2.VideoCapture(0)
# Load the Haar cascade once, up front: constructing a CascadeClassifier
# parses the XML model file, which is far too expensive to repeat for
# every captured frame (the original reloaded it inside the loop).
cascade = cv2.CascadeClassifier(cascade_path)
count = 0
while True:
    ret, capture = cam.read()
    if not ret:
        print('error')
        break
    count += 1
    # count is reset to 0 after a detection below, so the frame right
    # after a hit is skipped (simple frame throttling, kept as-is).
    if count > 1:
        image = capture.copy()
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        facerect = cascade.detectMultiScale(
            image_gray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))
        if len(facerect) > 0:
            for rect in facerect:
                cv2.rectangle(
                    image, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]),
                    color, thickness=2)
            count = 0
        cv2.imshow('face detector', image)
    # waitKey both pumps the HighGUI event loop (imshow does not render
    # without it) and gives the user a way out: press Esc to quit.
    if cv2.waitKey(1) & 0xFF == 27:
        break
cam.release()
cv2.destroyAllWindows()
| {
"content_hash": "eea42256aa4a9698826da8a8a21aa4f0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 104,
"avg_line_length": 28.821428571428573,
"alnum_prop": 0.6257744733581165,
"repo_name": "karaage0703/python-image-processing",
"id": "6c00590100d74a9207c37d99089fd1f6358c0155",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "face_detection_camera.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23923"
},
{
"name": "Shell",
"bytes": "646"
}
],
"symlink_target": ""
} |
"""
SnapSearch.api.backend
~~~~~~~~~~~~~~~~~~~~~~
wrapper for communication with SnapSearch backend service
:copyright: 2014 by `SnapSearch <https://snapsearch.io/>`_
:license: MIT, see LICENSE for more details.
:author: `LIU Yu <liuyu@opencps.net>`_
:date: 2014/03/08
"""
__all__ = ['dispatch', ]
import json
import os
import sys
import SnapSearch.error as error
from .._compat import b
from .response import Response
def _build_message(content):
    """Encode *content* and build the HTTP headers for a backend POST.

    Returns a ``(headers, payload)`` tuple: ``payload`` is the
    byte-encoded request body and ``headers`` maps header names to
    values, including the computed Content-Length.
    """
    # Imported at call time rather than module load so tests can
    # override the package-level constants.
    from . import SNAPSEARCH_API_ACCEPT_ENCODING, SNAPSEARCH_API_USER_AGENT
    body = b(content)
    request_headers = {
        "User-Agent": SNAPSEARCH_API_USER_AGENT,
        "Accept-Encoding": SNAPSEARCH_API_ACCEPT_ENCODING,
        "Content-Type": "application/json",
        "Content-Length": str(len(body)),
    }
    return request_headers, body
def _dispatch_via_requests(**kwds):
# import locally to allow override
from . import SNAPSEARCH_API_FOLLOW_REDIRECT, SNAPSEARCH_API_TIMEOUT
# HTTPS connection
import requests
s = requests.Session()
# HTTPS POST request
headers, payload = _build_message(kwds['payload'])
try:
r = s.request(
method="POST",
url=kwds['url'],
verify=kwds['ca_path'],
auth=(kwds['email'], kwds['key']),
data=payload,
headers=headers,
allow_redirects=SNAPSEARCH_API_FOLLOW_REDIRECT,
timeout=SNAPSEARCH_API_TIMEOUT)
except Exception as e:
raise error.SnapSearchConnectionError(e)
else:
return Response(
status=r.status_code, headers=r.headers, body=json.loads(r.text))
finally:
s.close()
pass # void return
def _dispatch_via_pycurl(**kwds):
# import locally to allow override
from . import (
SNAPSEARCH_API_ACCEPT_ENCODING,
SNAPSEARCH_API_FOLLOW_REDIRECT,
SNAPSEARCH_API_TIMEOUT, )
# HTTPS connection
import pycurl
c = pycurl.Curl()
c.setopt(pycurl.URL, kwds['url'])
# HTTPS POST request
headers_dict, payload = _build_message(kwds['payload'])
headers = ["%s: %s" % (key, val) for key, val in headers_dict.items()]
c.setopt(pycurl.POST, True)
c.setopt(pycurl.HTTPHEADER, headers)
c.setopt(pycurl.POSTFIELDS, payload)
# authentication
c.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
c.setopt(pycurl.USERPWD, "%s:%s" % (kwds['email'], kwds['key']))
# SSL verfification
c.setopt(pycurl.CAINFO, kwds['ca_path'])
c.setopt(pycurl.SSL_VERIFYPEER, True)
c.setopt(pycurl.SSL_VERIFYHOST, 2)
# transfer parameters
CURLOPT_ENCODING = getattr(pycurl, 'ACCEPT_ENCODING', pycurl.ENCODING)
c.setopt(CURLOPT_ENCODING, SNAPSEARCH_API_ACCEPT_ENCODING)
c.setopt(pycurl.FOLLOWLOCATION, SNAPSEARCH_API_FOLLOW_REDIRECT)
c.setopt(pycurl.TIMEOUT, SNAPSEARCH_API_TIMEOUT)
# buffer for response
buffer_ = bytearray()
c.setopt(pycurl.HEADER, True)
c.setopt(pycurl.WRITEFUNCTION, buffer_.extend)
try:
c.perform()
# markup buffer sections
CRLF = b"\r\n"
eos = buffer_.find(CRLF) # end of status line
eoh = buffer_.find(CRLF + CRLF) # end of header lines
# response status
normalize_status = \
lambda tup: tup[2].partition(b" ")[::2]
status_tuple = tuple(map(
lambda b: bytes(b.strip()),
normalize_status(buffer_[:eos].partition(b" "))))
status_code = int(status_tuple[0] or "0")
# response headers
normalize_header = \
lambda hdr: (bytes(hdr[0].strip().lower()),
bytes(hdr[2].strip()))
headers = dict(map(
lambda b: normalize_header(b.partition(b":")),
buffer_[eos + len(CRLF):eoh].splitlines()))
# response content
text = bytes(buffer_[eoh + 2 * len(CRLF):].strip()).decode()
except pycurl.error as e:
raise error.SnapSearchConnectionError(e)
except Exception as e:
raise error.SnapSearchError(
"malformed response from SnapSearch backend")
else:
return Response(
status=status_code, headers=headers, body=json.loads(text))
finally:
c.close()
pass # void return
# unified HTTP library interface
dispatch = None
httpinfo = None
# preferred HTTP library
try:
import requests
except ImportError:
pass # no raise
else:
if not dispatch:
dispatch = _dispatch_via_requests
httpinfo = ("requests", requests.__version__,
("gzip", "deflate", "identity"))
pass
# fallback HTTP library
try:
import pycurl
except ImportError:
pass # no raise
else:
if not dispatch:
dispatch = _dispatch_via_pycurl
httpinfo = ("pycurl", pycurl.version,
("gzip", "deflate", "identity"))
pass
# failed all options
if not dispatch:
raise error.SnapSearchDependencyError(
"missing HTTP library, requires ``requests`` or ``pycurl``")
| {
"content_hash": "26ee083f760160df91538b6c029fd91c",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 77,
"avg_line_length": 27.301075268817204,
"alnum_prop": 0.6165813312327688,
"repo_name": "SnapSearch/SnapSearch-Client-Python",
"id": "932f8c1a253daed09e2072a3bafd03a8feeec2c7",
"size": "5102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SnapSearch/api/backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90471"
}
],
"symlink_target": ""
} |
from datetime import datetime
from functools import wraps
import netaddr
import time
from oslo.config import cfg
from akanda.rug.api import configuration
from akanda.rug.api import akanda_client as router_api
from akanda.rug.api import quantum
DOWN = 'down'
BOOTING = 'booting'
UP = 'up'
CONFIGURED = 'configured'
RESTART = 'restart'
REPLUG = 'replug'
GONE = 'gone'
ERROR = 'error'
STATUS_MAP = {
DOWN: quantum.STATUS_DOWN,
BOOTING: quantum.STATUS_BUILD,
UP: quantum.STATUS_BUILD,
CONFIGURED: quantum.STATUS_ACTIVE,
ERROR: quantum.STATUS_ERROR,
}
def synchronize_router_status(f):
    """Decorator for VmManager methods that may change ``self.state``.

    After the wrapped method runs, translate the manager's internal
    state into a neutron status (unknown states map to ERROR) and, when
    it differs from the last status pushed, write it to neutron and
    remember it in ``self._last_synced_status``.
    """
    @wraps(f)
    def wrapper(self, worker_context, silent=False):
        previous = self._last_synced_status
        result = f(self, worker_context, silent)
        # Nothing to report if the router object is gone.
        if not self.router_obj:
            return result
        current = STATUS_MAP.get(self.state, quantum.STATUS_ERROR)
        if not previous or previous != current:
            worker_context.neutron.update_router_status(
                self.router_obj.id,
                current
            )
            self._last_synced_status = current
        return result
    return wrapper
class BootAttemptCounter(object):
    """Counts consecutive VM boot attempts since the last reset."""

    def __init__(self):
        self._attempts = 0

    def start(self):
        """Record the beginning of one more boot attempt."""
        self._attempts = self._attempts + 1

    def reset(self):
        """Forget all recorded attempts."""
        self._attempts = 0

    @property
    def count(self):
        """Number of boot attempts recorded since the last reset."""
        return self._attempts
class VmManager(object):
def __init__(self, router_id, tenant_id, log, worker_context):
self.router_id = router_id
self.tenant_id = tenant_id
self.log = log
self.state = DOWN
self.router_obj = None
self.last_boot = None
self.last_error = None
self._boot_counter = BootAttemptCounter()
self._currently_booting = False
self._last_synced_status = None
self.update_state(worker_context, silent=True)
@property
def attempts(self):
return self._boot_counter.count
def reset_boot_counter(self):
self._boot_counter.reset()
@synchronize_router_status
def update_state(self, worker_context, silent=False):
self._ensure_cache(worker_context)
if self.state == GONE:
self.log.debug('not updating state of deleted router')
return self.state
if self.router_obj.management_port is None:
self.log.debug('no management port, marking router as down')
self.state = DOWN
return self.state
addr = _get_management_address(self.router_obj)
for i in xrange(cfg.CONF.max_retries):
if router_api.is_alive(addr, cfg.CONF.akanda_mgt_service_port):
if self.state != CONFIGURED:
self.state = UP
break
if not silent:
self.log.debug(
'Alive check failed. Attempt %d of %d',
i,
cfg.CONF.max_retries,
)
time.sleep(cfg.CONF.retry_delay)
else:
old_state = self.state
self._check_boot_timeout()
# If the router isn't responding, make sure Nova knows about it
instance = worker_context.nova_client.get_instance(self.router_obj)
if instance is None and self.state != ERROR:
self.log.info('No router VM was found; rebooting')
self.state = DOWN
# update_state() is called from Alive() to check the
# status of the router. If we can't talk to the API at
# that point, the router should be considered missing and
# we should reboot it, so mark it down if we think it was
# configured before.
if old_state == CONFIGURED and self.state != ERROR:
self.log.debug(
'Did not find router alive, marking it as down',
)
self.state = DOWN
# After the router is all the way up, record how long it took
# to boot and accept a configuration.
if self._currently_booting and self.state == CONFIGURED:
# If we didn't boot the server (because we were restarted
# while it remained running, for example), we won't have a
# last_boot time to log.
if self.last_boot:
boot_duration = (datetime.utcnow() - self.last_boot)
self.log.info('Router booted in %s seconds after %s attempts',
boot_duration.total_seconds(),
self._boot_counter.count)
# Always reset the boot counter, even if we didn't boot
# the server ourself, so we don't accidentally think we
# have an erroring router.
self._boot_counter.reset()
# We've reported how long it took to boot and reset the
# counter, so we are no longer "currently" booting.
self._currently_booting = False
return self.state
def boot(self, worker_context, router_image_uuid):
self._ensure_cache(worker_context)
if self.state == GONE:
self.log.info('not booting deleted router')
return
self.log.info('Booting router')
self.state = DOWN
self._boot_counter.start()
try:
self._ensure_provider_ports(self.router_obj, worker_context)
# In the event that the current akanda instance isn't deleted
# cleanly (which we've seen in certain circumstances, like
# hypervisor failures), or the vm has alredy been deleted but
# device_id is still set incorrectly, be proactive and attempt to
# clean up the router ports manually. This helps avoid a situation
# where the rug repeatedly attempts to plug stale router ports into
# the newly created akanda instance (and fails).
router = self.router_obj
for p in router.ports:
if p.device_id:
worker_context.neutron.clear_device_id(p)
created = worker_context.nova_client.reboot_router_instance(
router,
router_image_uuid
)
if not created:
self.log.info('Previous router is deleting')
return
except:
self.log.exception('Router failed to start boot')
return
else:
# We have successfully started a (re)boot attempt so
# record the timestamp so we can report how long it takes.
self.state = BOOTING
self.last_boot = datetime.utcnow()
self._currently_booting = True
def check_boot(self, worker_context):
ready_states = (UP, CONFIGURED)
if self.update_state(worker_context, silent=True) in ready_states:
self.log.info('Router has booted, attempting initial config')
self.configure(worker_context, BOOTING, attempts=1)
if self.state != CONFIGURED:
self._check_boot_timeout()
return self.state == CONFIGURED
self.log.debug('Router is %s' % self.state.upper())
return False
@synchronize_router_status
def set_error(self, worker_context, silent=False):
"""Set the internal and neutron status for the router to ERROR.
This is called from outside when something notices the router
is "broken". We don't use it internally because this class is
supposed to do what it's told and not make decisions about
whether or not the router is fatally broken.
"""
self._ensure_cache(worker_context)
if self.state == GONE:
self.log.debug('not updating state of deleted router')
return self.state
self.state = ERROR
self.last_error = datetime.utcnow()
return self.state
@synchronize_router_status
def clear_error(self, worker_context, silent=False):
"""Clear the internal error state.
This is called from outside when something wants to force a
router rebuild, so that the state machine that checks our
status won't think we are broken unless we actually break
again.
"""
# Clear the boot counter.
self._boot_counter.reset()
self._ensure_cache(worker_context)
if self.state == GONE:
self.log.debug('not updating state of deleted router')
return self.state
self.state = DOWN
return self.state
@property
def error_cooldown(self):
# Returns True if the router was recently set to ERROR state.
if self.last_error and self.state == ERROR:
seconds_since_error = (
datetime.utcnow() - self.last_error
).total_seconds()
if seconds_since_error < cfg.CONF.error_state_cooldown:
return True
return False
def stop(self, worker_context):
self._ensure_cache(worker_context)
if self.state == GONE:
# We are being told to delete a router that neutron has
# already removed. Make a fake router object to use in
# this method.
router_obj = quantum.Router(
id_=self.router_id,
tenant_id=self.tenant_id,
name='unnamed',
admin_state_up=False,
status=quantum.STATUS_DOWN
)
self.log.info('Destroying router neutron has deleted')
else:
router_obj = self.router_obj
self.log.info('Destroying router')
nova_client = worker_context.nova_client
nova_client.destroy_router_instance(router_obj)
start = time.time()
while time.time() - start < cfg.CONF.boot_timeout:
if not nova_client.get_router_instance_status(router_obj):
if self.state != GONE:
self.state = DOWN
return
self.log.debug('Router has not finished stopping')
time.sleep(cfg.CONF.retry_delay)
self.log.error(
'Router failed to stop within %d secs',
cfg.CONF.boot_timeout)
def configure(self, worker_context, failure_state=RESTART, attempts=None):
self.log.debug('Begin router config')
self.state = UP
attempts = attempts or cfg.CONF.max_retries
# FIXME: This might raise an error, which doesn't mean the
# *router* is broken, but does mean we can't update it.
# Change the exception to something the caller can catch
# safely.
self._ensure_cache(worker_context)
if self.state == GONE:
return
addr = _get_management_address(self.router_obj)
# FIXME: This should raise an explicit exception so the caller
# knows that we could not talk to the router (versus the issue
# above).
interfaces = router_api.get_interfaces(
addr,
cfg.CONF.akanda_mgt_service_port
)
if not self._verify_interfaces(self.router_obj, interfaces):
# FIXME: Need a REPLUG state when we support hot-plugging
# interfaces.
self.log.debug("Interfaces aren't plugged as expected.")
self.state = REPLUG
return
# FIXME: Need to catch errors talking to neutron here.
config = configuration.build_config(
worker_context.neutron,
self.router_obj,
interfaces
)
self.log.debug('preparing to update config to %r', config)
for i in xrange(attempts):
try:
router_api.update_config(
addr,
cfg.CONF.akanda_mgt_service_port,
config
)
except Exception:
if i == attempts - 1:
# Only log the traceback if we encounter it many times.
self.log.exception('failed to update config')
else:
self.log.debug(
'failed to update config, attempt %d',
i
)
time.sleep(cfg.CONF.retry_delay)
else:
self.state = CONFIGURED
self.log.info('Router config updated')
return
else:
# FIXME: We failed to configure the router too many times,
# so restart it.
self.state = failure_state
def replug(self, worker_context):
self.log.debug('Attempting to replug...')
self._ensure_provider_ports(self.router_obj, worker_context)
addr = _get_management_address(self.router_obj)
interfaces = router_api.get_interfaces(
addr,
cfg.CONF.akanda_mgt_service_port
)
actual_macs = set((iface['lladdr'] for iface in interfaces))
expected_ports = dict(
(p.mac_address, p) for p in self.router_obj.internal_ports
)
expected_macs = set(expected_ports.keys())
expected_macs.add(self.router_obj.management_port.mac_address)
expected_macs.add(self.router_obj.external_port.mac_address)
ports_to_delete = []
if expected_macs != actual_macs:
instance = worker_context.nova_client.get_instance(self.router_obj)
# For each port that doesn't have a mac address on the VM...
for mac in expected_macs - actual_macs:
port = expected_ports.get(mac)
if port:
self.log.debug(
'New port %s, %s found, plugging...' % (port.id, mac)
)
try:
instance.interface_attach(port.id, None, None)
except:
self.log.exception('Interface attach failed')
self.state = RESTART
return
# For each *extra* mac address on the VM...
for mac in actual_macs - expected_macs:
interface_ports = map(
quantum.Port.from_dict,
worker_context.neutron.api_client.list_ports(
device_id=instance.id,
device_owner=quantum.DEVICE_OWNER_ROUTER_INT
)['ports']
)
for port in interface_ports:
if port.mac_address == mac:
# If we find a router-interface port attached to the
# device (meaning the interface has been removed
# from the neutron router, but not the VM), detach the
# port from the Nova instance and mark the orphaned
# port for deletion
self.log.debug(''.join([
'Port %s, %s is detached from ' % (port.id, mac),
'the neutron router, unplugging...'
]))
try:
instance.interface_detach(port.id)
except:
self.log.exception('Interface detach failed')
self.state = RESTART
return
ports_to_delete.append(port)
# The action of attaching/detaching interfaces in Nova happens via the
# message bus and is *not* blocking. We need to wait a few seconds to
# see if the list of tap devices on the appliance actually changed. If
# not, assume the hotplug failed, and reboot the VM.
replug_seconds = cfg.CONF.hotplug_timeout
while replug_seconds > 0:
self.log.debug(
"Waiting for interface attachments to take effect..."
)
interfaces = router_api.get_interfaces(
addr,
cfg.CONF.akanda_mgt_service_port
)
if self._verify_interfaces(self.router_obj, interfaces):
# If the interfaces now match (hotplugging was successful), go
# ahead and clean up any orphaned neutron ports that may have
# been detached
for port in ports_to_delete:
self.log.debug('Deleting orphaned port %s' % port.id)
worker_context.neutron.api_client.update_port(
port.id, {'port': {'device_owner': ''}}
)
worker_context.neutron.api_client.delete_port(port.id)
return
time.sleep(1)
replug_seconds -= 1
self.log.debug("Interfaces aren't plugged as expected, rebooting.")
self.state = RESTART
def _ensure_cache(self, worker_context):
try:
self.router_obj = worker_context.neutron.get_router_detail(
self.router_id
)
except quantum.RouterGone:
# The router has been deleted, set our state accordingly
# and return without doing any more work.
self.state = GONE
self.router_obj = None
def _check_boot_timeout(self):
    """Decide whether the router VM is still booting or has timed out.

    While the VM is younger than ``boot_timeout`` seconds the state is
    held at BOOTING; once the window elapses, boot bookkeeping is
    cleared and the state drops to DOWN.  An existing ERROR state is
    never overwritten here; it clears only when the router responds
    again or a forced rebuild resets it.
    """
    if not self.last_boot:
        return
    uptime = (datetime.utcnow() - self.last_boot).total_seconds()
    if uptime < cfg.CONF.boot_timeout:
        # Still within the allowed boot window.
        if self.state != ERROR:
            self.state = BOOTING
        return
    # Boot window exceeded: forget this boot attempt and report it.
    # NOTE(review): the original comment said "log an error" but the
    # code logs at info level — kept as-is to preserve behavior.
    self.last_boot = None
    self._currently_booting = False
    self.log.info(
        'Router is DOWN. Created over %d secs ago.',
        cfg.CONF.boot_timeout)
    if self.state != ERROR:
        self.state = DOWN
def _verify_interfaces(self, logical_config, interfaces):
router_macs = set((iface['lladdr'] for iface in interfaces))
self.log.debug('MACs found: %s', ', '.join(sorted(router_macs)))
if not all(
getattr(p, 'mac_address', None) for p in logical_config.ports
):
return False
expected_macs = set(p.mac_address
for p in logical_config.internal_ports)
expected_macs.add(logical_config.management_port.mac_address)
expected_macs.add(logical_config.external_port.mac_address)
self.log.debug('MACs expected: %s', ', '.join(sorted(expected_macs)))
return router_macs == expected_macs
def _ensure_provider_ports(self, router, worker_context):
if router.management_port is None:
self.log.debug('Adding management port to router')
mgt_port = worker_context.neutron.create_router_management_port(
router.id
)
router.management_port = mgt_port
if router.external_port is None:
# FIXME: Need to do some work to pick the right external
# network for a tenant.
self.log.debug('Adding external port to router')
ext_port = worker_context.neutron.create_router_external_port(
router
)
router.external_port = ext_port
return router
def _get_management_address(router):
    """Derive the router's IPv6 management address from its MAC.

    Builds a modified EUI-64 interface identifier from the management
    port's MAC address and combines it with the configured management
    prefix (``cfg.CONF.management_prefix``).
    """
    prefix = netaddr.IPNetwork(cfg.CONF.management_prefix)
    octets = ['%02x' % int(part, 16)
              for part in router.management_port.mac_address.split(':')]
    # Insert ff:fe between the OUI and NIC halves of the MAC (EUI-64).
    iface_id = int(''.join(octets[:3] + ['ff', 'fe'] + octets[3:6]), 16)
    # Flip the universal/local bit as the RFC requires.
    iface_id ^= 0x0200000000000000
    return str(netaddr.IPAddress(prefix.value + iface_id))
| {
"content_hash": "6792ba6983eaa89e2f1d8f81950ac600",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 79,
"avg_line_length": 39.07648183556405,
"alnum_prop": 0.5593286685912805,
"repo_name": "dreamhost/akanda-rug",
"id": "91fbebb933a9350001d98266fb90c5dc47693f48",
"size": "21045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "akanda/rug/vm_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "742381"
}
],
"symlink_target": ""
} |
import unittest
from linkedList import LinkedList
class tests(unittest.TestCase):
    """Unit tests for LinkedList: construction, insertion and removal.

    Expected values are built with ``list(range(...))`` so the suite
    behaves identically on Python 2 (where ``range`` returns a list)
    and Python 3 (where it returns a lazy range object that cannot be
    concatenated with, or compared equal to, a list).
    """

    def setUp(self):
        # Fresh four-element list (0..3) for every test.
        self.list = LinkedList(range(4))

    def testEmptyLength(self):
        linkedList = LinkedList()
        self.assertEqual(len(linkedList), 0)

    def testElementValues(self):
        elements = list(range(4))
        self.assertEqual(list(self.list), elements)

    def testInsert(self):
        # insert() prepends, so the new element appears first.
        elements = [4] + list(range(4))
        self.list.insert(4)
        self.assertEqual(list(self.list), elements)

    def testInsertMultiple(self):
        elements = list(range(4)) * 2
        self.list.insert(range(4))
        self.assertEqual(list(self.list), elements)

    def testInsertAt(self):
        elements = [0, 1, 4, 2, 3]
        self.list.insertAt(4, 2)
        self.assertEqual(list(self.list), elements)

    def testInsertAtMultipe(self):
        # NOTE: method name keeps its historical typo ("Multipe") so
        # test selection by name stays stable.
        elements = [0, 1, 4, 5, 6, 2, 3]
        self.list.insertAt([4, 5, 6], 2)
        self.assertEqual(list(self.list), elements)

    def testInsertAtEnd(self):
        elements = list(range(4)) + list(range(5))
        self.list.insertAt(range(5), len(self.list))
        self.assertEqual(list(self.list), elements)

    def testRemove(self):
        # remove() drops the head element.
        elements = [1, 2, 3]
        self.list.remove()
        self.assertEqual(list(self.list), elements)

    def testRemoveAt(self):
        elements = [0, 2, 3]
        self.list.removeAt(1)
        self.assertEqual(list(self.list), elements)

    def testRemoveAtEnd(self):
        elements = [0, 1, 2]
        self.list.removeAt(len(self.list) - 1)
        self.assertEqual(list(self.list), elements)

    def testLength(self):
        self.assertEqual(len(self.list), 4)
if __name__ == '__main__':
    # Run the suite when invoked directly (python tests.py).
    unittest.main()
| {
"content_hash": "4f7a708eeb5502e90925ff0d1ea8382e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 59,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.6005780346820809,
"repo_name": "dooleykh/rehash",
"id": "337ae2da7cfdb3e2e31180dfba58485f82b9b598",
"size": "1730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linkedList/python/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4427"
}
],
"symlink_target": ""
} |
from typing import List
from RULEngine.Game.OurPlayer import OurPlayer
from RULEngine.Util.geometry import get_distance
from RULEngine.Util.Pose import Pose
from RULEngine.Util.constant import POSITION_DEADZONE, ROBOT_RADIUS
from ai.Algorithm.evaluation_module import is_ball_our_side
from ai.STA.Action.PathfindToPosition import PathfindToPosition
from ai.STA.Tactic.Tactic import Tactic
from ai.STA.Action.Idle import Idle
from ai.STA.Tactic.tactic_constants import Flags
from ai.states.game_state import GameState
__author__ = 'RoboCupULaval'
FOLLOW_SPEED = 1.5
class DemoFollowBall(Tactic):
    """Tactic that makes a robot chase the ball around the field.

    Methods:
        exec(self): executes an Action according to the current state
    Attributes:
        game_state: current game state
        player: instance of the player this tactic is assigned to
    """
    def __init__(self, game_state: GameState, player: OurPlayer, p_target: Pose=None, args: List[str]=None):
        # A fresh Pose is created per instance: the previous default
        # argument ``p_target: Pose=Pose()`` was a mutable default,
        # shared by every instance constructed without a target.
        if p_target is None:
            p_target = Pose()
        Tactic.__init__(self, game_state, player, p_target, args)
        self.current_state = self.halt
        self.next_state = self.halt

    def move_to_ball(self):
        """Chase the ball until within the position deadzone of it."""
        self.status_flag = Flags.WIP
        self.target = Pose(self.game_state.get_ball_position())
        if get_distance(self.player.pose.position, self.target.position) < POSITION_DEADZONE + ROBOT_RADIUS:
            self.next_state = self.halt
        else:
            self.next_state = self.move_to_ball
        return PathfindToPosition(self.game_state, self.player, self.target)

    def halt(self):
        """Idle near the ball; resume chasing if it moves away."""
        self.status_flag = Flags.SUCCESS
        if get_distance(self.player.pose.position, self.game_state.get_ball_position()) < \
                POSITION_DEADZONE + ROBOT_RADIUS:
            self.next_state = self.halt
        else:
            self.next_state = self.move_to_ball
        return Idle(self.game_state, self.player)
| {
"content_hash": "8e4e0612d1a7fe9a9f14613d603056a6",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 110,
"avg_line_length": 36.72,
"alnum_prop": 0.6900871459694989,
"repo_name": "MaximeGLegault/StrategyIA",
"id": "9da25d5c429af539eaabb3beb568903a105d815d",
"size": "1878",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ai/STA/Tactic/DemoFollowBall.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "207240"
},
{
"name": "Protocol Buffer",
"bytes": "30229"
},
{
"name": "Python",
"bytes": "1445233"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return the MINI multi-cluster test path definition.

    The dict describes the starting template, checkpoint settings and
    the ordered list of woodpecker test actions to execute.
    """
    actions = [
        [TestAction.create_mini_vm, 'vm1', 'cluster=cluster1'],
        [TestAction.destroy_vm, 'vm1'],
        [TestAction.create_mini_vm, 'vm2', 'cluster=cluster1'],
        [TestAction.create_vm_backup, 'vm2', 'vm2-backup1'],
        [TestAction.create_mini_vm, 'vm3', 'memory=random', 'cluster=cluster1'],
        [TestAction.migrate_vm, 'vm3'],
        [TestAction.poweroff_only, 'cluster=cluster2'],
        [TestAction.create_volume, 'volume1', 'size=random', 'cluster=cluster2', 'flag=scsi'],
        [TestAction.create_volume, 'volume2', 'cluster=cluster2', 'flag=thick,scsi'],
        [TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
        [TestAction.stop_vm, 'vm2'],
        [TestAction.use_vm_backup, 'vm2-backup1'],
        [TestAction.start_vm, 'vm2'],
        [TestAction.delete_image, 'image1'],
        [TestAction.recover_image, 'image1'],
        [TestAction.delete_image, 'image1'],
        [TestAction.expunge_image, 'image1'],
        [TestAction.create_vm_backup, 'vm2', 'vm2-backup2'],
        [TestAction.create_mini_vm, 'vm4', 'cluster=cluster2'],
        [TestAction.poweroff_only, 'cluster=cluster1'],
        [TestAction.create_image_from_volume, 'vm4', 'vm4-image2'],
        [TestAction.attach_volume, 'vm4', 'volume2'],
        [TestAction.detach_volume, 'volume2'],
        [TestAction.create_volume, 'volume3', 'cluster=cluster2', 'flag=thick,scsi'],
        [TestAction.use_vm_backup, 'vm2-backup1'],
        [TestAction.create_mini_vm, 'vm5', 'cluster=cluster1', 'flag=thick'],
        [TestAction.delete_volume, 'volume2'],
        [TestAction.expunge_volume, 'volume2'],
        [TestAction.destroy_vm, 'vm4'],
        [TestAction.start_vm, 'vm3'],
        [TestAction.create_vm_backup, 'vm3', 'vm3-backup3'],
        [TestAction.stop_vm, 'vm3'],
        [TestAction.create_mini_vm, 'vm6', 'cluster=cluster2'],
        [TestAction.resize_volume, 'vm2', 5*1024*1024],
        [TestAction.poweroff_only, 'cluster=cluster2'],
        [TestAction.use_vm_backup, 'vm3-backup3'],
    ]
    # Key names (including the historical "faild_point" typo) are part
    # of the runner's expected schema and must not be changed.
    return {
        'initial_formation': "template5",
        'checking_point': 1,
        'faild_point': 100000,
        'path_list': actions,
    }
'''
The final status:
Running:['vm5']
Stopped:['vm6', 'vm3', 'vm2']
Enadbled:['vm2-backup1', 'vm2-backup2', 'vm3-backup3', 'vm4-image2']
attached:[]
Detached:['volume1', 'volume3']
Deleted:['vm1', 'vm4']
Expunged:['volume2', 'image1']
Ha:[]
Group:
vm_backup2:['vm2-backup2']---vm2@
vm_backup3:['vm3-backup3']---vm3@
vm_backup1:['vm2-backup1']---vm2@
''' | {
"content_hash": "51a7cbdc59c5e81b8ef849c42f4a79f1",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 104,
"avg_line_length": 39,
"alnum_prop": 0.685690653432589,
"repo_name": "zstackio/zstack-woodpecker",
"id": "ccd3ac1e7d9b50b39eb850655419a7a367a8581f",
"size": "2418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/mini/multiclusters/paths/multi_path11.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
"""
.. _sfm-reconst:
==============================================
Reconstruction with the Sparse Fascicle Model
==============================================
In this example, we will use the Sparse Fascicle Model [Rokem2015]_, to
reconstruct the fiber orientation distribution function (fODF) in every voxel.
First, we import the modules we will use in this example:
"""
import dipy.reconst.sfm as sfm
import dipy.data as dpd
import dipy.direction.peaks as dpp
from dipy.viz import fvtk
"""
For the purpose of this example, we will use the Stanford HARDI dataset (150
directions, single b-value of 2000 s/mm$^2$) that can be automatically
downloaded. If you have not yet downloaded this data-set in one of the other
examples, you will need to be connected to the internet the first time you run
this example. The data will be stored for subsequent runs, and for use with
other examples.
"""
from dipy.data import read_stanford_hardi
# Download (first run) and load the Stanford HARDI dataset and its
# gradient table; `data` is the raw 4D diffusion volume.
img, gtab = read_stanford_hardi()
data = img.get_data()
"""
Reconstruction of the fiber ODF in each voxel guides subsequent tracking
steps. Here, the model is the Sparse Fascicle Model, described in
[Rokem2014]_. This model reconstructs the diffusion signal as a combination of
the signals from different fascicles. This model can be written as:
.. math::
y = X\beta
Where $y$ is the signal and $\beta$ are weights on different points in the
sphere. The columns of the design matrix, $X$ are the signals in each point in
the measurement that would be predicted if there was a fascicle oriented in the
direction represented by that column. Typically, the signal used for this
kernel will be a prolate tensor with axial diffusivity 3-5 times higher than
its radial diffusivity. The exact numbers can also be estimated from examining
parts of the brain in which there is known to be only one fascicle (e.g. in
corpus callosum).
Sparsity constraints on the fiber ODF ($\beta$) are set through the Elastic Net
algorithm [Zou2005]_.
Elastic Net optimizes the following cost function:
.. math::
\sum_{i=1}^{n}{(y_i - \hat{y}_i)^2} + \alpha (\lambda \sum_{j=1}^{m}{w_j}+(1-\lambda) \sum_{j=1}^{m}{w^2_j})
where $\hat{y}$ is the signal predicted for a particular setting of $\beta$,
such that the left part of this expression is the squared loss function;
$\alpha$ is a parameter that sets the balance between the squared loss on
the data, and the regularization constraints. The regularization parameter
$\lambda$ sets the `l1_ratio`, which controls the balance between L1-sparsity
(low sum of weights), and low L2-sparsity (low sum-of-squares of the weights).
Just like constrained spherical deconvolution (see :ref:`reconst-csd`), the SFM
requires the definition of a response function. We'll take advantage of the
automated algorithm in the :mod:`csdeconv` module to find this response
function:
"""
from dipy.reconst.csdeconv import auto_response
# Estimate the single-fiber response from a central ROI restricted to
# high-anisotropy voxels (FA > 0.7).
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
"""
The ``response`` return value contains two entries. The first is an array with
the eigenvalues of the response function and the second is the average S0 for
this response.
It is a very good practice to always validate the result of auto_response. For,
this purpose we can print it and have a look at its values.
"""
# Sanity-check the estimated response (eigenvalues, mean S0) before use.
print(response)
"""
(array([ 0.0014, 0.00029, 0.00029]), 416.206)
We initialize an SFM model object, using these values. We will use the default
sphere (362 vertices, symmetrically distributed on the surface of the sphere),
as a set of putative fascicle directions that are considered in the model
"""
# Default symmetric sphere; its vertices are the candidate fascicle
# directions the model considers.
sphere = dpd.get_sphere()
# l1_ratio balances L1 vs L2 penalties in the Elastic Net; alpha scales
# the overall regularization.  response[0] holds the response-function
# eigenvalues estimated above.
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
                                   l1_ratio=0.5, alpha=0.001,
                                   response=response[0])
"""
For the purpose of the example, we will consider a small volume of data
containing parts of the corpus callosum and of the centrum semiovale
"""
# Small subvolume containing parts of the corpus callosum and of the
# centrum semiovale (single axial slice).
data_small = data[20:50, 55:85, 38:39]
"""
Fitting the model to this small volume of data, we calculate the ODF of this
model on the sphere, and plot it.
"""
# Fit the model to the subvolume and evaluate the resulting fODFs on
# the sphere.
sf_fit = sf_model.fit(data_small)
sf_odf = sf_fit.odf(sphere)
# Render the ODFs as spherical glyphs; norm=True rescales each glyph.
fodf_spheres = fvtk.sphere_funcs(sf_odf, sphere, scale=1.3, norm=True)
ren = fvtk.ren()
fvtk.add(ren, fodf_spheres)
print('Saving illustration as sf_odfs.png')
fvtk.record(ren, out_path='sf_odfs.png', size=(1000, 1000))
"""
We can extract the peaks from the ODF, and plot these as well
"""
# Extract discrete peak directions from the model's ODFs.
sf_peaks = dpp.peaks_from_model(sf_model,
                                data_small,
                                sphere,
                                relative_peak_threshold=.5,
                                min_separation_angle=25,
                                return_sh=False)
# Replace the ODF glyphs in the scene with the extracted peaks.
fvtk.clear(ren)
fodf_peaks = fvtk.peaks(sf_peaks.peak_dirs, sf_peaks.peak_values, scale=1.3)
fvtk.add(ren, fodf_peaks)
print('Saving illustration as sf_peaks.png')
fvtk.record(ren, out_path='sf_peaks.png', size=(1000, 1000))
"""
Finally, we plot both the peaks and the ODFs, overlayed:
"""
# Make the ODF glyphs translucent so the peaks stay visible underneath.
fodf_spheres.GetProperty().SetOpacity(0.4)
fvtk.add(ren, fodf_spheres)
print('Saving illustration as sf_both.png')
fvtk.record(ren, out_path='sf_both.png', size=(1000, 1000))
"""
.. figure:: sf_both.png
:align: center
**SFM Peaks and ODFs**.
To see how to use this information in tracking, proceed to :ref:`sfm-track`.
References
----------
.. [Rokem2015] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2015). Evaluating the accuracy of diffusion MRI models in white
matter. PLoS ONE 10(4): e0123272. doi:10.1371/journal.pone.0123272
.. [Zou2005] Zou H, Hastie T (2005). Regularization and variable
selection via the elastic net. J R Stat Soc B:301-320
"""
| {
"content_hash": "e7622d81809c87b754d5e76110adb45a",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 111,
"avg_line_length": 33.61271676300578,
"alnum_prop": 0.7011177987962167,
"repo_name": "StongeEtienne/dipy",
"id": "ebcdb62b002a17be8f142da6792c54669bbfae97",
"size": "5815",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/examples/sfm_reconst.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2844"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2734362"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._elastic_pool_activities_operations import build_list_by_elastic_pool_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ElasticPoolActivitiesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.sql.aio.SqlManagementClient`'s
        :attr:`elastic_pool_activities` attribute.
    """

    # Alias so callers can reach the model classes via this operations object.
    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The service client passes (client, config, serializer,
        # deserializer) positionally; keyword fallbacks keep older
        # construction styles working.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list_by_elastic_pool(
        self, resource_group_name: str, server_name: str, elastic_pool_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.ElasticPoolActivity"]:
        """Returns elastic pool activities.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal. Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param elastic_pool_name: The name of the elastic pool for which to get the current activity.
         Required.
        :type elastic_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ElasticPoolActivity or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.ElasticPoolActivity]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2014-04-01"))  # type: Literal["2014-04-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.ElasticPoolActivityListResult]
        # Map HTTP status codes to azure-core exception types for map_error below.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the request from the operation parameters;
            # subsequent pages: follow the service-provided next_link.
            if not next_link:
                request = build_list_by_elastic_pool_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    elastic_pool_name=elastic_pool_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_elastic_pool.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                request = HttpRequest("GET", next_link)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; the first tuple element would be the
            # continuation token (none for this API version).
            deserialized = self._deserialize("ElasticPoolActivityListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list_by_elastic_pool.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}/elasticPoolActivity"}  # type: ignore
| {
"content_hash": "a1437729d56cf31c371049bace10e7bb",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 225,
"avg_line_length": 44.66153846153846,
"alnum_prop": 0.6553565277299346,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7b33f6df7dc7f91c4e5a2b9f67b40cb5304a3143",
"size": "6306",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_elastic_pool_activities_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from modules.controller import Controller
PUMP_PIN = 24 # pin of pump
if __name__ == "__main__":
    # Drive the pump attached to PUMP_PIN at 50% power for 1 second.
    controller = Controller(PUMP_PIN)
    controller.run(50, 1)
    print("Pump Start for a second with 50% power")
| {
"content_hash": "adb6b58ce13500f52e977126ca9419fc",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 24.11111111111111,
"alnum_prop": 0.6589861751152074,
"repo_name": "farmy-maker/farmy-py",
"id": "fd44946bf78d83337f09a7bf8a52d68d1ffd61b5",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "farmy/modules/pump.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12080"
}
],
"symlink_target": ""
} |
from aone_app import app
# Development entry point only: Flask's debug mode enables the
# interactive debugger and must not be used in production.
app.run(debug=True)
| {
"content_hash": "fb9da6ff3cd7c0b1e00d9a809807b070",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 24,
"avg_line_length": 22.5,
"alnum_prop": 0.7777777777777778,
"repo_name": "anokata/pythonPetProjects",
"id": "3a08f9619ac650ba31bc6436905ad63a24a2a6be",
"size": "45",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_again/runserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6467"
},
{
"name": "HTML",
"bytes": "56632"
},
{
"name": "JavaScript",
"bytes": "603"
},
{
"name": "Makefile",
"bytes": "889"
},
{
"name": "Python",
"bytes": "840906"
},
{
"name": "Shell",
"bytes": "2407"
},
{
"name": "TSQL",
"bytes": "1299"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: alters Match.datetime to a
    # DateTimeField with verbose_name 'Date'.  Do not edit by hand.

    dependencies = [
        ('matches', '0003_auto_20150127_0910'),
    ]

    operations = [
        migrations.AlterField(
            model_name='match',
            name='datetime',
            field=models.DateTimeField(verbose_name='Date'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "ccabfbaef79fcda2404aa592c3b4c6b9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 60,
"avg_line_length": 22.31578947368421,
"alnum_prop": 0.5919811320754716,
"repo_name": "Zundrium/djangocms-gamegroup",
"id": "de32bd33139d4b070b61499bdc81993c6ffc1ad4",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matches/migrations/0004_auto_20150127_0922.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3904"
},
{
"name": "Python",
"bytes": "27172"
},
{
"name": "Shell",
"bytes": "53"
}
],
"symlink_target": ""
} |
from runner.koan import *
class AboutClassAttributes(Koan):
    """Koan exercises about class/instance attributes and method kinds.

    The literal expected values (e.g. the dir() lengths) are the filled-in
    koan answers for the Python version this repo targets (Python 2) and
    must not be 'corrected'.
    """

    class Dog(object):
        pass

    def test_new_style_class_objects_are_objects(self):
        # Note: Old style class instances are not objects but they are being
        # phased out in Python 3.
        fido = self.Dog()
        self.assertEqual(True, isinstance(fido, object))

    def test_classes_are_types(self):
        self.assertEqual(True, self.Dog.__class__ == type)

    def test_classes_are_objects_too(self):
        self.assertEqual(True, issubclass(self.Dog, object))

    def test_objects_have_methods(self):
        fido = self.Dog()
        self.assertEqual(18, len(dir(fido)))

    def test_classes_have_methods(self):
        self.assertEqual(18, len(dir(self.Dog)))

    def test_creating_objects_without_defining_a_class(self):
        singularity = object()
        self.assertEqual(15, len(dir(singularity)))

    def test_defining_attributes_on_individual_objects(self):
        fido = self.Dog()
        fido.legs = 4
        self.assertEqual(4, fido.legs)

    def test_defining_functions_on_individual_objects(self):
        fido = self.Dog()
        fido.wag = lambda: 'fidos wag'
        self.assertEqual('fidos wag', fido.wag())

    def test_other_objects_are_not_affected_by_these_singleton_functions(self):
        fido = self.Dog()
        rover = self.Dog()

        def wag():
            return 'fidos wag'
        fido.wag = wag
        try:
            rover.wag()
        except Exception as ex:
            self.assertMatch("'Dog' object has no attribute 'wag'", ex[0])

    # ------------------------------------------------------------------

    class Dog2(object):
        def wag(self):
            return 'instance wag'

        def bark(self):
            return "instance bark"

        def growl(self):
            return "instance growl"

        # The later static/class definitions rebind the names bark/growl,
        # shadowing the instance methods above.
        @staticmethod
        def bark():
            return "staticmethod bark, arg: None"

        @classmethod
        def growl(cls):
            return "classmethod growl, arg: cls=" + cls.__name__

    def test_like_all_objects_classes_can_have_singleton_methods(self):
        self.assertMatch("classmethod growl, arg: cls=Dog2", self.Dog2.growl())

    def test_classmethods_are_not_independent_of_instance_methods(self):
        fido = self.Dog2()
        self.assertMatch("classmethod growl, arg: cls=Dog2", fido.growl())
        self.assertMatch("classmethod growl, arg: cls=Dog2", self.Dog2.growl())

    def test_staticmethods_are_unbound_functions_housed_in_a_class(self):
        self.assertMatch("staticmethod bark, arg: None", self.Dog2.bark())

    def test_staticmethods_also_overshadow_instance_methods(self):
        fido = self.Dog2()
        self.assertMatch("staticmethod bark, arg: None", fido.bark())

    # ------------------------------------------------------------------

    class Dog3(object):
        def __init__(self):
            self._name = None

        def get_name_from_instance(self):
            return self._name

        def set_name_from_instance(self, name):
            self._name = name

        @classmethod
        def get_name(cls):
            return cls._name

        @classmethod
        def set_name(cls, name):
            cls._name = name

        name = property(get_name, set_name)
        name_from_instance = property(
            get_name_from_instance, set_name_from_instance)

    def test_classmethods_can_not_be_used_as_properties(self):
        fido = self.Dog3()
        try:
            fido.name = "Fido"
        except Exception as ex:
            self.assertMatch("'classmethod' object is not callable", ex[0])

    def test_classes_and_instances_do_not_share_instance_attributes(self):
        fido = self.Dog3()
        fido.set_name_from_instance("Fido")
        fido.set_name("Rover")
        self.assertEqual("Fido", fido.get_name_from_instance())
        self.assertEqual("Rover", self.Dog3.get_name())

    def test_classes_and_instances_do_share_class_attributes(self):
        fido = self.Dog3()
        fido.set_name("Fido")
        self.assertEqual("Fido", fido.get_name())
        self.assertEqual("Fido", self.Dog3.get_name())

    # ------------------------------------------------------------------

    class Dog4(object):
        def a_class_method(cls):
            return 'dogs class method'

        def a_static_method():
            return 'dogs static method'

        # Equivalent to using the @classmethod/@staticmethod decorators.
        a_class_method = classmethod(a_class_method)
        a_static_method = staticmethod(a_static_method)

    def test_you_can_define_class_methods_without_using_a_decorator(self):
        self.assertEqual('dogs class method', self.Dog4.a_class_method())

    def test_you_can_define_static_methods_without_using_a_decorator(self):
        self.assertEqual('dogs static method', self.Dog4.a_static_method())

    # ------------------------------------------------------------------

    def test_you_can_explicitly_call_class_methods_from_instance_methods(self):
        fido = self.Dog4()
        self.assertEqual('dogs class method', fido.__class__.a_class_method())
| {
"content_hash": "583a1427e306a99b9f4e5ed6424a6815",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 79,
"avg_line_length": 32.25316455696203,
"alnum_prop": 0.5788854003139717,
"repo_name": "hnarayanan/python-koans",
"id": "c248c88e33ad6d9bd05e4eeea5e505dba3a6cb85",
"size": "5195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_class_attributes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "321703"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
} |
from mayavi.core.registry import registry
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.core.metadata import SourceMetadata
from simphony_mayavi.plugins.engine_manager_mayavi2 import (
EngineManagerMayavi2Plugin)
cuds_reader_info = SourceMetadata(
id="CUDSReader",
class_name="simphony_mayavi.sources.cuds_file_source.CUDSFileSource",
tooltip="Load a CUDS file",
desc="Load a SimPhoNy CUDS file",
help="Load a SimPhoNy CUDS file",
menu_name="CUDS file",
extensions=['cuds'],
wildcard='CUDS files (*.cuds)|*.cuds',
output_info=PipelineInfo(
datasets=['unstructured_grid', 'image_data', 'poly_data'],
attribute_types=['any'],
attributes=['scalars', 'vectors']))
registry.sources.append(cuds_reader_info)
def get_plugins():
return [EngineManagerMayavi2Plugin()]
| {
"content_hash": "5b1cc63923925b407c183d9a56b2bcd4",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 33.92,
"alnum_prop": 0.714622641509434,
"repo_name": "simphony/simphony-mayavi",
"id": "ddad6b8710428d50f986080c29db321d7a41dc98",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simphony_mayavi/user_mayavi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "382041"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_LoginDialog(object):
    """Generated (pyuic4-style) UI layout for a modal login dialog.

    Builds a fixed-size dialog with login/password line edits, a
    "private computer" checkbox, and Login/Close buttons wired to the
    dialog's ``on_login`` / ``on_exit`` slots.
    """

    def setupUi(self, LoginDialog):
        """Create and lay out all widgets on ``LoginDialog``."""
        LoginDialog.setObjectName(_fromUtf8("LoginDialog"))
        LoginDialog.setWindowModality(QtCore.Qt.ApplicationModal)
        LoginDialog.resize(332, 192)
        # Fixed size policy: the dialog cannot be resized by the user.
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(LoginDialog.sizePolicy().hasHeightForWidth())
        LoginDialog.setSizePolicy(sizePolicy)
        self.verticalLayout = QtGui.QVBoxLayout(LoginDialog)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Grid: row 0 = login field, row 1 = password field, row 2 = checkbox.
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.label = QtGui.QLabel(LoginDialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.uid_field = QtGui.QLineEdit(LoginDialog)
        self.uid_field.setEchoMode(QtGui.QLineEdit.Normal)
        self.uid_field.setObjectName(_fromUtf8("uid_field"))
        self.gridLayout.addWidget(self.uid_field, 0, 1, 1, 1)
        self.label_2 = QtGui.QLabel(LoginDialog)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
        self.password_field = QtGui.QLineEdit(LoginDialog)
        # Password echo mode masks the typed characters.
        self.password_field.setEchoMode(QtGui.QLineEdit.Password)
        self.password_field.setObjectName(_fromUtf8("password_field"))
        self.gridLayout.addWidget(self.password_field, 1, 1, 1, 1)
        self.private_computer_check = QtGui.QCheckBox(LoginDialog)
        self.private_computer_check.setObjectName(_fromUtf8("private_computer_check"))
        self.gridLayout.addWidget(self.private_computer_check, 2, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        # Vertical spacer pushes the button row to the bottom of the dialog.
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        # Horizontal spacer right-aligns the Login/Close buttons.
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.login_btn = QtGui.QPushButton(LoginDialog)
        self.login_btn.setObjectName(_fromUtf8("login_btn"))
        self.horizontalLayout.addWidget(self.login_btn)
        self.exit_btn = QtGui.QPushButton(LoginDialog)
        self.exit_btn.setObjectName(_fromUtf8("exit_btn"))
        self.horizontalLayout.addWidget(self.exit_btn)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.retranslateUi(LoginDialog)
        # Old-style signal/slot wiring; LoginDialog must provide on_login/on_exit.
        QtCore.QObject.connect(self.login_btn, QtCore.SIGNAL(_fromUtf8("clicked()")), LoginDialog.on_login)
        QtCore.QObject.connect(self.exit_btn, QtCore.SIGNAL(_fromUtf8("clicked()")), LoginDialog.on_exit)
        QtCore.QMetaObject.connectSlotsByName(LoginDialog)

    def retranslateUi(self, LoginDialog):
        """Set all user-visible strings (translation hook)."""
        LoginDialog.setWindowTitle(_translate("LoginDialog", "Login To Mozilla", None))
        self.label.setText(_translate("LoginDialog", "Login:", None))
        self.label_2.setText(_translate("LoginDialog", "Password:", None))
        self.private_computer_check.setText(_translate("LoginDialog", "Private computer", None))
        self.login_btn.setText(_translate("LoginDialog", "Login", None))
        self.exit_btn.setText(_translate("LoginDialog", "Close", None))
| {
"content_hash": "975ad77e476f24b6dbf6217f277c234c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 107,
"avg_line_length": 53.38666666666666,
"alnum_prop": 0.7100399600399601,
"repo_name": "isbm/pybug",
"id": "8879b3cd71903ca04cb0f6635ff75346b41610f2",
"size": "4202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/_login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45567"
}
],
"symlink_target": ""
} |
u"""ci_play setup script
:copyright: Copyright (c) 2022 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pksetup
# Package metadata; pksetup wraps setuptools.setup with pykern conventions.
pksetup.setup(
    name='ci_play',
    author='RadiaSoft LLC',
    author_email='pip@radiasoft.net',
    description='ci_play',
    install_requires=[
        'pykern',
    ],
    license='http://www.apache.org/licenses/LICENSE-2.0.html',
    url='https://github.com/radiasoft/ci_play',
    # Trove classifiers: https://pypi.org/classifiers/
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python',
        'Topic :: Utilities',
    ],
)
| {
"content_hash": "8ccd36bcc0872bb8474ce1c44ed09227",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 67,
"avg_line_length": 30.724137931034484,
"alnum_prop": 0.6184062850729517,
"repo_name": "robnagler/ci-play",
"id": "dce75d9a310ecd694d343aa4ed39ae4a2a10031e",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2157"
},
{
"name": "Shell",
"bytes": "385"
}
],
"symlink_target": ""
} |
"""Runs code on a fleet of machines.
This runs the lingvo code on a fleet of docker for demonstration and testing
purposes. We assume the following:
* There is a running container
* There is a shared volume in /sharedfs. In reality, this would be something
like an NFS or HDFS mount.
The script is run on the host and only requires python and the docker binary
to be installed.
We run two "clusters": one for training, and one for decoding. The trainer
jobs (controller, trainer_client/worker or trainer/ps) are connected to
each other, whereas the decoder jobs are independent, only reading from
the shared filesystem. The trainer jobs are configured via a cluster spec
flag, whereas the decoder jobs are configured with individual flags.
"""
import os
from pipes import quote as shell_quote
import shutil
import subprocess
import sys
# Cluster topology for synchronous training: workers + controller +
# a single trainer_client that drives the training steps.
_SYNC_TRAIN_CLUSTER_SPEC = {
    "worker": [
        "worker0:43222",
        "worker1:43222",
        "worker2:43222",
    ],
    "controller": ["controller:43214",],
    "trainer_client": ["trainer_client:24601"],
}
# Cluster topology for asynchronous training: independent trainers
# pushing gradients to parameter servers.
_ASYNC_TRAIN_CLUSTER_SPEC = {
    "trainer": [
        "trainer0:43222",
        "trainer1:43222",
        "trainer2:43222",
    ],
    "ps": [
        "ps0:43221",
        "ps1:43221",
    ],
    "controller": ["controller:43214",],
}
# Decode/eval jobs are independent of the trainer cluster; each one only
# reads checkpoints from the shared filesystem.
DECODE_CLUSTER_SPEC = {
    "evaler_test": ["evaler_test:23487"],
    "decoder_test": ["decoder_test:24679"],
}
MODEL = "image.mnist.LeNet5"
DATADIR = "/tmp/mnist"
# Either "sync" or "async"; selects the cluster spec used below.
TRAIN_MODE = "sync"
TRAIN_CLUSTER_SPEC = (
    _SYNC_TRAIN_CLUSTER_SPEC
    if TRAIN_MODE == "sync" else _ASYNC_TRAIN_CLUSTER_SPEC)
DOCKER_BIN = "/usr/bin/docker"
# All that is required is that we have pip installed tensorflow.
DOCKER_IMAGE_NAME = "tensorflow:lingvo"
# This was created using
#   bazel build -c opt //lingvo:trainer.par
#   cp bazel-bin/lingvo/trainer.par .
# Since /tmp/lingvo is mounted, we can see it.
# TODO(drpng): hard-wiring below.
TRAINER_PACKAGE = "/tmp/lingvo/trainer.par"
# When True, docker commands are printed but not executed.
DRY_RUN = False
NETWORK_NAME = "tf-net"
# Stand-in for a real shared filesystem (NFS/HDFS) mount.
SHARED_FS_MOUNTPOINT = "/tmp/sharedfs"
def _RunDocker(args):
  """Invokes the docker binary with `args`; returns the exit status.

  Honors DRY_RUN: when set, the command is only printed and 0 is returned.
  """
  print("Running: docker %s" % args)
  if DRY_RUN:
    return 0
  return subprocess.call([DOCKER_BIN] + args)
def _RunDockerOrDie(args):
  """Runs docker with `args`; terminates the whole program on failure."""
  status = _RunDocker(args)
  if status == 0:
    return
  sys.stderr.write("Failed to run: %s\n" % status)
  sys.stderr.flush()
  sys.exit(status)
def _ExecInDocker(container_name,
                  cmd_array,
                  workdir=None,
                  logfile=None,
                  detach=False):
  """Runs `cmd_array` inside `container_name` via `docker exec`.

  Args:
    container_name: name of a running container.
    cmd_array: command and arguments to execute.
    workdir: working directory inside the container (default /tmp).
    logfile: optional container-side path; when set, the command is run
      under bash with stdout/stderr redirected there.
    detach: when True, exec runs detached (-d).

  Exits the program if the docker command fails.
  """
  options = ["-t", "-w", workdir or "/tmp"]
  if detach:
    options.append("-d")
  # TODO(drpng): avoid quoting hell.
  exec_prefix = ["exec"] + options + [container_name]
  if not logfile:
    docker_args = exec_prefix + cmd_array
  else:
    # The logfile is in the container, so redirect via an in-container bash.
    quoted = " ".join(shell_quote(part) for part in cmd_array)
    docker_args = exec_prefix + ["bash", "-c", "%s >& %s" % (quoted, logfile)]
  status = _RunDocker(docker_args)
  if status != 0:
    sys.stderr.write(
        "Failed to exec within %s: %s" % (container_name, cmd_array))
    sys.exit(status)
def _Machine(machine_port):
# From host:port to host.
return machine_port[:machine_port.index(":")]
def Cleanup():
  """Stops all fleet containers, removes the network and shared volume."""
  specs = list(TRAIN_CLUSTER_SPEC.values()) + list(DECODE_CLUSTER_SPEC.values())
  for job_machines in specs:
    hosts = [_Machine(entry) for entry in job_machines]
    _RunDocker(["stop", "-t", "0"] + hosts)
  _RunDocker(["network", "rm", NETWORK_NAME])
  shutil.rmtree(SHARED_FS_MOUNTPOINT, ignore_errors=True)
def InitFiles():
  """Creates the shared-filesystem layout (world-writable, sticky bit)."""
  # Create these directories up front so that we own them, not root.
  # Order matters: parents before children; "" is the mountpoint itself.
  for subdir in ("", "/log", "/log/train", "/log/decoder_test",
                 "/log/eval_test"):
    os.mkdir(SHARED_FS_MOUNTPOINT + subdir, 0o1777)
def InitNetwork():
  """Creates the bridge network that joins all fleet containers."""
  create_cmd = ["network", "create", "--driver", "bridge", NETWORK_NAME]
  _RunDockerOrDie(create_cmd)
def StartFleet():
  """Launches one idle container per machine in the train + decode specs.

  Each container mounts the shared volume read-write and the data
  directory read-only, and just runs bash until a job is exec'd into it.
  """
  all_specs = list(TRAIN_CLUSTER_SPEC.values()) + list(DECODE_CLUSTER_SPEC.values())
  for machines in all_specs:
    for host_port in machines:
      host = _Machine(host_port)
      _RunDockerOrDie([
          "run", "--rm", "--name", host, "-dit", "--network",
          NETWORK_NAME, "-v", ":".join([SHARED_FS_MOUNTPOINT] * 2), "-v",
          ":".join([DATADIR] * 2 + ["ro"]), DOCKER_IMAGE_NAME, "bash"
      ])
def MakeFlagClusterSpec(cluster_spec):
  """Encodes {job: [host:port, ...]} as "job1=a,b@job2=c" flag syntax.

  Jobs are emitted in sorted order for a deterministic flag value.
  """
  return "@".join(
      "%s=%s" % (job, ",".join(cluster_spec[job]))
      for job in sorted(cluster_spec))
def CopyTrainerToSharedMount():
  """Copies the trainer .par package onto the shared volume."""
  destination = SHARED_FS_MOUNTPOINT + "/trainer.par"
  shutil.copy(TRAINER_PACKAGE, destination)
def InstallAndStartProcess(cluster_spec):
  """Unpacks the trainer and kicks off training.

  Starts one detached trainer process per machine in `cluster_spec`,
  each logging to a per-task file on the shared volume.

  Args:
    cluster_spec: dict mapping job name to a list of "host:port" strings.
  """
  cluster_spec_flag = MakeFlagClusterSpec(cluster_spec)
  for job_name, machines in cluster_spec.items():
    # enumerate() replaces the hand-maintained task_idx counter.
    for task_idx, machine_port in enumerate(machines):
      machine_name = _Machine(machine_port)
      _ExecInDocker(
          machine_name, [
              os.path.join(SHARED_FS_MOUNTPOINT, "trainer.par"),
              "--cluster_spec=%s" % cluster_spec_flag,
              "--job=%s" % job_name,
              "--task=%d" % task_idx,
              "--mode=%s" % TRAIN_MODE,
              "--logtostderr",
              "--model=%s" % MODEL,
              "--logdir=%s/log" % SHARED_FS_MOUNTPOINT,
          ],
          workdir="/tmp",
          logfile="%s/%s.%d.log" % (SHARED_FS_MOUNTPOINT, job_name, task_idx),
          detach=True)
def main():
  """Rebuilds the fleet from scratch, then starts train and decode jobs."""
  Cleanup()
  InitFiles()
  InitNetwork()
  StartFleet()
  CopyTrainerToSharedMount()
  InstallAndStartProcess(TRAIN_CLUSTER_SPEC)
  # Each decode process is its own single-job spec, independent of training.
  for role in sorted(DECODE_CLUSTER_SPEC):
    InstallAndStartProcess({role: DECODE_CLUSTER_SPEC[role]})
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
  main()
| {
"content_hash": "7a12ea58992a1edd9824b5d26d88b145",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 80,
"avg_line_length": 28.927884615384617,
"alnum_prop": 0.6337045039056008,
"repo_name": "tensorflow/lingvo",
"id": "6ebc6e22f830d31b0963f958310a3695201cf9ef",
"size": "6706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/run_distributed.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
} |
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.v2.views import QuotaViewSet as ViewSet
from api.tests.factories import UserFactory, AnonymousUserFactory,\
IdentityFactory, ProviderFactory, GroupFactory,\
IdentityMembershipFactory, QuotaFactory, AllocationFactory
from django.core.urlresolvers import reverse
from core.models import Identity
# Test cases should be re-written now that these methods are allowed.
# class GetListTests(APITestCase):
#
# def setUp(self):
# self.view = ViewSet.as_view({'get': 'list'})
# self.anonymous_user = AnonymousUserFactory()
# self.user = UserFactory.create()
# self.group = GroupFactory.create(name=self.user.username)
# self.staff_user = UserFactory.create(is_staff=True)
#
# self.quota = QuotaFactory.create()
#
# factory = APIRequestFactory()
# url = reverse('api:v2:quota-list')
# self.request = factory.get(url)
# force_authenticate(self.request, user=self.user)
# self.response = self.view(self.request)
#
# def tearDown(self):
# self.quota.delete()
#
# def test_is_not_public(self):
# force_authenticate(self.request, user=self.anonymous_user)
# response = self.view(self.request)
# self.assertEquals(response.status_code, 403)
#
# def test_is_visible_to_authenticated_user(self):
# force_authenticate(self.request, user=self.user)
# response = self.view(self.request)
# self.assertEquals(response.status_code, 200)
#
# def test_response_is_paginated(self):
# response = self.response
# self.assertIn('count', response.data)
# self.assertIn('results', response.data)
#
# def test_response_contains_expected_fields(self):
# force_authenticate(self.request, user=self.user)
# response = self.view(self.request)
# data = response.data.get('results')[0]
#
# self.assertEquals(len(data), 7)
# self.assertIn('id', data)
# self.assertIn('url', data)
# self.assertIn('cpu', data)
# self.assertIn('memory', data)
# self.assertIn('storage', data)
# self.assertIn('storage_count', data)
#
#
# class GetDetailTests(APITestCase):
#
# def setUp(self):
# self.view = ViewSet.as_view({'get': 'retrieve'})
# self.anonymous_user = AnonymousUserFactory()
# self.user = UserFactory.create()
# self.group = GroupFactory.create(name=self.user.username)
# self.staff_user = UserFactory.create(is_staff=True)
#
# self.quota = QuotaFactory.create()
#
# factory = APIRequestFactory()
# url = reverse('api:v2:quota-detail', args=(self.quota.id,))
# self.request = factory.get(url)
# force_authenticate(self.request, user=self.user)
# self.response = self.view(self.request, pk=self.quota.id)
#
# def tearDown(self):
# self.quota.delete()
#
# def test_is_not_public(self):
# force_authenticate(self.request, user=self.anonymous_user)
# response = self.view(self.request, pk=self.quota.id)
# self.assertEquals(response.status_code, 403)
#
# def test_is_visible_to_authenticated_user(self):
# force_authenticate(self.request, user=self.user)
# response = self.view(self.request, pk=self.quota.id)
# self.assertEquals(response.status_code, 200)
#
# def test_response_contains_expected_fields(self):
# force_authenticate(self.request, user=self.user)
# response = self.view(self.request, pk=self.quota.id)
# data = response.data
#
# self.assertEquals(len(data), 7)
# self.assertIn('id', data)
# self.assertIn('url', data)
# self.assertIn('cpu', data)
# self.assertIn('memory', data)
# self.assertIn('storage', data)
# self.assertIn('storage_count', data)
#
# class CreateTests(APITestCase):
#
# def test_endpoint_does_not_exist(self):
# self.assertTrue('post' not in ViewSet.http_method_names)
#
#
# class UpdateTests(APITestCase):
#
# def test_endpoint_does_not_exist(self):
# self.assertTrue('put' not in ViewSet.http_method_names)
#
#
# class DeleteTests(APITestCase):
#
# def test_endpoint_does_not_exist(self):
# self.assertTrue('delete' not in ViewSet.http_method_names)
| {
"content_hash": "ebf8b0eb5afaf9167260de050ac8902c",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 82,
"avg_line_length": 36.610169491525426,
"alnum_prop": 0.6576388888888889,
"repo_name": "CCI-MOC/GUI-Backend",
"id": "8c4fcbb517aed513f7cf6e2b2893dd58b2a0bdcd",
"size": "4320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/tests/v2/test_quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11571"
},
{
"name": "Python",
"bytes": "2565922"
},
{
"name": "Ruby",
"bytes": "1345"
},
{
"name": "Shell",
"bytes": "42018"
}
],
"symlink_target": ""
} |
from pastpy.database_image import DatabaseImage
class DummyDatabaseImage(DatabaseImage):
    """Placeholder DatabaseImage whose URLs point at placehold.it stock images."""

    def __init__(self, *, image_index, object_index):
        # Stored under new private names; only used to build the title.
        self.__image_idx = image_index
        self.__object_idx = object_index

    @property
    def full_size_url(self):
        """URL of the full-size placeholder image."""
        return "http://placehold.it/510x670"

    @property
    def thumbnail_url(self):
        """URL of the thumbnail placeholder image."""
        return "http://placehold.it/210x210"

    @property
    def title(self):
        """Human-readable dummy title identifying the object and image."""
        return "Dummy object %d image %d" % (self.__object_idx, self.__image_idx)
| {
"content_hash": "5b798cf86b0de874620dae3d5b27886c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 85,
"avg_line_length": 27.842105263157894,
"alnum_prop": 0.6465028355387523,
"repo_name": "minorg/pastpy",
"id": "ddfe030b69cdc6b73477c604e7200311f0b99c21",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pastpy/impl/dummy/dummy_database_image.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Python",
"bytes": "678754"
},
{
"name": "Thrift",
"bytes": "15261"
}
],
"symlink_target": ""
} |
response.view.replace('.html','.mobile.html')
#Mobile Views
from gluon.contrib.user_agent_parser import mobilize
# method for default loading page
@mobilize
def index():
    """Landing page: sets the flash message and title, renders the view."""
    response.flash = T("Welcome to KPW.org!")
    response.title = T('KPW.org')
    return dict(message="")
# method to show twitter feed
def twitterfeed():
    """Renders the twitter-feed view; no controller data is needed."""
    return {}
# method for user logins and registration (currently disabled)
'''
def user():
return dict(form=auth())
'''
# method for downloading files (currently disabled)
'''
@cache.action()
def download():
return response.download(request, db)
'''
# decorate with @services.jsonrpc the functions to expose
# supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
def call():
    """Dispatches to the web2py service handler (xml/json/rpc/csv/...)."""
    handler = service()
    return handler
# method for SQL table manipulation (currently disabled)
'''
@auth.requires_signature()
def data():
return dict(form=crud())
'''
| {
"content_hash": "0eb5b47d409fa7eef1231ebf83831307",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 62,
"avg_line_length": 22.475,
"alnum_prop": 0.699666295884316,
"repo_name": "highlanderkev/kpw.org",
"id": "dfa65ab64e17fc57e2e7b9a473d02a27542ada9e",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kpw-org/controllers/default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "118673"
},
{
"name": "HTML",
"bytes": "66391"
},
{
"name": "JavaScript",
"bytes": "32484"
},
{
"name": "Python",
"bytes": "265721"
}
],
"symlink_target": ""
} |
"""Amber relaxation."""
| {
"content_hash": "cdacfa8467310980b9b0f0ed83f46ce6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.625,
"repo_name": "deepmind/alphafold",
"id": "98feaf80700f21bf7c1c5e7f755d3c38d0008dba",
"size": "618",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "alphafold/relax/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "Jupyter Notebook",
"bytes": "43833"
},
{
"name": "Python",
"bytes": "796703"
},
{
"name": "Shell",
"bytes": "17888"
}
],
"symlink_target": ""
} |
from django.middleware.cache import CacheMiddleware
from django.utils.cache import patch_response_headers, get_max_age
from models import FancyModel
from django.http import HttpResponse
from django.core import serializers
from cache.manager import cache_manager
def fancy_view(request):
    """Serves all FancyModel rows as XML, cached under a versioned key.

    The cache key prefix comes from the 'fancy_model' cache group's
    current "content" version, so bumping the version invalidates it.
    """
    version = cache_manager.get_group('fancy_model').get_version("content")
    cached = process_request(request, version)
    if cached:
        return cached
    payload = serializers.serialize("xml", FancyModel.objects.all())
    return process_response(request, HttpResponse(payload), version)
def process_request(req, prefix, cache_time=60*60):
    """Looks up a cached response via Django's CacheMiddleware.

    Returns the cached HttpResponse on a hit, or False on a miss.
    """
    middleware = CacheMiddleware(cache_timeout=cache_time,
                                 key_prefix=prefix)
    cached = middleware.process_request(req)
    # Normalize a cache miss (None/falsy) to the explicit False sentinel.
    return cached if cached else False
def process_response(req, res, prefix, cache_time=60*60):
    """Stores `res` in the cache and normalizes its client-cache headers.

    Args:
        req: the incoming HttpRequest.
        res: the HttpResponse to cache.
        prefix: cache key prefix (typically a content version token).
        cache_time: server-side cache timeout in seconds.

    Returns:
        The (possibly header-patched) HttpResponse.
    """
    # update the cache using django's CacheMiddleware
    cache_middleware = CacheMiddleware(cache_timeout=cache_time,
                                       key_prefix=prefix)
    response = cache_middleware.process_response(req, res)
    # If the response already advertises a max-age shorter than our cache
    # window, strip the validation headers and re-patch so clients don't
    # cache it longer than intended.
    # BUG FIX: the original condition was `max_age < max_age`, a
    # self-comparison that is always False, making this branch dead code;
    # the intended comparison is against cache_time.
    max_age = get_max_age(response)
    if max_age and max_age < cache_time:
        # Remove headers so patch_response_headers works
        for header in ('ETag', 'Last-Modified', 'Expires'):
            if response.has_header(header):
                del response[header]
        patch_response_headers(response, max_age)
    return response
| {
"content_hash": "96905774e796b62e111ed0e142f5637e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 34.30612244897959,
"alnum_prop": 0.7144556811421773,
"repo_name": "ff0000/scarlet",
"id": "531cf221db67ea35bc04f161aef1dbdcc26b2515",
"size": "1681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/cache/functionview/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "217430"
},
{
"name": "HTML",
"bytes": "43216"
},
{
"name": "JavaScript",
"bytes": "2200686"
},
{
"name": "Python",
"bytes": "508579"
},
{
"name": "Ruby",
"bytes": "485"
},
{
"name": "Shell",
"bytes": "1813"
}
],
"symlink_target": ""
} |
""" This template creates a Pub/Sub (publish-subscribe) service. """
from hashlib import sha1
import json
def set_optional_property(destination, source, prop_name):
    """ Copies `prop_name` from `source` into `destination` when present;
    leaves `destination` untouched otherwise. """
    if prop_name not in source:
        return
    destination[prop_name] = source[prop_name]
def create_subscription(resource_name, project_id, spec):
    """ Builds a pull/push subscription resource from the simplified spec.

    The DM resource name gets a deterministic hash suffix derived from the
    topic resource name plus the spec, so distinct specs never collide.
    Returns a single-element list of resource dicts. """
    digest = sha1((resource_name + json.dumps(spec)).encode('utf-8')).hexdigest()
    suffix = 'subscription-{}'.format(digest[:10])
    sub_id = spec.get('name', suffix)
    subscription = {
        'name': '{}-{}'.format(resource_name, suffix),
        # https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions
        'type': 'gcp-types/pubsub-v1:projects.subscriptions',
        'properties': {
            'subscription': sub_id,
            'name': 'projects/{}/subscriptions/{}'.format(project_id, sub_id),
            'topic': '$(ref.{}.name)'.format(resource_name)
        }
    }
    # Pass through any of the API's optional subscription fields.
    for prop in ('labels',
                 'pushConfig',
                 'ackDeadlineSeconds',
                 'retainAckedMessages',
                 'messageRetentionDuration',
                 'expirationPolicy'):
        set_optional_property(subscription['properties'], spec, prop)
    # A shorthand pushEndpoint overrides any explicit pushConfig.
    push_endpoint = spec.get('pushEndpoint')
    if push_endpoint is not None:
        subscription['properties']['pushConfig'] = {
            'pushEndpoint': push_endpoint,
        }
    return [subscription]
def generate_config(context):
    """ Deployment Manager entry point: builds the topic resource, one
    resource per subscription spec, and a topicName output. """
    props = context.properties
    topic_id = props.get('name', props.get('topic', context.env['name']))
    project_id = props.get('project', context.env['project'])
    topic = {
        'name': context.env['name'],
        # https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics
        'type': 'gcp-types/pubsub-v1:projects.topics',
        'properties': {
            'topic': topic_id,
            'name': 'projects/{}/topics/{}'.format(project_id, topic_id),
        }
    }
    # 'labels' is the only optional topic field passed through.
    set_optional_property(topic['properties'], props, 'labels')
    resources = [topic]
    for spec in props.get('subscriptions', []):
        resources.extend(
            create_subscription(context.env['name'], project_id, spec))
    return {
        'resources': resources,
        'outputs': [
            {
                'name': 'topicName',
                'value': '$(ref.{}.name)'.format(context.env['name'])
            }
        ],
    }
| {
"content_hash": "d77bfa09d83257f9193708e1dcbd960d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 112,
"avg_line_length": 31.258426966292134,
"alnum_prop": 0.604960460100647,
"repo_name": "GoogleCloudPlatform/cloud-foundation-toolkit",
"id": "f496a32b36e15232d44a14f6fa6d74ecd132989e",
"size": "3378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dm/templates/pubsub/pubsub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "13744"
},
{
"name": "Go",
"bytes": "358673"
},
{
"name": "HCL",
"bytes": "133562"
},
{
"name": "JavaScript",
"bytes": "260"
},
{
"name": "Makefile",
"bytes": "12926"
},
{
"name": "Open Policy Agent",
"bytes": "20813"
},
{
"name": "Python",
"bytes": "316390"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "372901"
}
],
"symlink_target": ""
} |
# Minimal Python 2 Tkinter demo: show a list of names in a Listbox.
from Tkinter import *  # Python 2 Tkinter GUI toolkit (wildcard import is the Tkinter idiom)

root = Tk()  # Create the top-level application window
# Names to display in the listbox
li = 'Carl Patrick Lindsay Helmut Chris Gwen'.split()
listb = Listbox(root)  # Create a listbox widget
for item in li:
    # NOTE(review): inserting at index 0 each iteration displays the names
    # in reverse order; use END if the original order is intended — confirm.
    listb.insert(0,item)
listb.pack()  # Lay out the listbox inside root
root.mainloop()  # Enter the Tk event loop (blocks until the window closes)
"content_hash": "1d8ea7f65a6c1a30d50823d4cd525def",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 77,
"avg_line_length": 51.9,
"alnum_prop": 0.5645472061657033,
"repo_name": "felixzhao/BookDigger",
"id": "1e255fa3208a37ead6d28eb2a2963f7dc8455cad",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digger_app/practice_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "47540"
},
{
"name": "Python",
"bytes": "30513"
}
],
"symlink_target": ""
} |
import sys
from litmos.api import API
from litmos.litmos import LitmosType
from litmos.team import Team
from litmos.user import User
from litmos.course import Course
from litmos.course_module import CourseModule
__version__ = "1.3.0"
class Litmos(object):
    """Facade that wires API credentials and exposes the resource classes.

    Attribute access for any name in ACCEPTABLE_TYPES (e.g. ``lm.User``)
    resolves to the corresponding class defined in this package.
    """

    ACCEPTABLE_TYPES = ['User', 'Team', 'Course', 'CourseModule']

    def __init__(self, api_key, app_name, root_url='https://api.litmos.com/v1.svc'):
        # Credentials are stored on the shared API class (module-wide state).
        API.api_key = api_key
        API.app_name = app_name
        API.ROOT_URL = root_url
        self.litmos_api = API

    def __getattr__(self, name):
        """Dynamically resolve resource class names; defer everything else."""
        if name not in Litmos.ACCEPTABLE_TYPES:
            return object.__getattribute__(self, name)
        return getattr(sys.modules[__name__], name)
| {
"content_hash": "3a8bee14ea21ecb18b344c24a1718825",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 84,
"avg_line_length": 27.40740740740741,
"alnum_prop": 0.6472972972972973,
"repo_name": "charliequinn/python-litmos-api",
"id": "107ee9c1ff710d4e892f0fa23baa097e193b2e4d",
"size": "740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/litmos/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "55750"
}
],
"symlink_target": ""
} |
"""
Epydoc parser for ReStructuredText strings. ReStructuredText is the
standard markup language used by the Docutils project.
L{parse_docstring()} provides the primary interface to this module; it
returns a L{ParsedRstDocstring}, which supports all of the methods
defined by L{ParsedDocstring}.
L{ParsedRstDocstring} is basically just a L{ParsedDocstring} wrapper
for the C{docutils.nodes.document} class.
Creating C{ParsedRstDocstring}s
===============================
C{ParsedRstDocstring}s are created by the C{parse_document} function,
using the C{docutils.core.publish_string()} method, with the following
helpers:
- An L{_EpydocReader} is used to capture all error messages as it
parses the docstring.
- A L{_DocumentPseudoWriter} is used to extract the document itself,
without actually writing any output. The document is saved for
further processing. The settings for the writer are copied from
C{docutils.writers.html4css1.Writer}, since those settings will
be used when we actually write the docstring to html.
Using C{ParsedRstDocstring}s
============================
C{ParsedRstDocstring}s support all of the methods defined by
C{ParsedDocstring}; but only the following four methods have
non-default behavior:
- L{to_html()<ParsedRstDocstring.to_html>} uses an
L{_EpydocHTMLTranslator} to translate the C{ParsedRstDocstring}'s
document into an HTML segment.
- L{split_fields()<ParsedRstDocstring.split_fields>} uses a
L{_SplitFieldsTranslator} to divide the C{ParsedRstDocstring}'s
document into its main body and its fields. Special handling
is done to account for consolidated fields.
- L{summary()<ParsedRstDocstring.summary>} uses a
L{_SummaryExtractor} to extract the first sentence from
the C{ParsedRstDocstring}'s document.
- L{to_plaintext()<ParsedRstDocstring.to_plaintext>} uses
C{document.astext()} to convert the C{ParsedRstDocstring}'s
document to plaintext.
@todo: Add ParsedRstDocstring.to_latex()
@var CONSOLIDATED_FIELDS: A dictionary encoding the set of
'consolidated fields' that can be used. Each consolidated field is
marked by a single tag, and contains a single bulleted list, where
each list item starts with an identifier, marked as interpreted text
(C{`...`}). This module automatically splits these consolidated
fields into individual fields. The keys of C{CONSOLIDATED_FIELDS} are
the names of possible consolidated fields; and the values are the
names of the field tags that should be used for individual entries in
the list.
"""
__docformat__ = 'epytext en'
# Imports
import re, os, os.path
from xml.dom.minidom import *
from docutils.core import publish_string
from docutils.writers import Writer
from docutils.writers.html4css1 import HTMLTranslator, Writer as HTMLWriter
from docutils.writers.latex2e import LaTeXTranslator, Writer as LaTeXWriter
from docutils.readers.standalone import Reader as StandaloneReader
from docutils.utils import new_document
from docutils.nodes import NodeVisitor, Text, SkipChildren
from docutils.nodes import SkipNode, TreeCopyVisitor
from docutils.frontend import OptionParser
from docutils.parsers.rst import directives, roles
import docutils.nodes
import docutils.transforms.frontmatter
import docutils.transforms
import docutils.utils
from epydoc.compat import * # Backwards compatibility
from epydoc.markup import *
from epydoc.apidoc import ModuleDoc, ClassDoc
from epydoc.docwriter.dotgraph import *
from epydoc.docwriter.xlink import ApiLinkReader
from epydoc.util import wordwrap, plaintext_to_html, plaintext_to_latex
from epydoc.markup.doctest import doctest_to_html, doctest_to_latex, \
HTMLDoctestColorizer, \
LaTeXDoctestColorizer
#: A dictionary whose keys are the "consolidated fields" that are
#: recognized by epydoc; and whose values are the corresponding epydoc
#: field names that should be used for the individual fields.
#: E.g. a single ``:Parameters:`` list is split into ``@param`` fields.
CONSOLIDATED_FIELDS = {
    'parameters': 'param',
    'arguments': 'arg',
    'exceptions': 'except',
    'variables': 'var',
    'ivariables': 'ivar',
    'cvariables': 'cvar',
    'groups': 'group',
    'types': 'type',
    'keywords': 'keyword',
    }

#: A list of consolidated fields whose bodies may be specified using a
#: definition list, rather than a bulleted list.  For these fields, the
#: 'classifier' for each term in the definition list is translated into
#: a @type field.
CONSOLIDATED_DEFLIST_FIELDS = ['param', 'arg', 'var', 'ivar', 'cvar', 'keyword']
def parse_docstring(docstring, errors, **options):
    """
    Parse the given docstring, which is formatted using
    ReStructuredText; and return a L{ParsedDocstring} representation
    of its contents.
    @param docstring: The docstring to parse
    @type docstring: C{string}
    @param errors: A list where any errors generated during parsing
        will be stored.
    @type errors: C{list} of L{ParseError}
    @param options: Extra options.  Unknown options are ignored.
        Currently, no extra options are defined.
    @rtype: L{ParsedDocstring}
    """
    writer = _DocumentPseudoWriter()
    # The reader appends any docutils messages onto `errors` instead of
    # printing them; the override settings silence docutils' own reporting.
    reader = _EpydocReader(errors)
    overrides = {'report_level': 10000,
                 'halt_level': 10000,
                 'warning_stream': None}
    publish_string(docstring, writer=writer, reader=reader,
                   settings_overrides=overrides)
    return ParsedRstDocstring(writer.document)
class OptimizedReporter(docutils.utils.Reporter):
    """A reporter that ignores all debug messages.  This is used to
    shave a couple seconds off of epydoc's run time, since docutils
    isn't very fast about processing its own debug messages."""

    def debug(self, *args, **kwargs):
        # Intentionally a no-op: drop every debug-level system message.
        pass
class ParsedRstDocstring(ParsedDocstring):
    """
    An encoded version of a ReStructuredText docstring.  The contents
    of the docstring are encoded in the L{_document} instance
    variable.
    @ivar _document: A ReStructuredText document, encoding the
        docstring.
    @type _document: C{docutils.nodes.document}
    """
    def __init__(self, document):
        """
        @type document: C{docutils.nodes.document}
        """
        self._document = document
        # The default document reporter and transformer are not
        # pickle-able; so replace them with stubs that are.
        document.reporter = OptimizedReporter(
            document.reporter.source, 'SEVERE', 'SEVERE', '')
        document.transformer = docutils.transforms.Transformer(document)
    def split_fields(self, errors=None):
        # Inherit docs
        if errors is None: errors = []
        # Walking with _SplitFieldsTranslator removes field nodes from
        # the document as a side effect, collecting them in
        # visitor.fields.
        visitor = _SplitFieldsTranslator(self._document, errors)
        self._document.walk(visitor)
        # If the docstring contained nothing but fields, there is no
        # remaining body documentation to return.
        if len(self._document.children) > 0:
            return self, visitor.fields
        else:
            return None, visitor.fields
    def summary(self):
        # Inherit docs
        # The extractor raises NodeFound as soon as it has seen enough
        # of the document to decide; that exception is expected here.
        visitor = _SummaryExtractor(self._document)
        try: self._document.walk(visitor)
        except docutils.nodes.NodeFound: pass
        return visitor.summary, bool(visitor.other_docs)
#     def concatenate(self, other):
#         result = self._document.copy()
#         for child in (self._document.get_children() +
#                       other._document.get_children()):
#             visitor = TreeCopyVisitor(self._document)
#             child.walkabout(visitor)
#             result.append(visitor.get_tree_copy())
#         return ParsedRstDocstring(result)
    def to_html(self, docstring_linker, directory=None,
                docindex=None, context=None, **options):
        # Inherit docs
        visitor = _EpydocHTMLTranslator(self._document, docstring_linker,
                                        directory, docindex, context)
        self._document.walkabout(visitor)
        return ''.join(visitor.body)
    def to_latex(self, docstring_linker, directory=None,
                 docindex=None, context=None, **options):
        # Inherit docs
        visitor = _EpydocLaTeXTranslator(self._document, docstring_linker,
                                         directory, docindex, context)
        self._document.walkabout(visitor)
        # Normalize trailing whitespace to exactly one newline.
        return ''.join(visitor.body).strip()+'\n'
    def to_plaintext(self, docstring_linker, **options):
        # This should be replaced by something better:
        return self._document.astext()
    def __repr__(self): return '<ParsedRstDocstring: ...>'
    def index_terms(self):
        # Inherit docs
        visitor = _TermsExtractor(self._document)
        self._document.walkabout(visitor)
        return visitor.terms
class _EpydocReader(ApiLinkReader):
    """
    A reader that captures all errors that are generated by parsing,
    and appends them to a list.
    """
    # Remove the DocInfo transform, to ensure that :author: fields are
    # correctly handled.  This needs to be handled differently
    # depending on the version of docutils that's being used, because
    # the default_transforms attribute was deprecated & replaced by
    # get_transforms().
    version = [int(v) for v in docutils.__version__.split('.')]
    version += [ 0 ] * (3 - len(version))
    if version < [0,4,0]:
        default_transforms = list(ApiLinkReader.default_transforms)
        try: default_transforms.remove(docutils.transforms.frontmatter.DocInfo)
        except ValueError: pass
    else:
        def get_transforms(self):
            # Same filtering, expressed through the newer docutils API.
            return [t for t in ApiLinkReader.get_transforms(self)
                    if t != docutils.transforms.frontmatter.DocInfo]
    del version
    def __init__(self, errors):
        """
        @param errors: The list to which a L{ParseError} is appended
            for each problem docutils reports.
        """
        self._errors = errors
        ApiLinkReader.__init__(self)
    def new_document(self):
        document = new_document(self.source.source_path, self.settings)
        # Capture all warning messages.
        document.reporter.attach_observer(self.report)
        # These are used so we know how to encode warning messages:
        self._encoding = document.reporter.encoding
        self._error_handler = document.reporter.error_handler
        # Return the new document.
        return document
    def report(self, error):
        """
        Observer callback: convert a docutils system-message node into
        a L{ParseError} and store it on the error list.
        """
        # Severity levels above 2 (docutils ERROR/SEVERE) are fatal.
        # Fix: these were bare `except:` clauses, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        try: is_fatal = int(error['level']) > 2
        except Exception: is_fatal = 1
        try: linenum = int(error['line'])
        except Exception: linenum = None
        msg = ''.join([c.astext().encode(self._encoding, self._error_handler)
                       for c in error])
        self._errors.append(ParseError(msg, linenum, is_fatal))
class _DocumentPseudoWriter(Writer):
    """
    A pseudo-writer for the docutils framework, that can be used to
    access the document itself.  The output of C{_DocumentPseudoWriter}
    is just an empty string; but after it has been used, the most
    recently processed document is available as the instance variable
    C{document}
    @type document: C{docutils.nodes.document}
    @ivar document: The most recently processed document.
    """
    def __init__(self):
        self.document = None
        Writer.__init__(self)
    def translate(self):
        # Produce no textual output; the parsed document tree (stored
        # by the base class in self.document) is the real product.
        self.output = ''
class _SummaryExtractor(NodeVisitor):
    """
    A docutils node visitor that extracts the first sentence from
    the first paragraph in a document.
    """
    def __init__(self, document):
        NodeVisitor.__init__(self, document)
        # The extracted summary (a ParsedRstDocstring), or None.
        self.summary = None
        # Set to True if the document contains material beyond the
        # first sentence.
        self.other_docs = None
    def visit_document(self, node):
        self.summary = None
    # First sentence: minimal text up to a '.' that is followed by
    # whitespace or end-of-string.
    _SUMMARY_RE = re.compile(r'(\s*[\w\W]*?\.)(\s|$)')
    def visit_paragraph(self, node):
        if self.summary is not None:
            # found a paragraph after the first one
            self.other_docs = True
            raise docutils.nodes.NodeFound('Found summary')
        summary_pieces = []
        # Extract the first sentence.
        for child in node:
            if isinstance(child, docutils.nodes.Text):
                m = self._SUMMARY_RE.match(child)
                if m:
                    summary_pieces.append(docutils.nodes.Text(m.group(1)))
                    # Anything left after the first sentence means the
                    # docstring has more than just a summary.
                    other = child[m.end():]
                    if other and not other.isspace():
                        self.other_docs = True
                    break
            summary_pieces.append(child)
        # Wrap the collected pieces in shallow copies of the document
        # and paragraph so the summary stands alone.
        summary_doc = self.document.copy() # shallow copy
        summary_para = node.copy() # shallow copy
        summary_doc[:] = [summary_para]
        summary_para[:] = summary_pieces
        self.summary = ParsedRstDocstring(summary_doc)
    def visit_field(self, node):
        # Fields are never part of the summary.
        raise SkipNode
    def unknown_visit(self, node):
        'Ignore all unknown nodes'
class _TermsExtractor(NodeVisitor):
    """
    A docutils node visitor that extracts the terms from documentation.
    Terms are created using the C{:term:} interpreted text role.
    """
    def __init__(self, document):
        NodeVisitor.__init__(self, document)
        self.terms = None
        """
        The terms currently found.
        @type: C{list}
        """
    def visit_document(self, node):
        self.terms = []
        # True while walking inside an emphasis node that carries the
        # 'term' class; Text nodes seen then are collected as terms.
        self._in_term = False
    def visit_emphasis(self, node):
        if 'term' in node.get('classes'):
            self._in_term = True
    def depart_emphasis(self, node):
        if 'term' in node.get('classes'):
            self._in_term = False
    def visit_Text(self, node):
        if self._in_term:
            # Wrap the term's text in its own document copy so it can
            # be returned as a standalone ParsedRstDocstring.
            doc = self.document.copy()
            doc[:] = [node.copy()]
            self.terms.append(ParsedRstDocstring(doc))
    def unknown_visit(self, node):
        'Ignore all unknown nodes'
    def unknown_departure(self, node):
        'Ignore all unknown nodes'
class _SplitFieldsTranslator(NodeVisitor):
"""
A docutils translator that removes all fields from a document, and
collects them into the instance variable C{fields}
@ivar fields: The fields of the most recently walked document.
@type fields: C{list} of L{Field<markup.Field>}
"""
ALLOW_UNMARKED_ARG_IN_CONSOLIDATED_FIELD = True
"""If true, then consolidated fields are not required to mark
arguments with C{`backticks`}. (This is currently only
implemented for consolidated fields expressed as definition lists;
consolidated fields expressed as unordered lists still require
backticks for now."""
def __init__(self, document, errors):
NodeVisitor.__init__(self, document)
self._errors = errors
self.fields = []
self._newfields = {}
def visit_document(self, node):
self.fields = []
def visit_field(self, node):
# Remove the field from the tree.
node.parent.remove(node)
# Extract the field name & optional argument
tag = node[0].astext().split(None, 1)
tagname = tag[0]
if len(tag)>1: arg = tag[1]
else: arg = None
# Handle special fields:
fbody = node[1]
if arg is None:
for (list_tag, entry_tag) in CONSOLIDATED_FIELDS.items():
if tagname.lower() == list_tag:
try:
self.handle_consolidated_field(fbody, entry_tag)
return
except ValueError, e:
estr = 'Unable to split consolidated field '
estr += '"%s" - %s' % (tagname, e)
self._errors.append(ParseError(estr, node.line,
is_fatal=0))
# Use a @newfield to let it be displayed as-is.
if tagname.lower() not in self._newfields:
newfield = Field('newfield', tagname.lower(),
parse(tagname, 'plaintext'))
self.fields.append(newfield)
self._newfields[tagname.lower()] = 1
self._add_field(tagname, arg, fbody)
def _add_field(self, tagname, arg, fbody):
field_doc = self.document.copy()
for child in fbody: field_doc.append(child)
field_pdoc = ParsedRstDocstring(field_doc)
self.fields.append(Field(tagname, arg, field_pdoc))
def visit_field_list(self, node):
# Remove the field list from the tree. The visitor will still walk
# over the node's children.
node.parent.remove(node)
def handle_consolidated_field(self, body, tagname):
"""
Attempt to handle a consolidated section.
"""
if len(body) != 1:
raise ValueError('does not contain a single list.')
elif body[0].tagname == 'bullet_list':
self.handle_consolidated_bullet_list(body[0], tagname)
elif (body[0].tagname == 'definition_list' and
tagname in CONSOLIDATED_DEFLIST_FIELDS):
self.handle_consolidated_definition_list(body[0], tagname)
elif tagname in CONSOLIDATED_DEFLIST_FIELDS:
raise ValueError('does not contain a bulleted list or '
'definition list.')
else:
raise ValueError('does not contain a bulleted list.')
def handle_consolidated_bullet_list(self, items, tagname):
# Check the contents of the list. In particular, each list
# item should have the form:
# - `arg`: description...
n = 0
_BAD_ITEM = ("list item %d is not well formed. Each item must "
"consist of a single marked identifier (e.g., `x`), "
"optionally followed by a colon or dash and a "
"description.")
for item in items:
n += 1
if item.tagname != 'list_item' or len(item) == 0:
raise ValueError('bad bulleted list (bad child %d).' % n)
if item[0].tagname != 'paragraph':
if item[0].tagname == 'definition_list':
raise ValueError(('list item %d contains a definition '+
'list (it\'s probably indented '+
'wrong).') % n)
else:
raise ValueError(_BAD_ITEM % n)
if len(item[0]) == 0:
raise ValueError(_BAD_ITEM % n)
if item[0][0].tagname != 'title_reference':
raise ValueError(_BAD_ITEM % n)
# Everything looks good; convert to multiple fields.
for item in items:
# Extract the arg
arg = item[0][0].astext()
# Extract the field body, and remove the arg
fbody = item[:]
fbody[0] = fbody[0].copy()
fbody[0][:] = item[0][1:]
# Remove the separating ":", if present
if (len(fbody[0]) > 0 and
isinstance(fbody[0][0], docutils.nodes.Text)):
child = fbody[0][0]
if child[:1] in ':-':
child = child[1:].lstrip()
elif child[:2] in (' -', ' :'):
child = child[2:].lstrip()
# Wrap the field body, and add a new field
self._add_field(tagname, arg, fbody)
def handle_consolidated_definition_list(self, items, tagname):
# Check the list contents.
n = 0
_BAD_ITEM = ("item %d is not well formed. Each item's term must "
"consist of a single marked identifier (e.g., `x`), "
"optionally followed by a space, colon, space, and "
"a type description.")
for item in items:
n += 1
if (item.tagname != 'definition_list_item' or len(item) < 2 or
item[0].tagname != 'term' or
item[-1].tagname != 'definition'):
raise ValueError('bad definition list (bad child %d).' % n)
if len(item) > 3:
raise ValueError(_BAD_ITEM % n)
if not ((item[0][0].tagname == 'title_reference') or
(self.ALLOW_UNMARKED_ARG_IN_CONSOLIDATED_FIELD and
isinstance(item[0][0], docutils.nodes.Text))):
raise ValueError(_BAD_ITEM % n)
for child in item[0][1:]:
if child.astext() != '':
raise ValueError(_BAD_ITEM % n)
# Extract it.
for item in items:
# The basic field.
arg = item[0][0].astext()
fbody = item[-1]
self._add_field(tagname, arg, fbody)
# If there's a classifier, treat it as a type.
if len(item) == 3:
type_descr = item[1]
self._add_field('type', arg, type_descr)
def unknown_visit(self, node):
'Ignore all unknown nodes'
def latex_head_prefix():
    """Return the head-prefix lines produced by epydoc's customized
    LaTeX translator for an empty document."""
    # The '<fake>' source path is a placeholder; only the translator's
    # head_prefix is of interest here.
    document = new_document('<fake>')
    translator = _EpydocLaTeXTranslator(document)
    return translator.head_prefix
# Matches interpreted text of the form "text <target>"; group 1 is the
# link text and group 2 the target (an optional "URI:"/"URL:" prefix on
# the target is dropped).
_TARGET_RE = re.compile(r'^(.*?)\s*<(?:URI:|URL:)?([^<>]+)>$')
class _EpydocDocumentClass:
SECTIONS = ['EpydocUserSection',
'EpydocUserSubsection',
'EpydocUserSubsubsection']
def section(self, level):
if level <= len(self.SECTIONS):
return self.SECTIONS[level-1]
else:
return self.SECTIONS[-1]
class _EpydocLaTeXTranslator(LaTeXTranslator):
    # Cached docutils settings, shared by all instances; built lazily
    # on first construction.
    settings = None
    def __init__(self, document, docstring_linker=None, directory=None,
                 docindex=None, context=None):
        # Set the document's settings.
        if self.settings is None:
            settings = OptionParser([LaTeXWriter()]).get_default_values()
            settings.output_encoding = 'utf-8'
            # This forces eg \EpydocUserSection rather than
            # \EpydocUserSEction*:
            settings.use_latex_toc = True
            self.__class__.settings = settings
        document.settings = self.settings
        LaTeXTranslator.__init__(self, document)
        self._linker = docstring_linker
        self._directory = directory
        self._docindex = docindex
        self._context = context
        # Use custom section names.
        self.d_class = _EpydocDocumentClass()
    # Handle interpreted text (crossreferences)
    def visit_title_reference(self, node):
        # Crossreferences may be written "text <target>"; otherwise the
        # text itself is the target.
        m = _TARGET_RE.match(node.astext())
        if m: text, target = m.groups()
        else: target = text = node.astext()
        text = plaintext_to_latex(text)
        xref = self._linker.translate_identifier_xref(target, text)
        self.body.append(xref)
        raise SkipNode()
    # The document node itself contributes no LaTeX output.
    def visit_document(self, node): pass
    def depart_document(self, node): pass
    def visit_dotgraph(self, node):
        # Graphs can only be rendered when an output directory is known.
        if self._directory is None: raise SkipNode() # [xx] warning?
        # Generate the graph.
        graph = node.graph(self._docindex, self._context, self._linker)
        if graph is None: raise SkipNode()
        # Write the graph.
        self.body.append(graph.to_latex(self._directory))
        raise SkipNode()
    def visit_doctest_block(self, node):
        pysrc = node[0].astext()
        # codeblock=True marks a `python` directive body (no prompts).
        if node.get('codeblock'):
            self.body.append(LaTeXDoctestColorizer().colorize_codeblock(pysrc))
        else:
            self.body.append(doctest_to_latex(pysrc))
        raise SkipNode()
    def visit_admonition(self, node, name=''):
        self.body.append('\\begin{reSTadmonition}[%s]\n' %
                         self.language.labels[name])
    def depart_admonition(self, node=None):
        self.body.append('\\end{reSTadmonition}\n');
class _EpydocHTMLTranslator(HTMLTranslator):
    # Cached docutils settings, shared by all instances; built lazily
    # on first construction.
    settings = None
    def __init__(self, document, docstring_linker, directory,
                 docindex, context):
        self._linker = docstring_linker
        self._directory = directory
        self._docindex = docindex
        self._context = context
        # Set the document's settings.
        if self.settings is None:
            settings = OptionParser([HTMLWriter()]).get_default_values()
            self.__class__.settings = settings
        document.settings = self.settings
        # Call the parent constructor.
        HTMLTranslator.__init__(self, document)
    # Handle interpreted text (crossreferences)
    def visit_title_reference(self, node):
        # Crossreferences may be written "text <target>"; otherwise the
        # text itself is the target.
        m = _TARGET_RE.match(node.astext())
        if m: text, target = m.groups()
        else: target = text = node.astext()
        # NOTE(review): the HTML translator escapes the link text with
        # plaintext_to_latex -- this looks like it should be an HTML
        # escape instead; verify against the linker's expectations.
        text = plaintext_to_latex(text)
        xref = self._linker.translate_identifier_xref(target, text)
        self.body.append(xref)
        raise SkipNode()
    def should_be_compact_paragraph(self, node):
        # A docstring consisting of a single paragraph is rendered
        # without enclosing <p>...</p> tags.
        if self.document.children == [node]:
            return True
        else:
            return HTMLTranslator.should_be_compact_paragraph(self, node)
    def visit_document(self, node): pass
    def depart_document(self, node): pass
    def starttag(self, node, tagname, suffix='\n', **attributes):
        """
        This modified version of starttag makes a few changes to HTML
        tags, to prevent them from conflicting with epydoc.  In particular:
          - existing class attributes are prefixed with C{'rst-'}
          - existing names are prefixed with C{'rst-'}
          - hrefs starting with C{'#'} are prefixed with C{'rst-'}
          - hrefs not starting with C{'#'} are given target='_top'
          - all headings (C{<hM{n}>}) are given the css class C{'heading'}
        """
        # Get the list of all attribute dictionaries we need to munge.
        attr_dicts = [attributes]
        if isinstance(node, docutils.nodes.Node):
            attr_dicts.append(node.attributes)
        if isinstance(node, dict):
            attr_dicts.append(node)
        # Munge each attribute dictionary.  Unfortunately, we need to
        # iterate through attributes one at a time because some
        # versions of docutils don't case-normalize attributes.
        for attr_dict in attr_dicts:
            for (key, val) in attr_dict.items():
                # Prefix all CSS classes with "rst-"; and prefix all
                # names with "rst-" to avoid conflicts.
                if key.lower() in ('class', 'id', 'name'):
                    attr_dict[key] = 'rst-%s' % val
                elif key.lower() in ('classes', 'ids', 'names'):
                    attr_dict[key] = ['rst-%s' % cls for cls in val]
                elif key.lower() == 'href':
                    if attr_dict[key][:1]=='#':
                        attr_dict[key] = '#rst-%s' % attr_dict[key][1:]
                    else:
                        # If it's an external link, open it in a new
                        # page.
                        attr_dict['target'] = '_top'
        # For headings, use class="heading"
        if re.match(r'^h\d+$', tagname):
            attributes['class'] = ' '.join([attributes.get('class',''),
                                            'heading']).strip()
        return HTMLTranslator.starttag(self, node, tagname, suffix,
                                       **attributes)
    def visit_dotgraph(self, node):
        # Graphs can only be rendered when an output directory is known.
        if self._directory is None: raise SkipNode() # [xx] warning?
        # Generate the graph.
        graph = node.graph(self._docindex, self._context, self._linker)
        if graph is None: raise SkipNode()
        # Write the graph.
        self.body.append(graph.to_html(self._directory))
        raise SkipNode()
    def visit_doctest_block(self, node):
        pysrc = node[0].astext()
        # codeblock=True marks a `python` directive body (no prompts).
        if node.get('codeblock'):
            self.body.append(HTMLDoctestColorizer().colorize_codeblock(pysrc))
        else:
            self.body.append(doctest_to_html(pysrc))
        raise SkipNode()
    def visit_emphasis(self, node):
        # Generate a correct index term anchor
        if 'term' in node.get('classes') and node.children:
            doc = self.document.copy()
            doc[:] = [node.children[0].copy()]
            self.body.append(
                self._linker.translate_indexterm(ParsedRstDocstring(doc)))
            raise SkipNode()
        HTMLTranslator.visit_emphasis(self, node)
def python_code_directive(name, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """
    A custom restructuredtext directive which can be used to display
    syntax-highlighted Python code blocks.  This directive takes no
    arguments, and the body should contain only Python code.  This
    directive can be used instead of doctest blocks when it is
    inconvenient to list prompts on each line, or when you would
    prefer that the output not contain prompts (e.g., to make
    copy/paste easier).
    """
    # (Removed the dead local assignments ``required_arguments = 0`` and
    # ``optional_arguments = 0``: directive metadata only takes effect
    # as function attributes, which are set below.)
    text = '\n'.join(content)
    # A doctest_block flagged codeblock=True is colorized without
    # prompts by the translators' visit_doctest_block methods.
    node = docutils.nodes.doctest_block(text, text, codeblock=True)
    return [ node ]
python_code_directive.arguments = (0, 0, 0)
python_code_directive.content = True
directives.register_directive('python', python_code_directive)
def term_role(name, rawtext, text, lineno, inliner,
              options={}, content=[]):
    """Interpreted-text role that marks its content as an index term.
    The term is rendered as emphasis carrying the 'term' class, which
    the HTML translator and _TermsExtractor recognize."""
    unescaped = docutils.utils.unescape(text)
    term_node = docutils.nodes.emphasis(rawtext, unescaped, **options)
    term_node.attributes['classes'].append('term')
    return [term_node], []
roles.register_local_role('term', term_role)
######################################################################
#{ Graph Generation Directives
######################################################################
# See http://docutils.sourceforge.net/docs/howto/rst-directives.html
class dotgraph(docutils.nodes.image):
    """
    A custom docutils node that should be rendered using Graphviz dot.
    This node does not directly store the graph; instead, it stores a
    pointer to a function that can be used to generate the graph.
    This allows the graph to be built based on information that might
    not be available yet at parse time.  This graph generation
    function has the following signature:
        >>> def generate_graph(docindex, context, linker, *args):
        ...     'generates and returns a new DotGraph'
    Where C{docindex} is a docindex containing the documentation that
    epydoc has built; C{context} is the C{APIDoc} whose docstring
    contains this dotgraph node; C{linker} is a L{DocstringLinker}
    that can be used to resolve crossreferences; and C{args} is any
    extra arguments that are passed to the C{dotgraph} constructor.
    """
    def __init__(self, generate_graph_func, *generate_graph_args):
        docutils.nodes.image.__init__(self)
        # Deferred construction: store the callback and its extra
        # arguments; the graph is built later by graph().
        self.graph_func = generate_graph_func
        self.args = generate_graph_args
    def graph(self, docindex, context, linker):
        """Build and return the graph for this node (the generator may
        return None, e.g. when its inputs are unusable)."""
        return self.graph_func(docindex, context, linker, *self.args)
def _dir_option(argument):
"""A directive option spec for the orientation of a graph."""
argument = argument.lower().strip()
if argument == 'right': return 'LR'
if argument == 'left': return 'RL'
if argument == 'down': return 'TB'
if argument == 'up': return 'BT'
raise ValueError('%r unknown; choose from left, right, up, down' %
argument)
def digraph_directive(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine):
    """
    A custom restructuredtext directive which can be used to display
    Graphviz dot graphs.  This directive takes a single argument,
    which is used as the graph's name.  The contents of the directive
    are used as the body of the graph.  Any href attributes whose
    value has the form <name> will be replaced by the URL of the object
    with that name.  Here's a simple example::
     .. digraph:: example_digraph
       a -> b -> c
       c -> a [dir=\"none\"]
    """
    # The optional argument becomes the graph title.
    if arguments: title = arguments[0]
    else: title = ''
    # Defer actual graph construction until rendering time.
    return [ dotgraph(_construct_digraph, title, options.get('caption'),
                      '\n'.join(content)) ]
digraph_directive.arguments = (0, 1, True)
digraph_directive.options = {'caption': directives.unchanged}
digraph_directive.content = True
directives.register_directive('digraph', digraph_directive)
def _construct_digraph(docindex, context, linker, title, caption,
                       body):
    """Graph generator for L{digraph_directive}"""
    graph = DotGraph(title, body, caption=caption)
    # Resolve <name>-style href targets via the docstring linker.
    graph.link(linker)
    return graph
def classtree_directive(name, arguments, options, content, lineno,
                        content_offset, block_text, state, state_machine):
    """
    A custom restructuredtext directive which can be used to
    graphically display a class hierarchy.  If one or more arguments
    are given, then those classes and all their descendants will be
    displayed.  If no arguments are given, and the directive is in a
    class's docstring, then that class and all its descendants will be
    displayed.  It is an error to use this directive with no arguments
    in a non-class docstring.
    Options:
      - C{:dir:} -- Specifies the orientation of the graph.  One of
        C{down}, C{right} (default), C{left}, C{up}.
    """
    # Graph construction is deferred until rendering time.
    return [ dotgraph(_construct_classtree, arguments, options) ]
classtree_directive.arguments = (0, 1, True)
classtree_directive.options = {'dir': _dir_option}
classtree_directive.content = False
directives.register_directive('classtree', classtree_directive)
def _construct_classtree(docindex, context, linker, arguments, options):
    """Graph generator for L{classtree_directive}"""
    if len(arguments) == 1:
        # The argument is a comma- or space-separated list of class names.
        bases = [docindex.find(name, context) for name in
                 arguments[0].replace(',',' ').split()]
        bases = [d for d in bases if isinstance(d, ClassDoc)]
    elif isinstance(context, ClassDoc):
        # No arguments: default to the class whose docstring we're in.
        bases = [context]
    else:
        log.warning("Could not construct class tree: you must "
                    "specify one or more base classes.")
        return None
    return class_tree_graph(bases, linker, context, **options)
def packagetree_directive(name, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """
    A custom restructuredtext directive which can be used to
    graphically display a package hierarchy.  If one or more arguments
    are given, then those packages and all their submodules will be
    displayed.  If no arguments are given, and the directive is in a
    package's docstring, then that package and all its submodules will
    be displayed.  It is an error to use this directive with no
    arguments in a non-package docstring.
    Options:
      - C{:dir:} -- Specifies the orientation of the graph.  One of
        C{down}, C{right} (default), C{left}, C{up}.
    """
    # Graph construction is deferred until rendering time.
    return [ dotgraph(_construct_packagetree, arguments, options) ]
packagetree_directive.arguments = (0, 1, True)
# 'style' selects between UML-style and plain tree rendering.
packagetree_directive.options = {
    'dir': _dir_option,
    'style': lambda a:directives.choice(a.lower(), ('uml', 'tree'))}
packagetree_directive.content = False
directives.register_directive('packagetree', packagetree_directive)
def _construct_packagetree(docindex, context, linker, arguments, options):
    """Graph generator for L{packagetree_directive}"""
    if len(arguments) == 1:
        # The argument is a comma- or space-separated list of packages.
        packages = [docindex.find(name, context) for name in
                    arguments[0].replace(',',' ').split()]
        packages = [d for d in packages if isinstance(d, ModuleDoc)]
    elif isinstance(context, ModuleDoc):
        # No arguments: default to the module whose docstring we're in.
        packages = [context]
    else:
        log.warning("Could not construct package tree: you must "
                    "specify one or more root packages.")
        return None
    return package_tree_graph(packages, linker, context, **options)
def importgraph_directive(name, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine):
    """
    A custom restructuredtext directive which can be used to display a
    module import graph.  The optional argument names the modules to
    include; with no argument, all documented root modules are used
    (see L{_construct_importgraph}).
    """
    return [ dotgraph(_construct_importgraph, arguments, options) ]
importgraph_directive.arguments = (0, 1, True)
importgraph_directive.options = {'dir': _dir_option}
importgraph_directive.content = False
directives.register_directive('importgraph', importgraph_directive)
def _construct_importgraph(docindex, context, linker, arguments, options):
    """Graph generator for L{importgraph_directive}"""
    if len(arguments) == 1:
        # The argument is a comma- or space-separated list of modules.
        modules = [ docindex.find(name, context)
                    for name in arguments[0].replace(',',' ').split() ]
        modules = [d for d in modules if isinstance(d, ModuleDoc)]
    else:
        # No arguments: use every documented root module.
        modules = [d for d in docindex.root if isinstance(d, ModuleDoc)]
    return import_graph(modules, docindex, linker, context, **options)
def callgraph_directive(name, arguments, options, content, lineno,
                        content_offset, block_text, state, state_machine):
    """
    A custom restructuredtext directive which can be used to display a
    call graph.  The optional argument names the objects to include;
    with no argument, the object whose docstring contains the
    directive is used (see L{_construct_callgraph}).  The
    C{:add_callers:} and C{:add_callees:} flags are passed through to
    the graph constructor.
    """
    return [ dotgraph(_construct_callgraph, arguments, options) ]
callgraph_directive.arguments = (0, 1, True)
callgraph_directive.options = {'dir': _dir_option,
                               'add_callers': directives.flag,
                               'add_callees': directives.flag}
callgraph_directive.content = False
directives.register_directive('callgraph', callgraph_directive)
def _construct_callgraph(docindex, context, linker, arguments, options):
    """Graph generator for L{callgraph_directive}"""
    if len(arguments) == 1:
        # The argument is a comma- or space-separated list of names.
        docs = [docindex.find(name, context) for name in
                arguments[0].replace(',',' ').split()]
        docs = [doc for doc in docs if doc is not None]
    else:
        # No arguments: default to the docstring's own context object.
        docs = [context]
    return call_graph(docs, docindex, linker, context, **options)
| {
"content_hash": "8ee70d5ae83bff7f6306217c125d49dd",
"timestamp": "",
"source": "github",
"line_count": 940,
"max_line_length": 80,
"avg_line_length": 40.26276595744681,
"alnum_prop": 0.6142626892488177,
"repo_name": "danse/epydoc",
"id": "da35c17f58406b5b888c15523738898fd226da05",
"size": "38007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epydoc/markup/restructuredtext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1115656"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller has
    # already selected a settings module.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "search.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch to Django's command-line interface (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| {
"content_hash": "2a26187f9ce14cafc97174e5da1d53e1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.23809523809524,
"alnum_prop": 0.6202046035805626,
"repo_name": "shailendert/Vertical-Search-Engine",
"id": "d3fadeab366ccdb0eba8b64ccce4ec0004f8f22f",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "55792"
},
{
"name": "HTML",
"bytes": "6434"
},
{
"name": "JavaScript",
"bytes": "97109"
},
{
"name": "Python",
"bytes": "29740"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the TurnexType model: a named user type with a display
    # color, owned by a TurnexForm.
    dependencies = [
        ('forms', '0005_auto_20170624_1747'),
    ]
    operations = [
        migrations.CreateModel(
            name='TurnexType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_type', models.CharField(max_length=100)),
                ('color', models.CharField(max_length=10)),
                # Deleting the owning TurnexForm cascades to its types.
                ('form_owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forms.TurnexForm')),
            ],
        ),
    ]
| {
"content_hash": "dbd6f60f82f7c03bb7e6264b56147543",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 118,
"avg_line_length": 31.52173913043478,
"alnum_prop": 0.5958620689655172,
"repo_name": "mleger45/turnex",
"id": "4ffd543749d841cd029559675acf16c64b11df4a",
"size": "798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms/migrations/0006_turnextype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91270"
},
{
"name": "Dockerfile",
"bytes": "282"
},
{
"name": "JavaScript",
"bytes": "153971"
},
{
"name": "Jinja",
"bytes": "7295"
},
{
"name": "Procfile",
"bytes": "100"
},
{
"name": "Python",
"bytes": "38258"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
} |
from harpoon.docker import _create_image_matcher
class TestImageMatcher(object):
    """Tests for _create_image_matcher: the matcher must match only the
    exact image name, ignoring a registry prefix and a tag suffix but
    rejecting prefixed/suffixed name variants."""
    def test_image_without_registry_and_tag(self):
        matcher = _create_image_matcher("spam")
        assert matcher("spam")
        # Name variants must not match.
        assert not matcher("spamsuffix")
        assert not matcher("prefixspam")
    def test_image_with_tag_and_without_registry(self):
        matcher = _create_image_matcher("spam")
        # A tag after ':' does not affect matching.
        assert matcher("spam:42")
        assert not matcher("spamsuffix:42")
        assert not matcher("prefixspam:42")
    def test_image_with_registry_and_tag(self):
        matcher = _create_image_matcher("spam")
        # A registry prefix before '/' does not affect matching.
        assert matcher("hub.example.com/spam:42")
        assert not matcher("hub.example.com/spamsuffix:42")
        assert not matcher("hub.example.com/prefixspam:42")
| {
"content_hash": "854a7b02ea67eb5e7be6395ab218b8ef",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 59,
"avg_line_length": 37.57142857142857,
"alnum_prop": 0.6653992395437263,
"repo_name": "Trundle/harpoon",
"id": "8b7f51a9a8341234c75f3223ff6f66c0b890de1a",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "harpoon/test/test_docker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13961"
}
],
"symlink_target": ""
} |
"""Config flow to configure the GDACS integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers import config_validation as cv
from .const import CONF_CATEGORIES, DEFAULT_RADIUS, DEFAULT_SCAN_INTERVAL, DOMAIN
# Config-flow form schema: only the monitored radius is collected here;
# latitude/longitude default to the Home Assistant instance's values in
# async_step_user.
DATA_SCHEMA = vol.Schema(
    {vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): cv.positive_int}
)
_LOGGER = logging.getLogger(__name__)
class GdacsFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a GDACS config flow."""
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
    async def _show_form(self, errors=None):
        """Show the form to the user."""
        return self.async_show_form(
            step_id="user", data_schema=DATA_SCHEMA, errors=errors or {}
        )
    async def async_step_import(self, import_config):
        """Import a config entry from configuration.yaml."""
        # YAML import is handled identically to user-entered data.
        return await self.async_step_user(import_config)
    async def async_step_user(self, user_input=None):
        """Handle the start of the config flow."""
        _LOGGER.debug("User input: %s", user_input)
        if not user_input:
            return await self._show_form()
        # Fall back to the Home Assistant instance's home coordinates
        # when none were supplied.
        latitude = user_input.get(CONF_LATITUDE, self.hass.config.latitude)
        user_input[CONF_LATITUDE] = latitude
        longitude = user_input.get(CONF_LONGITUDE, self.hass.config.longitude)
        user_input[CONF_LONGITUDE] = longitude
        # One entry per coordinate pair: "lat, lon" is both the unique
        # id and the entry title.
        identifier = f"{user_input[CONF_LATITUDE]}, {user_input[CONF_LONGITUDE]}"
        await self.async_set_unique_id(identifier)
        self._abort_if_unique_id_configured()
        # NOTE(review): `.seconds` assumes scan_interval is a timedelta
        # shorter than one day (whole days are dropped; compare
        # `.total_seconds()`) -- confirm upstream validation guarantees
        # this.
        scan_interval = user_input.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
        user_input[CONF_SCAN_INTERVAL] = scan_interval.seconds
        categories = user_input.get(CONF_CATEGORIES, [])
        user_input[CONF_CATEGORIES] = categories
        return self.async_create_entry(title=identifier, data=user_input)
| {
"content_hash": "38ede4744ba54b5f4077209d03a24bb5",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 81,
"avg_line_length": 33.885245901639344,
"alnum_prop": 0.683599419448476,
"repo_name": "adrienbrault/home-assistant",
"id": "b672b56ad9b9b672ee93092f9d4d4f2d80b2e81d",
"size": "2067",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/gdacs/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
__author__ = "Juan C. Caicedo, caicedo@illinois.edu"
from pybrain.rl.learners.valuebased.interface import ActionValueInterface
import caffe
import os
import utils.utils as cu
import numpy as np
import random
import learn.rl.RLConfig as config
EXPLORE = 0
EXPLOIT = 1
def defaultSampler():
  """Uniform random action values with shape (1, outputActions)."""
  shape = [1, config.geti('outputActions')]
  return np.random.random(shape)
class QNetwork(ActionValueInterface):
  """Caffe-backed Q-value network with epsilon-greedy action selection."""
  # Path of the snapshot written after the configured number of training
  # iterations for the current batch.
  networkFile = config.get('networkDir') + config.get('snapshotPrefix') + '_iter_' + config.get('trainingIterationsPerBatch') + '.caffemodel'
  def __init__(self):
    self.net = None
    print 'QNetwork::Init. Loading ',self.networkFile
    self.loadNetwork()
    # Sampler used to produce random values when exploring.
    self.sampler = defaultSampler
  def releaseNetwork(self):
    """Drop the loaded Caffe network so its memory can be reclaimed."""
    if self.net != None:
      del self.net
      self.net = None
  def loadNetwork(self, definition='deploy.prototxt'):
    """Load the trained snapshot if present; otherwise leave net as None."""
    if os.path.isfile(self.networkFile):
      modelFile = config.get('networkDir') + definition
      self.net = caffe.Net(modelFile, self.networkFile)
      self.net.set_phase_test()
      self.net.set_mode_gpu()
      print 'QNetwork loaded'
    else:
      self.net = None
      print 'QNetwork not found'
  def getMaxAction(self, state):
    """Return the index of the best-valued action per state row."""
    values = self.getActionValues(state)
    return np.argmax(values, 1)
  def getActionValues(self, state):
    # Random values when no network is loaded or when the epsilon-greedy
    # policy chooses to explore; otherwise forward through the network.
    if self.net == None or self.exploreOrExploit() == EXPLORE:
      return self.sampler()
    else:
      return self.getActivations(state)
  def getActivations(self, state):
    # Reshape (batch, features) into Caffe's (batch, channels, 1, 1) layout.
    out = self.net.forward_all( **{self.net.inputs[0]: state.reshape( (state.shape[0], state.shape[1], 1, 1) )} )
    return out['qvalues'].squeeze(axis=(2,3))
  def setEpsilonGreedy(self, epsilon, sampler=None):
    """Set exploration rate and, optionally, a custom random sampler."""
    if sampler is not None:
      self.sampler = sampler
    self.epsilon = epsilon
  def exploreOrExploit(self):
    # With probability epsilon choose EXPLORE, otherwise EXPLOIT.
    if self.epsilon > 0:
      if random.random() < self.epsilon:
        return EXPLORE
    return EXPLOIT
| {
"content_hash": "870140e460b2685f46f1c0e48e674dc0",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 141,
"avg_line_length": 28.55223880597015,
"alnum_prop": 0.6826973340303188,
"repo_name": "jccaicedo/localization-agent",
"id": "41379926a9d951c3524b6d208816f87d1b92e20a",
"size": "1913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "detection/boxsearch/QNetwork.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "619"
},
{
"name": "Python",
"bytes": "391367"
},
{
"name": "Shell",
"bytes": "18874"
}
],
"symlink_target": ""
} |
import os
import csv
import re
import pandas as pd
from pandas import Series, DataFrame
# Placeholder for per-site data; not populated by the loop below.
siteDict = {}
for dirPath, dirNames, fileNames in os.walk('BSR'):
    print '#file:' + str(len(fileNames))
    for i,f in enumerate(fileNames[:]):
        if f[0] == '.':
            # Skip hidden files (e.g. .DS_Store).
            continue
        else:
            fullfile = os.path.join(dirPath, f)
            print i
            df = pd.read_csv(fullfile)
            # Files with 5 columns carry a leading sequence column; drop it
            # and normalize the remaining column names.
            if len(df.columns) == 5: #file contain '序'
                df = df.drop( df.columns[0], axis=1 ) #drop original index
                df.columns = ['securities', 'price', 'buy', 'sell']
                # Only retain the id part of the securities column.
                for index in df.index:
                    df.ix[index, 'securities'] = df.ix[index, 'securities'].split(' ')[0]
            # NOTE(review): this write-back sits outside the 5-column branch,
            # so every CSV is rewritten (utf-8, no index) even when unchanged
            # -- confirm that is intended.
            df.to_csv(fullfile, encoding='utf-8', index=False)
| {
"content_hash": "2db67a2db24cae60d036bf74cc14a1bd",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 28.16,
"alnum_prop": 0.6448863636363636,
"repo_name": "ypochien/TaiwanStockBSR",
"id": "9e2359ee893259c81d066f98b49e455c98df058b",
"size": "730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batch_trim_BSR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15079"
}
],
"symlink_target": ""
} |
import tomviz.operators
import tomviz.utils
import os
from enum import Enum
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
class DistanceMethod(Enum):
    """How the distance between neighboring voxels is measured."""
    Eucledian = 0  # (sic) straight-line distance: sqrt of summed squares
    CityBlock = 1  # Manhattan distance; diagonal steps are rejected
    ChessBoard = 2  # every neighbor (including diagonals) costs one unit
class PropagationDirection(Enum):
    """Volume face from which distances are propagated.

    value // 2 selects the axis; value % 2 == 1 means the negative face.
    """
    Xpos = 0
    Xneg = 1
    Ypos = 2
    Yneg = 3
    Zpos = 4
    Zneg = 5
class OperatorStages(Enum):
    """Phases of the tortuosity operator used to scale progress reporting."""
    GraphGeneration = 0
    GraphTraversal = 1
def coord_iterator(extent):
    """Yield every integer coordinate tuple inside `extent`.

    `extent` is a sequence of (start, stop) pairs, one per dimension; the
    last dimension varies fastest (nested-loop order). An empty extent
    yields nothing.
    """
    if not extent:
        return
    (start, stop), remainder = extent[0], extent[1:]
    if remainder:
        for head in range(start, stop):
            for tail in coord_iterator(remainder):
                yield (head,) + tail
    else:
        for head in range(start, stop):
            yield (head,)
def neighbor_iterator(ndim):
    """Yield all nonzero offset vectors with components in {-1, 0, 1}."""
    unit_box = ((-1, 2),) * ndim
    for offset in coord_iterator(unit_box):
        # Skip the all-zero offset (the voxel itself).
        if any(offset):
            yield offset
def get_distance_function(method):
    """Return a function mapping a neighbor offset vector to its edge weight.

    The CityBlock function returns None for diagonal offsets; callers use
    that to reject those edges entirely.
    """
    if method == DistanceMethod.Eucledian:
        import math
        def euclidean(vec):
            return math.sqrt(sum(component * component for component in vec))
        return euclidean
    elif method == DistanceMethod.ChessBoard:
        def chessboard(vec):
            return 1
        return chessboard
    elif method == DistanceMethod.CityBlock:
        def city_block(vec):
            # Diagonal moves are not valid city-block steps.
            return None if sum(abs(component) for component in vec) > 1 else 1
        return city_block
    raise Exception("Unknown distance method %s" % method)
def volume_to_graph(volume, phase, method, update_progress=None):
    """Convert the voxels of `volume` equal to `phase` into a weighted graph.

    Node indices 0 .. 2*ndim-1 are reserved for auxiliary "face" nodes (one
    per volume face); real voxel nodes start at 2*ndim. Edges connect
    neighboring phase voxels, weighted by the selected distance method.

    Args:
        volume: ndarray of phase labels.
        phase: scalar value identifying the voxels that become nodes.
        method: a DistanceMethod selecting the edge-weight function.
        update_progress: optional callable taking a fraction in [0, 1].

    Returns:
        (node_map, inv_node_map, node_map_array, edges, aux_edges)
    """
    invalid_node_idx = -1
    # The distance function
    distance = get_distance_function(method)
    # Map voxel coordinate to node index
    node_map = {}
    # Map node index to voxel coordinate
    inv_node_map = {}
    # Map voxel coordinate to node index (array form).
    # This is useful to easily slice when doing analysis.
    # We can do without if memory becomes an issue.
    node_map_array = np.empty(shape=volume.shape, dtype=np.int32)
    node_map_array.fill(invalid_node_idx)
    # Weighted (i.e. distance) connections between nodes
    edges = {}
    # Edges between auxiliary face nodes and actual nodes on the first slice
    # (x+, x-, y+, y-, z+, z-)
    aux_edges = {}
    # Reserve node_idx 0 to ndim * 2 - 1 for the auxiliary nodes.
    # These are nodes at the beginning/end of the x/y direction
    # In the propagation this are the distances we are calculating from
    node_idx = volume.ndim * 2
    # Map node indices to positions in the volume
    extent = tuple((0, s) for s in volume.shape)
    for coord in coord_iterator(extent):
        if volume[coord] == phase:
            node_map_array[coord] = node_idx
            inv_node_map[node_idx] = coord
            node_map[coord] = node_idx
            node_idx += 1
    # Find edges between the nodes
    delta_coordinates = [d for d in neighbor_iterator(volume.ndim)]
    delta_distances = [distance(c) for c in delta_coordinates]
    n_nodes = len(node_map)
    # Update every 5%
    update_every = max(n_nodes // 20, 1)
    for i, (node_idx, node_coord) in enumerate(inv_node_map.items()):
        if i % update_every == 0 and update_progress is not None:
            update_progress(i / n_nodes)
        for delta_coord, delta_dist in zip(delta_coordinates, delta_distances):
            neighbor_coord = tuple(c + d
                                   for c, d in zip(node_coord, delta_coord))
            neighbor_idx = node_map.get(neighbor_coord)
            if neighbor_idx is None:
                continue
            # delta_dist is None for offsets rejected by the distance
            # method (e.g. diagonals under CityBlock).
            if delta_dist is not None:
                # Reduce memory footprint by only saving edges once
                edge_key = (min(node_idx, neighbor_idx),
                            max(node_idx, neighbor_idx))
                edges[edge_key] = delta_dist
    # Add edges between aux nodes at the faces of the volume
    for i in range(volume.ndim):
        for j in range(2):
            node_idx = i * 2 + j
            face_slice = [slice(None)] * volume.ndim
            # -j selects index 0 (first slice) for the positive face and
            # -1 (last slice) for the negative face.
            face_slice[i] = -j
            face_slice = tuple(face_slice)
            for neighbor_idx in node_map_array[face_slice].flatten():
                if neighbor_idx != invalid_node_idx:
                    aux_edges[(node_idx, neighbor_idx)] = 1
    if update_progress is not None:
        update_progress(1)
    return node_map, inv_node_map, node_map_array, edges, aux_edges
def edges_to_sparse_matrix(edges, aux_edges, aux_node_idx, n_nodes, ndim):
    """Assemble a CSR adjacency matrix from voxel edges plus the auxiliary
    edges belonging to the requested face node.

    Only aux edges whose source is `aux_node_idx` are included, so the
    resulting graph is rooted at a single face. The matrix is sized for all
    real nodes plus the 2*ndim reserved face nodes.
    """
    rows = []
    cols = []
    weights = []
    for (src, dst), dist in edges.items():
        rows.append(src)
        cols.append(dst)
        weights.append(dist)
    for (src, dst), dist in aux_edges.items():
        if src == aux_node_idx:
            rows.append(src)
            cols.append(dst)
            weights.append(dist)
    total_nodes = n_nodes + 2 * ndim
    row = np.asarray(rows, dtype=np.int32)
    col = np.asarray(cols, dtype=np.int32)
    data = np.asarray(weights, dtype=np.float32)
    return csr_matrix((data, (row, col)), shape=(total_nodes, total_nodes))
def distance_matrix_to_volume(inv_node_map, dist_matrix, shape):
    """Scatter per-node distances back into a dense volume.

    Voxels without a node (not part of the phase) and nodes that are
    unreachable (infinite or NaN distance) are marked with -1.

    Args:
        inv_node_map: dict mapping node index -> voxel coordinate tuple.
        dist_matrix: 1-D array of distances indexed by node index.
        shape: shape of the output volume.

    Returns:
        float32 ndarray of the given shape.
    """
    unreachable_scalar_value = -1
    volume = np.empty(shape=shape, dtype=np.float32)
    volume.fill(unreachable_scalar_value)
    for node_idx, node_coord in inv_node_map.items():
        volume[node_coord] = dist_matrix[node_idx]
    volume[volume == np.inf] = unreachable_scalar_value
    # Bug fix: `volume == np.nan` is always False (NaN never compares equal
    # to anything), so NaN distances used to leak through unmasked.
    volume[np.isnan(volume)] = unreachable_scalar_value
    return volume
def get_slice_scalars(volume, propagation_direction, slice_number):
    """Extract the slice `slice_number` steps in from the face selected by
    `propagation_direction`.

    Even directions count from index 0 upward; odd directions count from
    the last index downward along the same axis.
    """
    axis_idx = propagation_direction // 2
    if propagation_direction % 2 == 1:
        target = (volume.shape[axis_idx] - 1) - slice_number
    else:
        target = slice_number
    assert 0 <= target < volume.shape[axis_idx]
    selector = [slice(None)] * volume.ndim
    selector[axis_idx] = target
    return volume[tuple(selector)]
def calculate_avg_path_length(volume, propagation_direction):
    """Average propagated distance per slice along the propagation axis.

    Returns (column_names, table_data); each row holds the 1-based linear
    distance of a slice and the mean path length over its reachable voxels.
    """
    unreachable_scalar_value = -1
    axis_idx = propagation_direction // 2
    n_slices = volume.shape[axis_idx]
    column_names = ["Linear Distance", "Actual Distance"]
    table_data = np.empty(shape=(n_slices, 2))
    for slice_idx in range(n_slices):
        slice_scalars = get_slice_scalars(volume, propagation_direction,
                                          slice_idx)
        reachable = np.extract(slice_scalars != unreachable_scalar_value,
                               slice_scalars)
        table_data[slice_idx, 0] = slice_idx + 1
        table_data[slice_idx, 1] = reachable.mean()
    return column_names, table_data
def calculate_tortuosity(straight_distance, path_length):
    """Summarize tortuosity four ways from per-slice distances.

    Returns (column_names, table_data) with a single row containing the
    scale (fit slope), end, average, and slope tortuosity (unimplemented,
    reported as -1).
    """
    column_names = ["Scale", "End", "Average", "Slope"]
    table_data = np.empty(shape=(1, 4))
    # Scale tortuosity: slope of a linear fit of path length vs. distance.
    fit_slope, _intercept = np.polyfit(straight_distance, path_length, deg=1)
    table_data[0, 0] = fit_slope
    # End tortuosity: ratio at the far face.
    table_data[0, 1] = path_length[-1] / straight_distance[-1]
    # Average tortuosity: mean per-slice ratio.
    table_data[0, 2] = np.mean(path_length / straight_distance)
    # Slope tortuosity is not implemented yet; -1 is the placeholder.
    table_data[0, 3] = -1
    return column_names, table_data
def calculate_tortuosity_distribution(volume, propagation_direction):
    """Histogram of per-voxel tortuosity on the final slice.

    Tortuosity is each reachable voxel's path length divided by the linear
    distance to the last slice, binned into 100 bins over [1, 6]. Returns
    (column_names, table_data) with one row per bin (left edge, count).
    """
    unreachable_scalar_value = -1
    axis_idx = propagation_direction // 2
    final_slice = volume.shape[axis_idx] - 1
    path_lengths = get_slice_scalars(volume, propagation_direction,
                                     final_slice)
    path_lengths = np.extract(path_lengths != unreachable_scalar_value,
                              path_lengths)
    linear_distance = final_slice + 1
    tortuosities = path_lengths / linear_distance
    n_bins = 100
    column_names = ["Tortuosity", "Occurrence"]
    bin_edges = np.linspace(1, 6, num=n_bins + 1)
    occurrence, _ = np.histogram(tortuosities, bin_edges)
    table_data = np.empty(shape=(n_bins, 2))
    table_data[:, 0] = bin_edges[:-1]
    table_data[:, 1] = occurrence
    return column_names, table_data
def get_update_progress_fn(progress, stage):
    """Build a callback mapping a stage-local fraction in [0, 1] onto the
    shared progress bar.

    Graph generation owns the first 90% of the bar; traversal the rest.
    An unknown stage maps the fraction onto the whole bar.
    """
    GRAPH_GENERATION_FRACTION = 0.9
    OTHER_FRACTION = 1 - GRAPH_GENERATION_FRACTION
    if stage == OperatorStages.GraphGeneration:
        def update_progress(value):
            progress.value = round(progress.maximum * value *
                                   GRAPH_GENERATION_FRACTION)
    elif stage == OperatorStages.GraphTraversal:
        def update_progress(value):
            progress.value = round(progress.maximum *
                                   (value * OTHER_FRACTION +
                                    GRAPH_GENERATION_FRACTION))
    else:
        def update_progress(value):
            progress.value = round(progress.maximum * value)
    return update_progress
class TortuosityOperator(tomviz.operators.CancelableOperator):
    """Distance propagation method for calculating tortuosity.
    https://doi.org/10.1016/j.jpowsour.2013.10.026
    """
    def transform(self, dataset, phase=1,
                  distance_method=DistanceMethod.Eucledian,
                  propagation_direction=PropagationDirection.Xpos,
                  save_to_file=False, output_folder=""):
        """Operator transform method
        Args:
            phase (int): the scalar value in the dataset that is considered
                         a pore
            distance_method (enum): the distance method to calculate distance
                                    between nodes
            propagation_direction (enum): the face from which distances are
                                          calculated (X+, X-, Y+, etc.)
            save_to_file (bool): save the detailed output of the operator to
                                 files. If set to True, propagate along all
                                 six directions and save the results, but only
                                 display results for one in the application.
            output_folder (str): the path to the folder where the optional
                                 output files are written to
        """
        # Arguments may arrive as raw ints from the UI; normalize to enums.
        distance_method = DistanceMethod(distance_method)
        propagation_direction = PropagationDirection(propagation_direction)
        scalars = dataset.active_scalars
        if scalars is None:
            raise RuntimeError("No scalars found!")
        # Degrade gracefully when the output folder is not writable.
        if save_to_file and not os.access(output_folder, os.W_OK):
            import warnings
            save_to_file = False
            warnings.warn(
                "Unable to write to destination folder %s" % output_folder)
        # When saving, propagate along all six faces; otherwise only the
        # requested one.
        if save_to_file:
            propagation_directions = list(PropagationDirection)
        else:
            propagation_directions = [propagation_direction]
        self.progress.maximum = 100
        graph_generation_update_progress_fn = get_update_progress_fn(
            self.progress, OperatorStages.GraphGeneration)
        graph_traversal_update_progress_fn = get_update_progress_fn(
            self.progress, OperatorStages.GraphTraversal)
        self.progress.message = "Converting volume to graph..."
        # The graph is built once and reused for every direction; only the
        # auxiliary face node differs per direction.
        node_map, inv_node_map, node_map_array, edges, aux_edges = (
            volume_to_graph(
                scalars, phase, distance_method,
                graph_generation_update_progress_fn)
        )
        n_directions = len(propagation_directions)
        return_values = {}
        for i, direction in enumerate(propagation_directions):
            self.progress.message = "Propagating along %s" % direction.name
            graph_traversal_update_progress_fn(i / n_directions)
            csgraph = edges_to_sparse_matrix(
                edges, aux_edges, direction.value,
                len(inv_node_map), scalars.ndim)
            # NOTE(review): with multiple directions these fractions
            # (i / n + 0.33, i / n + 0.9) can exceed 1, pushing progress
            # past its maximum -- confirm intended.
            graph_traversal_update_progress_fn(i / n_directions + 0.33)
            # Shortest path from the auxiliary face node to every voxel node.
            dist_matrix = dijkstra(csgraph, directed=False,
                                   indices=direction.value)
            graph_traversal_update_progress_fn(i / n_directions + 0.9)
            # Generate the distance map
            result = distance_matrix_to_volume(inv_node_map, dist_matrix,
                                               scalars.shape)
            if save_to_file:
                filename = "distance_map_%s.npy" % direction.name
                np.save(os.path.join(output_folder, filename), result)
            if direction == propagation_direction:
                dataset.active_scalars = result
            # Calculate the average path length per slice
            column_names, table_data = calculate_avg_path_length(
                result, direction.value)
            if save_to_file:
                filename = "path_length_%s.csv" % direction.name
                np.savetxt(os.path.join(output_folder, filename), table_data,
                           delimiter=", ", header=", ".join(column_names))
            if direction == propagation_direction:
                table = tomviz.utils.make_spreadsheet(column_names, table_data)
                return_values["path_length"] = table
            # Calculate the tortuosity (4 different ways)
            column_names, table_data = calculate_tortuosity(
                table_data[:, 0], table_data[:, 1])
            if save_to_file:
                filename = "tortuosity_%s.csv" % direction.name
                np.savetxt(os.path.join(output_folder, filename), table_data,
                           delimiter=", ", header=", ".join(column_names))
            if direction == propagation_direction:
                table = tomviz.utils.make_spreadsheet(column_names, table_data)
                return_values["tortuosity"] = table
            # Calculate the tortuosity distribution of the last slice
            column_names, table_data = calculate_tortuosity_distribution(
                result, direction.value)
            if save_to_file:
                filename = "tortuosity_distribution_%s.csv" % direction.name
                np.savetxt(os.path.join(output_folder, filename), table_data,
                           delimiter=", ", header=", ".join(column_names))
            if direction == propagation_direction:
                table = tomviz.utils.make_spreadsheet(column_names, table_data)
                return_values["tortuosity_distribution"] = table
        return return_values
| {
"content_hash": "e759c1795f3b4748dafcca6e501ca57c",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 79,
"avg_line_length": 33.83410138248848,
"alnum_prop": 0.5917324979569599,
"repo_name": "OpenChemistry/tomviz",
"id": "03c48fe70b26c6d0a6a633f97f13d2c946cc6607",
"size": "14684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tomviz/python/Tortuosity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "C",
"bytes": "251"
},
{
"name": "C++",
"bytes": "2173416"
},
{
"name": "CMake",
"bytes": "47116"
},
{
"name": "Dockerfile",
"bytes": "7467"
},
{
"name": "Python",
"bytes": "552224"
},
{
"name": "Shell",
"bytes": "10796"
}
],
"symlink_target": ""
} |
from time import sleep
import zmq
from locust.rpc import zmqrpc, Message
from locust.test.testcases import LocustTestCase
from locust.exception import RPCError
class ZMQRPC_tests(LocustTestCase):
    """Integration tests for the ZeroMQ-based RPC client/server pair."""
    def setUp(self):
        super().setUp()
        # Port 0 asks the OS for any free port; the client connects to the
        # port the server actually received.
        self.server = zmqrpc.Server("127.0.0.1", 0)
        self.client = zmqrpc.Client("localhost", self.server.port, "identity")
    def tearDown(self):
        self.server.close()
        self.client.close()
        super().tearDown()
    def test_constructor(self):
        # Both ends enable TCP keepalive with a 30 second idle time.
        self.assertEqual(self.server.socket.getsockopt(zmq.TCP_KEEPALIVE), 1)
        self.assertEqual(self.server.socket.getsockopt(zmq.TCP_KEEPALIVE_IDLE), 30)
        self.assertEqual(self.client.socket.getsockopt(zmq.TCP_KEEPALIVE), 1)
        self.assertEqual(self.client.socket.getsockopt(zmq.TCP_KEEPALIVE_IDLE), 30)
    def test_client_send(self):
        # A message sent by the client arrives with its identity and payload.
        self.client.send(Message("test", "message", "identity"))
        addr, msg = self.server.recv_from_client()
        self.assertEqual(addr, "identity")
        self.assertEqual(msg.type, "test")
        self.assertEqual(msg.data, "message")
    def test_client_recv(self):
        sleep(0.1)
        # We have to wait for the client to finish connecting
        # before sending a msg to it.
        self.server.send_to_client(Message("test", "message", "identity"))
        msg = self.client.recv()
        self.assertEqual(msg.type, "test")
        self.assertEqual(msg.data, "message")
        self.assertEqual(msg.node_id, "identity")
    def test_client_retry(self):
        # Receiving on a server whose socket is already closed raises
        # RPCError instead of hanging.
        server = zmqrpc.Server("127.0.0.1", 0)
        server.socket.close()
        with self.assertRaises(RPCError):
            server.recv_from_client()
    def test_rpc_error(self):
        # Binding a second server to an occupied port fails ...
        server = zmqrpc.Server("127.0.0.1", 0)
        with self.assertRaises(RPCError):
            server = zmqrpc.Server("127.0.0.1", server.port)
        server.close()
        # ... and sending through a closed server fails too.
        with self.assertRaises(RPCError):
            server.send_to_client(Message("test", "message", "identity"))
| {
"content_hash": "abad393d6373663cf8e1688a2a6b8afb",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 83,
"avg_line_length": 37.592592592592595,
"alnum_prop": 0.6433497536945813,
"repo_name": "mbeacom/locust",
"id": "abaa8144adbf09d863af29b6c714099d0a332960",
"size": "2030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "locust/test/test_zmqrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "732"
},
{
"name": "HTML",
"bytes": "30187"
},
{
"name": "JavaScript",
"bytes": "17229"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "Python",
"bytes": "809070"
},
{
"name": "Sass",
"bytes": "10379"
},
{
"name": "Shell",
"bytes": "3452"
}
],
"symlink_target": ""
} |
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
# from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
maker = sessionmaker( autoflush = True, autocommit = False,
                     extension = ZopeTransactionExtension() )
DBSession = scoped_session( maker )
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
#
######
def init_model( engine ):
    """Call me before using any of the tables or classes in the model."""
    # Bind the thread-local session factory to the given engine.
    DBSession.configure( bind = engine )
    # If you are using reflection to introspect your database and create
    # table objects for you, your tables must be defined and mapped inside
    # the init_model function, so that the engine is available if you
    # use the model outside tg2, you need to make sure this is called before
    # you use the model.
    #
    # See the following example:
    # global t_reflected
    # t_reflected = Table("Reflected", metadata,
    # autoload=True, autoload_with=engine)
    # mapper(Reflected, t_reflected)
# Import your model modules here.
# NOTE(review): the wildcard imports below pull every public name from these
# modules into this namespace; collisions are resolved by import order --
# confirm that is intended.
from auth import User, Group, Permission
from logic import *
from sysutil import *
from fileutil import *
| {
"content_hash": "38eaa6fdf8943752cdd1e3e7ef0d7cf1",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 37.25,
"alnum_prop": 0.7441275167785235,
"repo_name": "LamCiuLoeng/budget",
"id": "b7047b1f44f9376538330052c860be6e82338daf",
"size": "2408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "budget/model/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44995"
},
{
"name": "JavaScript",
"bytes": "28497"
},
{
"name": "Python",
"bytes": "183340"
}
],
"symlink_target": ""
} |
import logging
import os
import re
__version__ = '1.29.16'
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""
    def emit(self, record):
        # Intentionally a no-op.
        pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_INITIALIZERS = []
# Inserts `sep` between a leading character and a capitalized word
# (e.g. "eT" in "CreateTable"), and between a lowercase/digit and a capital.
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{2,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
    ('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
    ('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
    ('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
    ('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
    ('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
    ('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
    ('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
    ('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
    ('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
    ('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
    ('ExecutePartiQLStatement', '_'): 'execute_partiql_statement',
    ('ExecutePartiQLStatement', '-'): 'execute-partiql-statement',
    ('ExecutePartiQLTransaction', '_'): 'execute_partiql_transaction',
    ('ExecutePartiQLTransaction', '-'): 'execute-partiql-transaction',
    ('ExecutePartiQLBatch', '_'): 'execute_partiql_batch',
    ('ExecutePartiQLBatch', '-'): 'execute-partiql-batch',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED:
    def __copy__(self):
        return self
    def __deepcopy__(self, memodict):
        return self
# Replace the class with its singleton instance; copy/deepcopy return it too.
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache):
    """Convert camel case to a "pythonic" name.

    If the name contains the ``sep`` character, then it is
    returned unchanged.
    """
    if sep in name:
        # If the sep is in the name, assume that it's already
        # transformed and return the string unchanged.
        return name
    key = (name, sep)
    if key not in _xform_cache:
        # Fix: run the special-case regex once instead of twice.
        matched = _special_case_transform.search(name)
        if matched is not None:
            acronym = matched.group()
            # Replace something like ARNs, ACLs with _arns, _acls.
            name = f"{name[: -len(acronym)]}{sep}{acronym.lower()}"
        s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
        transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
        _xform_cache[key] = transformed
    return _xform_cache[key]
def register_initializer(callback):
    """Add `callback` to the list of session initializers.

    The callback is invoked with every newly created
    `botocore.session.Session` instance.

    :type callback: callable
    :param callback: Callable accepting a single argument
        of type `botocore.session.Session`.
    """
    _INITIALIZERS.append(callback)
def unregister_initializer(callback):
    """Remove a previously registered session initializer.

    :type callback: callable
    :param callback: The exact callable previously passed to
        `botocore.register_initializer`.
    :raises ValueError: If the callback is not currently
        registered as an initializer.
    """
    _INITIALIZERS.remove(callback)
def invoke_initializers(session):
    """Run every registered initializer against `session`.

    :type session: botocore.session.Session
    :param session: The newly created session to initialize.
    """
    for callback in _INITIALIZERS:
        callback(session)
| {
"content_hash": "fe47929b833f17cfd252047985444a6c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 78,
"avg_line_length": 34.296,
"alnum_prop": 0.6773967809657103,
"repo_name": "boto/botocore",
"id": "9b7116342e243bfc70e803195fffe34fc28d7c2d",
"size": "4914",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "botocore/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "23820"
},
{
"name": "Python",
"bytes": "3352371"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import traceback
import sys
import threading
from contextlib import contextmanager
from stat import S_IXOTH
from os import pardir, stat, chmod, access, X_OK, pathsep, environ
from os import makedirs, listdir
from os.path import join, dirname, isfile, split
from os.path import exists
from tempfile import mkdtemp
from shutil import rmtree
import time
from sys import version_info
import webob
from webtest import TestApp
from webtest.http import StopableWSGIServer
import galaxy.util
from galaxy.util.bunch import Bunch
from galaxy.jobs.metrics import NULL_JOB_INSTRUMENTER
from pulsar.tools import ToolBox
from pulsar.managers.base import JobDirectory
from pulsar.web.framework import file_response
if version_info < (2, 7):
from unittest2 import TestCase, skip
else:
from unittest import TestCase, skip
try:
from nose.tools import nottest
except ImportError:
def nottest(x):
return x
TEST_DIR = dirname(__file__)
ROOT_DIR = join(TEST_DIR, pardir)
class TempDirectoryTestCase(TestCase):
    """TestCase providing a fresh scratch directory for every test."""

    def setUp(self):
        # Created per-test; removed again in tearDown.
        self.temp_directory = mkdtemp()

    def tearDown(self):
        rmtree(self.temp_directory)
def get_test_toolbox():
    """Load the shed toolbox XML shipped with the test data."""
    xml_path = join(dirname(__file__), pardir, "test_data",
                    "test_shed_toolbox.xml")
    return ToolBox(xml_path)
def get_test_tool():
    """Fetch the canned `tool1` from the test toolbox."""
    toolbox = get_test_toolbox()
    return toolbox.get_tool("tool1")
class TestManager(object):
    """Minimal manager stand-in exposing one shared job directory."""

    def setup_temp_directory(self):
        self.temp_directory = mkdtemp()
        self.__job_directory = JobDirectory(self.temp_directory, '1')

    def cleanup_temp_directory(self):
        rmtree(self.temp_directory)

    def job_directory(self, job_id):
        # Every job id maps onto the same directory in tests.
        return self.__job_directory
@contextmanager
def test_job_directory():
    """Yield a JobDirectory rooted in a throwaway temp directory."""
    with temp_directory() as root:
        yield JobDirectory(root, '1')
@contextmanager
def temp_directory():
    """Create a temporary directory, removing it again on exit."""
    path = mkdtemp()
    try:
        yield path
    finally:
        rmtree(path)
@contextmanager
def test_manager():
    """Yield a TestManager backed by a temp directory, cleaning up after."""
    instance = TestManager()
    instance.setup_temp_directory()
    yield instance
    instance.cleanup_temp_directory()
class TestAuthorization(object):
    """Toggleable authorizer stub: raises when a permission flag is off."""

    def __init__(self):
        self.allow_setup = True
        self.allow_tool_file = True
        self.allow_execution = True
        self.allow_config = True

    def _check(self, allowed):
        # Mirrors real authorizers: failure means raising, success is silent.
        if not allowed:
            raise Exception

    def authorize_setup(self):
        self._check(self.allow_setup)

    def authorize_tool_file(self, name, contents):
        self._check(self.allow_tool_file)

    def authorize_execution(self, job_directory, command_line):
        self._check(self.allow_execution)

    def authorize_config_file(self, job_directory, name, path):
        self._check(self.allow_config)
class TestDependencyManager(object):
    """Dependency manager stub: no shell commands for any requirement."""

    def dependency_shell_commands(self, requirements, **kwds):
        return []
class BaseManagerTestCase(TestCase):
    """Shared fixture for manager tests: minimal app plus staging directory."""
    def setUp(self):
        self.app = minimal_app_for_managers()
        self.staging_directory = self.app.staging_directory
        self.authorizer = self.app.authorizer
    def tearDown(self):
        rmtree(self.staging_directory)
    @nottest
    def _test_simple_execution(self, manager):
        # Run a trivial command and verify stdout/stderr/return code and
        # that clean() empties the staging directory.
        command = """python -c "import sys; sys.stdout.write(\'Hello World!\'); sys.stderr.write(\'moo\')" """
        job_id = manager.setup_job("123", "tool1", "1.0.0")
        manager.launch(job_id, command)
        # Busy-wait until the manager reports a terminal state.
        while manager.get_status(job_id) not in ['complete', 'cancelled']:
            pass
        self.assertEquals(manager.stderr_contents(job_id), 'moo')
        self.assertEquals(manager.stdout_contents(job_id), 'Hello World!')
        self.assertEquals(manager.return_code(job_id), 0)
        manager.clean(job_id)
        self.assertEquals(len(listdir(self.staging_directory)), 0)
    def _test_cancelling(self, manager):
        # Launch a long-running job, kill it, and expect a cancelled status.
        job_id = manager.setup_job("124", "tool1", "1.0.0")
        command = self._python_to_command("import time; time.sleep(1000)")
        manager.launch(job_id, command)
        time.sleep(0.05)
        manager.kill(job_id)
        manager.kill(job_id)  # Make sure kill doesn't choke if pid doesn't exist
        self._assert_status_becomes_cancelled(job_id, manager)
        manager.clean(job_id)
    def _python_to_command(self, code, quote='"'):
        # Collapse a (possibly multi-line) snippet into one `python -c` call.
        assert '"' not in code
        return 'python -c "%s"' % "; ".join(code.split("\n"))
    def _assert_status_becomes_cancelled(self, job_id, manager):
        # Poll every 10ms; fail on a terminal non-cancelled state or after ~1s.
        i = 0
        while True:
            i += 1
            status = manager.get_status(job_id)
            if status in ["complete", "failed"]:
                raise AssertionError("Expected cancelled status but got %s." % status)
            elif status == "cancelled":
                break
            time.sleep(0.01)
            if i > 100:  # Wait one second
                raise AssertionError("Job failed to cancel quickly.")
def minimal_app_for_managers():
    """ Minimal app description for consumption by managers.
    """
    # mkdtemp + rmtree yields a unique, not-yet-existing path the manager
    # is expected to create itself.
    staging_directory = mkdtemp()
    rmtree(staging_directory)
    # NOTE(review): `TestAuthorizer` is not defined in this part of the file
    # (only `TestAuthorization` is) -- confirm it is defined later in the
    # file, otherwise this raises NameError at call time.
    authorizer = TestAuthorizer()
    return Bunch(staging_directory=staging_directory,
                 authorizer=authorizer,
                 job_metrics=NullJobMetrics(),
                 dependency_manager=TestDependencyManager())
class NullJobMetrics(object):
    """Job metrics stub whose default instrumenter does nothing."""

    def __init__(self):
        self.default_job_instrumenter = NULL_JOB_INSTRUMENTER
@nottest
@contextmanager
def server_for_test_app(app):
    """Serve `app` over HTTP for the duration of the context.

    Wraps the WSGI app in paste's ErrorMiddleware when available so server
    errors appear in the error log during tests.
    """
    try:
        from paste.exceptions.errormiddleware import ErrorMiddleware
        error_app = ErrorMiddleware(app.app, debug=True, error_log="errors.log")
    except ImportError:
        # paste.exceptions not available for Python 3.
        error_app = app
    server = StopableWSGIServer.create(error_app)
    try:
        server.wait()
        yield server
    finally:
        server.shutdown()
        # There seem to be persistent transient problems with the testing, sleeping
        # between creation of test app instances for greater than .5 seconds seems
        # to help (async loop length in code is .5 so this maybe makes some sense?)
        if "TEST_WEBAPP_POST_SHUTDOWN_SLEEP" in environ:
            time.sleep(int(environ.get("TEST_WEBAPP_POST_SHUTDOWN_SLEEP")))
@nottest
@contextmanager
def test_pulsar_server(global_conf={}, app_conf={}, test_conf={}):
    """Spin up a full Pulsar app behind a live HTTP test server."""
    with test_pulsar_app(global_conf, app_conf, test_conf) as pulsar_app:
        with server_for_test_app(pulsar_app) as live_server:
            yield live_server
class RestartablePulsarAppProvider(object):
    """Holds configuration so a Pulsar app can be torn down and recreated
    against the same staging directory, simulating a restart."""

    def __init__(self, global_conf={}, app_conf={}, test_conf={}, web=True):
        self.staging_directory = mkdtemp()
        self.global_conf = global_conf
        self.app_conf = app_conf
        self.test_conf = test_conf
        self.web = web

    @contextmanager
    def new_app(self):
        """Yield a fresh app bound to the shared staging directory."""
        with test_pulsar_app(
            self.global_conf,
            self.app_conf,
            self.test_conf,
            staging_directory=self.staging_directory,
            web=self.web,
        ) as created_app:
            yield created_app

    def cleanup(self):
        # Best effort -- the directory may already be gone.
        try:
            rmtree(self.staging_directory)
        except Exception:
            pass
@contextmanager
def restartable_pulsar_app_provider(**kwds):
    """Yield a RestartablePulsarAppProvider, guaranteeing its staging
    directory is cleaned up on exit.
    """
    # Fix: construct before entering the try block. Previously a failing
    # constructor left `has_app` unbound and the finally clause raised
    # NameError, masking the real error. If construction fails there is
    # nothing to clean up.
    has_app = RestartablePulsarAppProvider(**kwds)
    try:
        yield has_app
    finally:
        has_app.cleanup()
@nottest
@contextmanager
def test_pulsar_app(
    global_conf=None,
    app_conf=None,
    test_conf=None,
    staging_directory=None,
    web=True,
):
    """Yield a Pulsar (web)app configured against a temporary staging dir.

    This function writes keys into ``app_conf``, so the old ``app_conf={}``
    mutable default leaked configuration between calls; ``None`` sentinels
    fix that while keeping the call signature backward compatible.
    """
    global_conf = {} if global_conf is None else global_conf
    app_conf = {} if app_conf is None else app_conf
    test_conf = {} if test_conf is None else test_conf
    clean_staging_directory = False
    if staging_directory is None:
        staging_directory = mkdtemp()
        clean_staging_directory = True
    # Make staging directory world executable for run as user tests.
    mode = stat(staging_directory).st_mode
    chmod(staging_directory, mode | S_IXOTH)
    cache_directory = mkdtemp()
    app_conf["staging_directory"] = staging_directory
    app_conf["file_cache_dir"] = cache_directory
    app_conf["ensure_cleanup"] = True
    try:
        with _yield_app(global_conf, app_conf, test_conf, web) as app:
            yield app
    finally:
        to_clean = [cache_directory]
        if clean_staging_directory:
            to_clean.append(staging_directory)
        for directory in to_clean:
            # Best effort; a directory may already have been removed.
            # (A stray dead ``pass`` after rmtree was dropped here.)
            try:
                rmtree(directory)
            except Exception:
                pass
@contextmanager
def _yield_app(global_conf, app_conf, test_conf, web):
    # Yield either wsgi webapp of the underlying pulsar
    # app object if the web layer is not needed.
    try:
        if web:
            from pulsar.web.wsgi import app_factory
            app = app_factory(global_conf, **app_conf)
            yield TestApp(app, **test_conf)
        else:
            from pulsar.main import load_app_configuration
            from pulsar.core import PulsarApp
            app_conf = load_app_configuration(local_conf=app_conf)
            app = PulsarApp(**app_conf)
            yield app
    finally:
        # Best-effort shutdown.  If construction above raised before ``app``
        # was bound, the resulting NameError is swallowed by this except.
        try:
            shutdown_args = []
            if not web:
                # Non-web shutdown takes an extra argument (looks like a
                # timeout in seconds) -- TODO confirm against pulsar.core.
                shutdown_args.append(2)
            app.shutdown(*shutdown_args)
        except Exception:
            pass
def skip_unless_environ(var):
    """Decorator factory: identity when env var *var* is set, else a skip."""
    if var not in environ:
        return skip("Environment variable %s not found, dependent test skipped." % var)
    return lambda func: func
def skip_unless_executable(executable):
    """Decorator factory: identity when *executable* is on PATH, else a skip."""
    if not _which(executable):
        return skip("PATH doesn't contain executable %s" % executable)
    return lambda func: func
def skip_unless_module(module):
    """Decorator factory: identity when *module* imports cleanly, else a skip."""
    try:
        __import__(module)
    except ImportError:
        return skip("Module %s could not be loaded, dependent test skipped." % module)
    return lambda func: func
def skip_unless_any_module(modules):
    """Decorator factory: identity if any of *modules* imports, else a skip.

    Returns as soon as one module imports successfully; the original kept
    importing every remaining module after availability was already proven.
    """
    for module in modules:
        try:
            __import__(module)
        except ImportError:
            continue
        return lambda func: func
    return skip("None of the modules %s could be loaded, dependent test skipped." % modules)
def _which(program):
def is_exe(fpath):
return isfile(fpath) and access(fpath, X_OK)
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
for path in environ["PATH"].split(pathsep):
path = path.strip('"')
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
return None
class TestAuthorizer(object):
    """Authorizer stub: hands out one shared TestAuthorization for all tools."""

    def __init__(self):
        self.authorization = TestAuthorization()

    def get_authorization(self, tool_id):
        # tool_id is ignored; every tool receives the same authorization.
        return self.authorization
class JobFilesApp(object):
    """Minimal WSGI app serving files under *root_directory* for tests.

    GET returns a file, POST writes one.  Paths must stay inside
    ``root_directory``.
    """

    def __init__(self, root_directory=None):
        self.root_directory = root_directory

    def __call__(self, environ, start_response):
        req = webob.Request(environ)
        params = req.params.mixed()
        method = req.method
        if method == "POST":
            resp = self._post(req, params)
        elif method == "GET":
            resp = self._get(req, params)
        else:
            raise Exception("Unhandled request method %s" % method)
        return resp(environ, start_response)

    def _check_path(self, path):
        # Explicit raise instead of ``assert False``: asserts are stripped
        # under ``python -O``, which would silently disable this containment
        # check.  AssertionError is kept so existing callers see the same
        # exception type.
        if not galaxy.util.in_directory(path, self.root_directory):
            raise AssertionError("%s not in %s" % (path, self.root_directory))

    def _post(self, request, params):
        """Write the uploaded file body to the requested path."""
        path = params['path']
        self._check_path(path)
        parent_directory = dirname(path)
        if not exists(parent_directory):
            makedirs(parent_directory)
        galaxy.util.copy_to_path(params["file"].file, path)
        return webob.Response(body='')

    def _get(self, request, params):
        """Serve the file at the requested path."""
        path = params['path']
        self._check_path(path)
        return file_response(path)
@contextmanager
def files_server(directory=None):
    """Serve a JobFilesApp over a live test server.

    NOTE(review): the yield shape is asymmetric -- without *directory* the
    caller receives ``(server, directory)``; with one it receives just
    ``server``.  Confirm callers rely on this before unifying.
    """
    if not directory:
        with temp_directory() as directory:
            app = TestApp(JobFilesApp(directory))
            with server_for_test_app(app) as server:
                yield server, directory
    else:
        app = TestApp(JobFilesApp(directory))
        with server_for_test_app(app) as server:
            yield server
def dump_other_threads():
    """Print the name and current stack of every non-main thread.

    Debug utility for tests whose worker threads refuse to die.
    """
    main_thread = threading.current_thread()
    for t in threading.enumerate():
        if t is main_thread:
            continue
        # Thread.name replaces the deprecated camelCase getName() accessor.
        print(t.name)
        traceback.print_stack(sys._current_frames()[t.ident])
| {
"content_hash": "7344d3e3b51d4ed0245398550ceb6d4d",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 110,
"avg_line_length": 28.598214285714285,
"alnum_prop": 0.6293318763659069,
"repo_name": "jmchilton/pulsar",
"id": "e8e16898289e3fe896307e8c9e25d7289aea880a",
"size": "12812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "753626"
},
{
"name": "Shell",
"bytes": "12474"
}
],
"symlink_target": ""
} |
import logging
import os
import shutil
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("post_gen_project")
# Name of the folder holding per-tool documentation templates.
DOCS_SOURCES = "docs_sources"
# Template-only folders removed once generation is complete.
ALL_TEMP_FOLDERS = [DOCS_SOURCES, "licenses", "macros"]
# Files each docs tool needs; a leading "/" means project root, not docs/.
DOCS_FILES_BY_TOOL = {
    "mkdocs": ["index.md", "/mkdocs.yml"],
    "sphinx": ["conf.py", "index.rst", "make.bat", "Makefile"],
}
def move_docs_files(docs_tool, docs_files, docs_sources):
    """Move the selected doc tool's template files into their final place.

    Entries prefixed with "/" land in the project root; everything else goes
    into ``docs/``.  Existing destination files are replaced.
    """
    if docs_tool == "none":
        return
    project_root = os.getcwd()
    docs_dir = "docs"
    logger.info("Initializing docs for %s", docs_tool)
    if not os.path.exists(docs_dir):
        os.mkdir(docs_dir)
    for entry in docs_files[docs_tool]:
        if entry.startswith("/"):
            target_dir, filename = project_root, entry[1:]
        else:
            target_dir, filename = docs_dir, entry
        source_path = os.path.join(docs_sources, docs_tool, filename)
        target_path = os.path.join(target_dir, filename)
        logger.info("Moving %s to %s.", source_path, target_path)
        if os.path.exists(target_path):
            os.unlink(target_path)
        os.rename(source_path, target_path)
def remove_temp_folders(temp_folders):
    """Delete every template-only folder left over from generation."""
    for temp_folder in temp_folders:
        logger.info("Remove temporary folder: %s", temp_folder)
        shutil.rmtree(temp_folder)
if __name__ == "__main__":
move_docs_files("{{cookiecutter.docs_tool}}", DOCS_FILES_BY_TOOL, DOCS_SOURCES)
remove_temp_folders(ALL_TEMP_FOLDERS)
| {
"content_hash": "0c8e656feb1936b21efd8326bd2918fe",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 83,
"avg_line_length": 27.97872340425532,
"alnum_prop": 0.6281368821292775,
"repo_name": "pytest-dev/cookiecutter-pytest-plugin",
"id": "a300dd33cbd7aaf59414e60f1148dcfd0f54e7dd",
"size": "1362",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "hooks/post_gen_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7029"
},
{
"name": "Makefile",
"bytes": "8361"
},
{
"name": "Python",
"bytes": "17155"
}
],
"symlink_target": ""
} |
import datetime
import boto3
import argparse
import sys
import time
import random
# Module-level AWS handles created at import time; requires a configured
# local "admin" credentials profile.
session = boto3.Session(profile_name='admin')
client = session.client('emr')
def make_instances():
    """Return the EMR ``Instances`` block: one spot m1.medium master node."""
    emrfs_configuration = {
        "Classification": "emrfs-site",
        "Properties": {
            "fs.s3.consistent.retryPeriodSeconds": "10",
            "consistent": "true",
            "Consistent": "true",
            "fs.s3.consistent.retryCount": "5",
            "fs.s3.consistent.metadata.tableName": "EmrFSMetadata",
        },
        "Configurations": [],
    }
    master_group = {
        'InstanceRole': 'MASTER',
        'Market': 'SPOT',
        'BidPrice': '.6',
        'InstanceType': 'm1.medium',
        'InstanceCount': 1,
        'Configurations': [emrfs_configuration],
    }
    return {
        'InstanceGroups': [master_group],
        # 'Ec2KeyName': 'ubuntu-home',
        'KeepJobFlowAliveWhenNoSteps': False,
        'Ec2SubnetId': "subnet-0f159079",
    }
def make_bootstrap():
    """Return the EMR bootstrap-action list (generic setup + python deps)."""
    bucket = 's3://paulhtremblay/emr_bootstraps'
    return [
        {
            'Name': 'Generic bootstrap',
            'ScriptBootstrapAction': {'Path': bucket + '/emr_bootstrap.sh'},
        },
        {
            'Name': 'python-hdfs',
            'ScriptBootstrapAction': {'Path': bucket + '/emr_bootstrap_python.sh'},
        },
    ]
def create_response(chunk_num, test=False, validation=False):
    """Launch one EMR job flow running the uploader step for *chunk_num*."""
    step_args = ['python34', '/usr/local/bin/upload_noaa1_to_s3.py',
                 '--chunk-num', str(chunk_num)]
    if test:
        step_args.append('--test')
    if validation:
        step_args.append('--validation')
    steps = [{'HadoopJarStep': {'Args': step_args,
                                'Jar': 'command-runner.jar'},
              'Name': 'simple step', 'ActionOnFailure': 'TERMINATE_CLUSTER'}]
    applications = [
        {
            "Name": "Hadoop",
        },
        {
            "Name": "Spark",
        }
    ]
    # Quiet Spark console output and force python34 for PySpark.
    configurations = [{
        'Classification': 'spark-log4j',
        'Properties': {
            'log4j.rootCategory': 'ERROR, console'
        }
    }, {
        'Classification': 'spark-defaults',
        'Properties': {
            'spark.ui.showConsoleProgress': 'false'
        }
    }, {
        'Classification': 'spark-env',
        'Properties': {},
        'Configurations': [{
            'Classification': 'export',
            'Properties': {
                'PYSPARK_PYTHON': 'python34',
                'PYSPARK_DRIVER_PYTHON': 'python34'
            }
        }]
    }]
    return client.run_job_flow(
        Name="ftp upload example {0}".format(datetime.datetime.now()),
        LogUri="s3n://paulhtremblay/emr-logs/",
        ReleaseLabel='emr-5.3.0',
        Instances=make_instances(),
        JobFlowRole='EMR_EC2_DefaultRole',
        ServiceRole='EMR_DefaultRole',
        BootstrapActions=make_bootstrap(),
        Steps=steps,
        Applications=applications,
        Configurations=configurations,
    )
def test_run(start_year, end_year, local=False, test=False, write_dir=None, md5_sum=True):
    """Run the uploader script locally via subprocess (manual test helper).

    ``local`` is accepted for interface compatibility but is not used.
    """
    import subprocess
    cmd = ['python3',
           '/home/paul/Documents/projects/emr_admin/boto_emr/scripts/upload_noaa1_to_s3.py',
           '--start-year', str(start_year), '--end-year', str(end_year)]
    if test:
        cmd.append('--test')
    if write_dir:
        cmd.extend(['--write-dir', write_dir])
    if md5_sum:
        cmd.append('--md5-sum')
    subprocess.call(cmd)
def _get_last_chunk():
return 93
def _num_clusters_free(max_num_emr_clusters):
    """Return how many cluster slots remain under *max_num_emr_clusters*.

    A cluster counts as occupied unless its state starts with "TERMINATED"
    (covers TERMINATED and TERMINATED_WITH_ERRORS).
    """
    emr_client = boto3.client("emr")
    statuses = [c['Status'] for c in emr_client.list_clusters()['Clusters']]
    active = [s for s in statuses if s['State'][0:10] != 'TERMINATED']
    return max_num_emr_clusters - len(active)
def _get_args():
parser = argparse.ArgumentParser(description='upload ftp files to S3')
parser.add_argument('--test', action = 'store_true',
help = 'test run on smaller data')
parser.add_argument('--validation', action = 'store_true',
help = 'a validation run')
parser.add_argument('--s3_dir', type = str,
help ="s3 directory in the root directory", default = "noaa")
args = parser.parse_args()
return args
def main():
    """Entry point: launch EMR clusters for data chunks in rolling batches."""
    args = _get_args()
    # NOTE(review): the unconditional ``return`` below short-circuits the
    # whole rolling-launch loop -- only chunks 0 and 1 are ever launched and
    # everything after it is dead code.  Looks like a debugging leftover;
    # confirm intent before removing either the return or the loop.
    print(create_response(chunk_num = 0, test = False, validation = False))
    print(create_response(chunk_num = 1, test = False, validation = False))
    return
    start = 0
    max_num_emr_clusters = 18
    end = start + max_num_emr_clusters + 1
    last_chunk_num = _get_last_chunk()
    sleep_time = 60 * 25
    while 1:
        for chunk_num in range(start, end):
            print("start up emr cluster with args of chunk_num {0}".format(chunk_num))
            print(create_response(chunk_num = chunk_num, test = args.test, validation = args.validation))
        if end > last_chunk_num:
            break
        # Poll every 25 minutes until at least one cluster slot frees up,
        # then launch the next window of chunks.
        while 1:
            time.sleep(sleep_time)
            num_clusters_free = _num_clusters_free(max_num_emr_clusters)
            if num_clusters_free > 0:
                start = end
                end = start + num_clusters_free + 1
                if end > last_chunk_num:
                    end = last_chunk_num + 1
                break
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "e582e1eb7df79105eca32b2018d8c91e",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 137,
"avg_line_length": 32.92727272727273,
"alnum_prop": 0.522547395545739,
"repo_name": "paulhtremblay/boto_emr",
"id": "b9e51031464f45866e4d0d3ea61523b4837d1ebf",
"size": "5433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/launch_ftp_upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30472"
},
{
"name": "Shell",
"bytes": "444"
}
],
"symlink_target": ""
} |
import os
import sys

# Import pymysql inside the try so a missing driver is actually reported.
# The original imported it unconditionally first, so its ImportError guard
# could never fire, and the bare string in the except body was a no-op.
try:
    import pymysql
except ImportError:
    sys.stderr.write('Error import pymysql\n')
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "catalog.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "cc6aa3685b598907880bf4a4c6ab236a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 29.82758620689655,
"alnum_prop": 0.6289017341040463,
"repo_name": "Sergey19940808/catalog",
"id": "b935b90368824e7141b5e275e3ddf0bda17f5986",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "201"
},
{
"name": "HTML",
"bytes": "8825"
},
{
"name": "Python",
"bytes": "10074"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import subprocess
import sys
def parse_args():
    """Parse CLI options; several defaults may be overridden via env vars."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--action', metavar="action", default="setup",
        choices=['setup', 'teardown', 'run_test'])
    cli.add_argument(
        '--service-name', metavar="service_name", default="full_service")
    cli.add_argument(
        '--icontrol-hostname', metavar="icontrol_hostname",
        default=os.getenv('icontrol_hostname', "10.1.0.170"))
    cli.add_argument(
        '--member-ip', metavar="member_ip",
        default=os.getenv('member_ip', "192.168.101.4"))
    cli.add_argument(
        '--bigip-selfip', metavar="bigip_selfip",
        default=os.getenv('bigip_selfip', "192.168.101.5"))
    cli.add_argument(
        '--vni', metavar="vxlan_vni", default=os.getenv('vxlan_vni', "94"))
    cli.add_argument(
        '--test-case', metavar="test_case",
        default="test_rename_service_objects")
    return cli.parse_args()
def setup_symbols(symbols_file, args):
    """Load the symbols JSON file, fill in host/network values, write it back.

    Fix: dropped the redundant ``f.close()`` calls inside ``with`` blocks --
    the context manager already closes the file.
    """
    with open(symbols_file, "r") as f:
        symbols = json.load(f)
    symbols['bigip_ip'] = args.icontrol_hostname
    symbols['bigip_selfip'] = args.bigip_selfip
    symbols['server_ip'] = args.member_ip
    symbols['server_vxlan'] = args.vni
    symbols['vip_vxlan_segid'] = args.vni
    with open(symbols_file, "w") as f:
        json.dump(symbols, f)
def main(args):
    """Drive one pytest invocation for the service-object-rename suite.

    Builds the pytest command for the requested phase (setup / teardown /
    run_test) and exits with the child process's return code.
    """
    test_root = "test/functional/neutronless/service_object_rename"
    test_name = "test_service_object_rename.py"
    symbols_file = "%s/conf_symbols.json" % test_root
    # Initialize symbols file
    setup_symbols(symbols_file, args)
    # Build the command to run.
    cmd = "py.test -sv --symbols=%s --service-name=%s %s/%s" % (
        symbols_file,
        args.service_name,
        test_root,
        test_name)
    # Get the test to run.
    if args.action == 'setup':
        print("Setting up service rename tests...")
        test = "test_create_config"
    elif args.action == 'teardown':
        print("Teardown service rename tests...")
        test = "test_cleanup_config"
    elif args.action == 'run_test':
        # Fixed copy/paste message (previously said "Teardown ...").
        print("Running service rename tests...")
        test = args.test_case
    else:
        print("invalid option")
        sys.exit(1)
    # Build the test case into the command
    cmd = "%s::%s" % (cmd, test)
    try:
        # check_call, not call: subprocess.call never raises
        # CalledProcessError, so the except clause below was dead code.
        # check_call returns 0 on success and raises on non-zero exit,
        # so sys.exit still receives the child's return code either way.
        returncode = subprocess.check_call(cmd.split())
    except subprocess.CalledProcessError as e:
        returncode = e.returncode
    sys.exit(returncode)
# Script entry point: parse CLI/env options, then run the chosen phase.
if __name__ == '__main__':
    main(parse_args())
| {
"content_hash": "f3ee16c80deff32e67bd5a8b0f4106e9",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 92,
"avg_line_length": 30.453488372093023,
"alnum_prop": 0.6139747995418099,
"repo_name": "richbrowne/f5-openstack-agent",
"id": "11a6dac1f85a2543591cd5742e110f7811dd12e6",
"size": "3219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/neutronless/service_object_rename/run_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "853"
},
{
"name": "Python",
"bytes": "1388158"
},
{
"name": "Ruby",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "20290"
}
],
"symlink_target": ""
} |
from fibpro.rpc import GenericClient, Server
from fibpro.logsink import LogSinkClient
from fibpro.pricing import PricingClient
class ControllerBase(object):
    """Shared identity/config for controller client and server classes."""
    # Service name registered with the service directory.
    NAME = "controller"
    # Enable RPC-level logging for this service.
    LOG_RPC = True
class ControllerServer(ControllerBase, Server):
    """RPC server fronting the fib service: bill the user, then compute."""

    def server_init(self):
        # Wire up downstream service clients via the service directory.
        self.log = LogSinkClient(self.service_dir_client)
        self.pricing_client = PricingClient(self.service_dir_client)
        self.compute_worker_client = GenericClient(self.service_dir_client, name="compute_worker")

    def parse_requested_fib(self, raw_requested_fib):
        """Return the request as an int, or None (logged) if unparseable."""
        try:
            return int(raw_requested_fib)
        except ValueError as e:
            # "except ... as e" works on Python 2.6+ AND Python 3; the old
            # "except ValueError, e" form was Python-2-only syntax.
            self.log.warn('Request to %s resulted in %s' % (
                raw_requested_fib, str(e)))
            return None

    def call_compute_worker(self, requested_fib):
        """Delegate the actual fib computation to the compute worker."""
        return self.compute_worker_client.compute_fib(
            requested_fib=requested_fib)

    def generate_response(self, raw_requested_fib=None, username=None):
        """Return ``[HTTP status line, body]`` for one fib request."""
        requested_fib = self.parse_requested_fib(raw_requested_fib)
        if requested_fib is None:
            return ["404 NOT FOUND", "404: %s" % raw_requested_fib]
        # verify and update user credit
        credit_ok, pricing_response = self.pricing_client.pay_for_user_request(
            requested_fib, username)
        if credit_ok:
            return ["200 OK", self.call_compute_worker(requested_fib)]
        return ["403 FORBIDDEN", pricing_response]
| {
"content_hash": "e2f36ad932a009a98fa340567fdbb071",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 98,
"avg_line_length": 39.67567567567568,
"alnum_prop": 0.6607629427792916,
"repo_name": "neumark/practical-microservices",
"id": "599ed41c9db10f305901ee389a77ced35bdc45de",
"size": "1468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fibpro/controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "39779"
},
{
"name": "Shell",
"bytes": "1720"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and test one synthetic series: 1024 daily points, MovingAverage
# trend, 30-step cycle, BoxCox transform, AR(12), 100 exogenous variables.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "BoxCox", sigma = 0.0, exog_count = 100, ar_order = 12);
"content_hash": "8c69a29012956d27a59faa83b20cf545",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 169,
"avg_line_length": 38.42857142857143,
"alnum_prop": 0.7100371747211895,
"repo_name": "antoinecarme/pyaf",
"id": "919450f1577bc28af5016a8ec784a71e6002bf01",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_BoxCox/trend_MovingAverage/cycle_30/ar_12/test_artificial_1024_BoxCox_MovingAverage_30_12_100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from config import *
import time
class Load():
    """IRC plugin maintaining the runtime ignore list (global ``blacklist``).

    Commands addressed to the bot in its channel:
      * ``unignore <nick>`` -- remove nick from the blacklist
      * ``ignore <nick>``   -- add nick to the blacklist
    Lines from blacklisted users are rejected outright.
    """

    def __init__(self, chan):
        self.channel = chan.channel
        self.botnick = chan.botnick
        self.sendmsg = chan.sendmsg
        self.leavechan = chan.leavechan
        self.joinchan = chan.joinchan
        self.name = "ignore"
        self.sendmsg("Caution: module 'ignore' should be first loaded module")

    def run(self, ircmsg):
        """Handle one raw IRC line; return True when the line was consumed."""
        # Only react to lines mentioning both the channel and the bot nick.
        if ircmsg.lower().find(self.channel) != -1 and ircmsg.lower().find(self.botnick) != -1:
            user = ircmsg
            user = user.lower().split(":")[1].split('!')[0]
            if user in blacklist:
                self.sendmsg("I don't take orders from you, " + user + "!")
                return True
            # "unignore" must be checked first: the "ignore" substring
            # matches both commands.
            elif "unignore" in ircmsg.lower() and not "loadmod" in ircmsg.lower():
                ignuser = ircmsg.lower().split("unignore")[1].strip()
                if ignuser in blacklist:
                    self.sendmsg("Unignoring " + ignuser + " from now on")
                    # list.remove replaces the old del-by-index idiom
                    # (del blacklist[blacklist.index(ignuser)]).
                    blacklist.remove(ignuser)
                else:
                    self.sendmsg("User not ignored")
                return True
            elif "ignore" in ircmsg.lower() and not "loadmod" in ircmsg.lower():
                ignuser = ircmsg.lower().split("ignore")[1].strip()
                if ignuser not in blacklist:
                    blacklist.append(ignuser)
                    self.sendmsg("Ignoring " + ignuser + " from now on")
                else:
                    self.sendmsg("Already ignoring " + ignuser)
                return True

    def stop(self):
        """Per-instance teardown hook; nothing to release."""
        pass
def stop():
    """Module-level teardown hook; the ignore plugin keeps no global state."""
| {
"content_hash": "ad81499a3b6c5e2a0b8bc326260d983e",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 95,
"avg_line_length": 35.87234042553192,
"alnum_prop": 0.5118623962040332,
"repo_name": "rbracken/internbot",
"id": "3256f7154561ae1d24294402cae941cdcd3e81c4",
"size": "1686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/ignore/ignore.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "28387"
},
{
"name": "Shell",
"bytes": "1061"
}
],
"symlink_target": ""
} |
# Repository tables handled by the migration/mirroring scripts in this
# directory.  NOTE(review): the ordering appears to put parent tables before
# the mapping/cache tables that reference them -- confirm before reordering.
TableList = [
    'LatestMirror',
    'CommitLock',
    'Branches',
    'Items',
    'Versions',
    'Labels',
    'LabelMap',
    'Flavors',
    'FlavorMap',
    'FlavorScores',
    'Users',
    'UserGroups',
    'UserGroupMembers',
    'EntitlementGroups',
    'Entitlements',
    'EntitlementOwners',
    'EntitlementAccessMap',
    'Permissions',
    'FileStreams',
    'Nodes',
    'ChangeLogs',
    'Instances',
    'TroveInfo',
    'Dependencies',
    'Metadata',
    'MetadataItems',
    'PGPKeys',
    'PGPFingerprints',
    'Provides',
    'Requires',
    'TroveRedirects',
    'TroveTroves',
    'Dirnames',
    'Basenames',
    'Prefixes',
    'FilePaths',
    'TroveFiles',
    'CheckTroveCache',
    'UserGroupTroves',
    'UserGroupInstancesCache',
    'UserGroupAllPermissions',
    'UserGroupAllTroves',
    'LatestCache'
]
| {
"content_hash": "dc2b4cef6988f2d6506a3d2539425a22",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 30,
"avg_line_length": 18.77777777777778,
"alnum_prop": 0.5810650887573965,
"repo_name": "fedora-conary/conary",
"id": "4f36521750c74cb5fe39165759064a9403f54e92",
"size": "1492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/migration/tablelist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "481681"
},
{
"name": "C++",
"bytes": "8244"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Erlang",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "45629"
},
{
"name": "Python",
"bytes": "10586616"
},
{
"name": "Shell",
"bytes": "4657"
},
{
"name": "Standard ML",
"bytes": "2756"
}
],
"symlink_target": ""
} |
import contextlib
import string
import sys
import tempfile
from unittest import mock
import numpy as np
import pytest
import cupy
from cupy import testing
@contextlib.contextmanager
def use_temporary_cache_dir():
    """Point cuFFT-callback compilation at a throwaway cache directory."""
    with tempfile.TemporaryDirectory() as cache_path:
        patcher = mock.patch('cupy.fft._callback.get_cache_dir',
                             lambda: cache_path)
        with patcher:
            yield cache_path
_load_callback = r'''
__device__ ${data_type} CB_ConvertInput(
void* dataIn, size_t offset, void* callerInfo, void* sharedPtr)
{
${data_type} x = ((${data_type}*)dataIn)[offset];
${element} *= 2.5;
return x;
}
__device__ ${load_type} d_loadCallbackPtr = CB_ConvertInput;
'''
_load_callback_with_aux = r'''
__device__ ${data_type} CB_ConvertInput(
void* dataIn, size_t offset, void* callerInfo, void* sharedPtr)
{
${data_type} x = ((${data_type}*)dataIn)[offset];
${element} *= *((${aux_type}*)callerInfo);
return x;
}
__device__ ${load_type} d_loadCallbackPtr = CB_ConvertInput;
'''
_load_callback_with_aux2 = r'''
__device__ ${data_type} CB_ConvertInput(
void* dataIn, size_t offset, void* callerInfo, void* sharedPtr)
{
${data_type} x = ((${data_type}*)dataIn)[offset];
${element} *= ((${aux_type}*)callerInfo)[offset];
return x;
}
__device__ ${load_type} d_loadCallbackPtr = CB_ConvertInput;
'''
_store_callback = r'''
__device__ void CB_ConvertOutput(
void *dataOut, size_t offset, ${data_type} element,
void *callerInfo, void *sharedPointer)
{
${data_type} x = element;
${element} /= 3.8;
((${data_type}*)dataOut)[offset] = x;
}
__device__ ${store_type} d_storeCallbackPtr = CB_ConvertOutput;
'''
_store_callback_with_aux = r'''
__device__ void CB_ConvertOutput(
void *dataOut, size_t offset, ${data_type} element,
void *callerInfo, void *sharedPointer)
{
${data_type} x = element;
${element} /= *((${aux_type}*)callerInfo);
((${data_type}*)dataOut)[offset] = x;
}
__device__ ${store_type} d_storeCallbackPtr = CB_ConvertOutput;
'''
def _set_load_cb(code, element, data_type, callback_type, aux_type=None):
return string.Template(code).substitute(
data_type=data_type,
aux_type=aux_type,
load_type=callback_type,
element=element)
def _set_store_cb(code, element, data_type, callback_type, aux_type=None):
return string.Template(code).substitute(
data_type=data_type,
aux_type=aux_type,
store_type=callback_type,
element=element)
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10, 7), (10,), (10, 10)],
'norm': [None, 'ortho'],
}))
@testing.with_requires('cython>=0.29.0')
@testing.gpu
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason='callbacks are only supported on Linux')
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='hipFFT does not support callbacks')
class Test1dCallbacks:
    def _test_load_helper(self, xp, dtype, fft_func):
        """Run *fft_func* with a load callback that scales the real part by 2.5.

        For NumPy the callback effect is emulated by pre-scaling the input;
        for CuPy the actual cuFFT load callback is compiled and attached, and
        the decorator compares the two results.
        """
        fft = getattr(xp.fft, fft_func)
        code = _load_callback
        # Pick the element accessor and cuFFT typedef names for this dtype.
        if dtype == np.complex64:
            types = ('x.x', 'cufftComplex', 'cufftCallbackLoadC')
        elif dtype == np.complex128:
            types = ('x.x', 'cufftDoubleComplex', 'cufftCallbackLoadZ')
        elif dtype == np.float32:
            types = ('x', 'cufftReal', 'cufftCallbackLoadR')
        else:
            types = ('x', 'cufftDoubleReal', 'cufftCallbackLoadD')
        cb_load = _set_load_cb(code, *types)
        a = testing.shaped_random(self.shape, xp, dtype)
        if xp is np:
            # Emulate the load callback by pre-scaling the real component.
            a.real *= 2.5
            out = fft(a, n=self.n, norm=self.norm)
            # Match CuPy's single-precision output dtype for comparison.
            if dtype in (np.float32, np.complex64):
                if fft_func != 'irfft':
                    out = out.astype(np.complex64)
                else:
                    out = out.astype(np.float32)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(cb_load=cb_load):
                    out = fft(a, n=self.n, norm=self.norm)
        return out
    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_fft_load(self, xp, dtype):
        """Forward complex FFT with a load callback."""
        return self._test_load_helper(xp, dtype, 'fft')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_ifft_load(self, xp, dtype):
        """Inverse complex FFT with a load callback."""
        return self._test_load_helper(xp, dtype, 'ifft')

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_rfft_load(self, xp, dtype):
        """Real-to-complex FFT with a load callback."""
        return self._test_load_helper(xp, dtype, 'rfft')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_irfft_load(self, xp, dtype):
        """Complex-to-real inverse FFT with a load callback."""
        return self._test_load_helper(xp, dtype, 'irfft')
    def _test_store_helper(self, xp, dtype, fft_func):
        """Run *fft_func* with a store callback dividing the output by 3.8.

        The callback divides the imaginary part for complex outputs, or the
        whole value for irfft's real output.  NumPy emulates the callback by
        post-processing; CuPy compiles the real cuFFT store callback.
        """
        fft = getattr(xp.fft, fft_func)
        code = _store_callback
        # Output element type depends on both dtype and transform direction:
        # irfft emits real values, everything else complex.
        if dtype == np.complex64:
            if fft_func != 'irfft':
                types = ('x.y', 'cufftComplex', 'cufftCallbackStoreC')
            else:
                types = ('x', 'cufftReal', 'cufftCallbackStoreR')
        elif dtype == np.complex128:
            if fft_func != 'irfft':
                types = ('x.y', 'cufftDoubleComplex', 'cufftCallbackStoreZ')
            else:
                types = ('x', 'cufftDoubleReal', 'cufftCallbackStoreD')
        elif dtype == np.float32:
            types = ('x.y', 'cufftComplex', 'cufftCallbackStoreC')
        elif dtype == np.float64:
            types = ('x.y', 'cufftDoubleComplex', 'cufftCallbackStoreZ')
        cb_store = _set_store_cb(code, *types)
        a = testing.shaped_random(self.shape, xp, dtype)
        if xp is np:
            out = fft(a, n=self.n, norm=self.norm)
            # Emulate the store callback on the NumPy reference result.
            if fft_func != 'irfft':
                out.imag /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.complex64)
            else:
                out /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.float32)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(cb_store=cb_store):
                    out = fft(a, n=self.n, norm=self.norm)
        return out
    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_fft_store(self, xp, dtype):
        """Forward complex FFT with a store callback."""
        return self._test_store_helper(xp, dtype, 'fft')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_ifft_store(self, xp, dtype):
        """Inverse complex FFT with a store callback."""
        return self._test_store_helper(xp, dtype, 'ifft')

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_rfft_store(self, xp, dtype):
        """Real-to-complex FFT with a store callback."""
        return self._test_store_helper(xp, dtype, 'rfft')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_irfft_store(self, xp, dtype):
        """Complex-to-real inverse FFT with a store callback."""
        return self._test_store_helper(xp, dtype, 'irfft')
    def _test_load_store_helper(self, xp, dtype, fft_func):
        """Run *fft_func* with both a load (x2.5) and store (/3.8) callback.

        Input/output element types differ per transform: rfft loads real and
        stores complex, irfft loads complex and stores real, fft/ifft use
        complex on both sides.
        """
        fft = getattr(xp.fft, fft_func)
        load_code = _load_callback
        store_code = _store_callback
        # Select load/store accessor and typedef names per transform + dtype.
        if fft_func in ('fft', 'ifft'):
            if dtype == np.complex64:
                load_types = ('x.x', 'cufftComplex', 'cufftCallbackLoadC')
                store_types = ('x.y', 'cufftComplex', 'cufftCallbackStoreC')
            else:
                load_types = (
                    'x.x', 'cufftDoubleComplex', 'cufftCallbackLoadZ')
                store_types = (
                    'x.y', 'cufftDoubleComplex', 'cufftCallbackStoreZ')
        elif fft_func == 'rfft':
            if dtype == np.float32:
                load_types = ('x', 'cufftReal', 'cufftCallbackLoadR')
                store_types = ('x.y', 'cufftComplex', 'cufftCallbackStoreC')
            else:
                load_types = ('x', 'cufftDoubleReal', 'cufftCallbackLoadD')
                store_types = (
                    'x.y', 'cufftDoubleComplex', 'cufftCallbackStoreZ')
        else:  # irfft
            if dtype == np.complex64:
                load_types = ('x.x', 'cufftComplex', 'cufftCallbackLoadC')
                store_types = ('x', 'cufftReal', 'cufftCallbackStoreR')
            else:
                load_types = (
                    'x.x', 'cufftDoubleComplex', 'cufftCallbackLoadZ')
                store_types = ('x', 'cufftDoubleReal', 'cufftCallbackStoreD')
        cb_load = _set_load_cb(load_code, *load_types)
        cb_store = _set_store_cb(store_code, *store_types)
        a = testing.shaped_random(self.shape, xp, dtype)
        if xp is np:
            # Emulate the load callback before and the store callback after.
            a.real *= 2.5
            out = fft(a, n=self.n, norm=self.norm)
            if fft_func != 'irfft':
                out.imag /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.complex64)
            else:
                out /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.float32)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(
                        cb_load=cb_load, cb_store=cb_store):
                    out = fft(a, n=self.n, norm=self.norm)
        return out
    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_fft_load_store(self, xp, dtype):
        """Forward complex FFT with both load and store callbacks."""
        return self._test_load_store_helper(xp, dtype, 'fft')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_ifft_load_store(self, xp, dtype):
        """Inverse complex FFT with both load and store callbacks."""
        return self._test_load_store_helper(xp, dtype, 'ifft')

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_rfft_load_store(self, xp, dtype):
        """Real-to-complex FFT with both load and store callbacks."""
        return self._test_load_store_helper(xp, dtype, 'rfft')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_irfft_load_store(self, xp, dtype):
        """Complex-to-real inverse FFT with both load and store callbacks."""
        return self._test_load_store_helper(xp, dtype, 'irfft')
    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_fft_load_aux(self, xp, dtype):
        """FFT with a load callback using a per-element aux scale array.

        The callback multiplies the real part of element i by callerInfo[i];
        NumPy emulates this by zero-padding/truncating to the FFT length and
        scaling the real part with the same array before transforming.
        """
        fft = xp.fft.fft
        c = _load_callback_with_aux2
        if dtype == np.complex64:
            cb_load = _set_load_cb(
                c, 'x.x', 'cufftComplex', 'cufftCallbackLoadC', 'float')
        else:
            cb_load = _set_load_cb(
                c, 'x.x', 'cufftDoubleComplex', 'cufftCallbackLoadZ', 'double')
        a = testing.shaped_random(self.shape, xp, dtype)
        # The aux array must match the (possibly padded/truncated) FFT input.
        out_last = self.n if self.n is not None else self.shape[-1]
        out_shape = list(self.shape)
        out_shape[-1] = out_last
        last_min = min(self.shape[-1], out_last)
        # Real-valued aux factors (dtype char lowered: complex64 -> float32).
        b = xp.arange(np.prod(out_shape), dtype=xp.dtype(dtype).char.lower())
        b = b.reshape(out_shape)
        if xp is np:
            # Emulate cuFFT's implicit pad/truncate, then apply the callback.
            x = np.zeros(out_shape, dtype=dtype)
            x[..., 0:last_min] = a[..., 0:last_min]
            x.real *= b
            out = fft(x, n=self.n, norm=self.norm)
            if dtype in (np.float32, np.complex64):
                out = out.astype(np.complex64)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(
                        cb_load=cb_load, cb_load_aux_arr=b):
                    out = fft(a, n=self.n, norm=self.norm)
        return out
    def _test_load_store_aux_helper(self, xp, dtype, fft_func):
        """Run *fft_func* with load/store callbacks that use scalar aux data.

        The load callback multiplies the real input part by 2.5 and the
        store callback divides by 3.8 (imaginary part for complex outputs,
        the whole value for real outputs).  The NumPy branch applies the
        same arithmetic directly so results can be compared.
        """
        fft = getattr(xp.fft, fft_func)
        load_code = _load_callback_with_aux
        store_code = _store_callback_with_aux
        if xp is cupy:
            # Scalars passed to the device callbacks; real-valued dtype
            # matching the transform precision.
            load_aux = xp.asarray(2.5, dtype=xp.dtype(dtype).char.lower())
            store_aux = xp.asarray(3.8, dtype=xp.dtype(dtype).char.lower())
        # Select element accessor / cuFFT typedef / callback type / aux type
        # per transform kind and precision.
        if fft_func in ('fft', 'ifft'):
            if dtype == np.complex64:
                load_types = (
                    'x.x', 'cufftComplex', 'cufftCallbackLoadC', 'float')
                store_types = (
                    'x.y', 'cufftComplex', 'cufftCallbackStoreC', 'float')
            else:
                load_types = ('x.x', 'cufftDoubleComplex',
                              'cufftCallbackLoadZ', 'double')
                store_types = ('x.y', 'cufftDoubleComplex',
                               'cufftCallbackStoreZ', 'double')
        elif fft_func == 'rfft':
            if dtype == np.float32:
                load_types = (
                    'x', 'cufftReal', 'cufftCallbackLoadR', 'float')
                store_types = (
                    'x.y', 'cufftComplex', 'cufftCallbackStoreC', 'float')
            else:
                load_types = (
                    'x', 'cufftDoubleReal', 'cufftCallbackLoadD', 'double')
                store_types = ('x.y', 'cufftDoubleComplex',
                               'cufftCallbackStoreZ', 'double')
        else:  # irfft
            if dtype == np.complex64:
                load_types = (
                    'x.x', 'cufftComplex', 'cufftCallbackLoadC', 'float')
                store_types = (
                    'x', 'cufftReal', 'cufftCallbackStoreR', 'float')
            else:
                load_types = ('x.x', 'cufftDoubleComplex',
                              'cufftCallbackLoadZ', 'double')
                store_types = ('x', 'cufftDoubleReal',
                               'cufftCallbackStoreD', 'double')
        cb_load = _set_load_cb(load_code, *load_types)
        cb_store = _set_store_cb(store_code, *store_types)
        a = testing.shaped_random(self.shape, xp, dtype)
        if xp is np:
            # Mirror the callbacks' arithmetic on the host.
            a.real *= 2.5
            out = fft(a, n=self.n, norm=self.norm)
            if fft_func != 'irfft':
                out.imag /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.complex64)
            else:
                out /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.float32)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(
                        cb_load=cb_load, cb_store=cb_store,
                        cb_load_aux_arr=load_aux, cb_store_aux_arr=store_aux):
                    out = fft(a, n=self.n, norm=self.norm)
        return out
    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_fft_load_store_aux(self, xp, dtype):
        """Forward FFT with aux-carrying load/store callbacks."""
        return self._test_load_store_aux_helper(xp, dtype, 'fft')
    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_ifft_load_store_aux(self, xp, dtype):
        """Inverse FFT with aux-carrying load/store callbacks."""
        return self._test_load_store_aux_helper(xp, dtype, 'ifft')
    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_rfft_load_store_aux(self, xp, dtype):
        """Real-to-complex FFT with aux-carrying load/store callbacks."""
        return self._test_load_store_aux_helper(xp, dtype, 'rfft')
    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
    def test_irfft_load_store_aux(self, xp, dtype):
        """Complex-to-real inverse FFT with aux-carrying load/store callbacks."""
        return self._test_load_store_aux_helper(xp, dtype, 'irfft')
@testing.parameterize(
    {'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
    {'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
    {'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
    {'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
    {'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
    {'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
)
@testing.with_requires('cython>=0.29.0')
@testing.gpu
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason='callbacks are only supported on Linux')
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
                    reason='hipFFT does not support callbacks')
class TestNdCallbacks:
    """N-dimensional FFT tests exercising cuFFT load/store callbacks.

    Each helper runs the transform on CuPy with device callbacks attached
    and, for the NumPy reference, applies the callback's arithmetic on the
    host instead (load: ``real *= 2.5``; store: divide by 3.8) so the two
    results can be compared by ``numpy_cupy_allclose``.
    """

    def _test_load_helper(self, xp, dtype, fft_func):
        """Run *fft_func* with a load callback scaling the real input by 2.5."""
        fft = getattr(xp.fft, fft_func)
        load_code = _load_callback
        # Pick element accessor / cuFFT typedef / callback type by precision.
        if dtype == np.complex64:
            types = ('x.x', 'cufftComplex', 'cufftCallbackLoadC')
        elif dtype == np.complex128:
            types = ('x.x', 'cufftDoubleComplex', 'cufftCallbackLoadZ')
        elif dtype == np.float32:
            types = ('x', 'cufftReal', 'cufftCallbackLoadR')
        else:
            types = ('x', 'cufftDoubleReal', 'cufftCallbackLoadD')
        cb_load = _set_load_cb(load_code, *types)
        a = testing.shaped_random(self.shape, xp, dtype)
        if xp is np:
            # Host-side emulation of the load callback.
            a.real *= 2.5
            out = fft(a, s=self.s, axes=self.axes, norm=self.norm)
            if dtype in (np.float32, np.complex64):
                if fft_func != 'irfftn':
                    out = out.astype(np.complex64)
                else:
                    out = out.astype(np.float32)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(cb_load=cb_load):
                    out = fft(a, s=self.s, axes=self.axes, norm=self.norm)
        return out

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_fftn_load(self, xp, dtype):
        """N-d forward FFT with a load callback."""
        return self._test_load_helper(xp, dtype, 'fftn')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_ifftn_load(self, xp, dtype):
        """N-d inverse FFT with a load callback."""
        return self._test_load_helper(xp, dtype, 'ifftn')

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_rfftn_load(self, xp, dtype):
        """N-d real-to-complex FFT with a load callback."""
        return self._test_load_helper(xp, dtype, 'rfftn')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_irfftn_load(self, xp, dtype):
        """N-d complex-to-real inverse FFT with a load callback."""
        return self._test_load_helper(xp, dtype, 'irfftn')

    def _test_store_helper(self, xp, dtype, fft_func):
        """Run *fft_func* with a store callback dividing the output by 3.8."""
        fft = getattr(xp.fft, fft_func)
        store_code = _store_callback
        # irfftn produces real output, so the callback operates on the whole
        # value ('x'); the other transforms touch the imaginary part ('x.y').
        if dtype == np.complex64:
            if fft_func != 'irfftn':
                types = ('x.y', 'cufftComplex', 'cufftCallbackStoreC')
            else:
                types = ('x', 'cufftReal', 'cufftCallbackStoreR')
        elif dtype == np.complex128:
            if fft_func != 'irfftn':
                types = ('x.y', 'cufftDoubleComplex', 'cufftCallbackStoreZ')
            else:
                types = ('x', 'cufftDoubleReal', 'cufftCallbackStoreD')
        elif dtype == np.float32:
            types = ('x.y', 'cufftComplex', 'cufftCallbackStoreC')
        elif dtype == np.float64:
            types = ('x.y', 'cufftDoubleComplex', 'cufftCallbackStoreZ')
        cb_store = _set_store_cb(store_code, *types)
        a = testing.shaped_random(self.shape, xp, dtype)
        if xp is np:
            # Host-side emulation of the store callback.
            out = fft(a, s=self.s, axes=self.axes, norm=self.norm)
            if fft_func != 'irfftn':
                out.imag /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.complex64)
            else:
                out /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.float32)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(cb_store=cb_store):
                    out = fft(a, s=self.s, axes=self.axes, norm=self.norm)
        return out

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_fftn_store(self, xp, dtype):
        """N-d forward FFT with a store callback."""
        return self._test_store_helper(xp, dtype, 'fftn')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_ifftn_store(self, xp, dtype):
        """N-d inverse FFT with a store callback."""
        return self._test_store_helper(xp, dtype, 'ifftn')

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_rfftn_store(self, xp, dtype):
        """N-d real-to-complex FFT with a store callback."""
        return self._test_store_helper(xp, dtype, 'rfftn')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_irfftn_store(self, xp, dtype):
        """N-d complex-to-real inverse FFT with a store callback."""
        return self._test_store_helper(xp, dtype, 'irfftn')

    def _test_load_store_helper(self, xp, dtype, fft_func):
        """Run *fft_func* with both load (x2.5) and store (/3.8) callbacks."""
        fft = getattr(xp.fft, fft_func)
        load_code = _load_callback
        store_code = _store_callback
        # Accessor / typedef / callback type per transform kind and precision.
        if fft_func in ('fftn', 'ifftn'):
            if dtype == np.complex64:
                load_types = ('x.x', 'cufftComplex', 'cufftCallbackLoadC')
                store_types = ('x.y', 'cufftComplex', 'cufftCallbackStoreC')
            else:
                load_types = (
                    'x.x', 'cufftDoubleComplex', 'cufftCallbackLoadZ')
                store_types = (
                    'x.y', 'cufftDoubleComplex', 'cufftCallbackStoreZ')
        elif fft_func == 'rfftn':
            if dtype == np.float32:
                load_types = ('x', 'cufftReal', 'cufftCallbackLoadR')
                store_types = ('x.y', 'cufftComplex', 'cufftCallbackStoreC')
            else:
                load_types = ('x', 'cufftDoubleReal', 'cufftCallbackLoadD')
                store_types = (
                    'x.y', 'cufftDoubleComplex', 'cufftCallbackStoreZ')
        else:  # irfft
            if dtype == np.complex64:
                load_types = ('x.x', 'cufftComplex', 'cufftCallbackLoadC')
                store_types = ('x', 'cufftReal', 'cufftCallbackStoreR')
            else:
                load_types = (
                    'x.x', 'cufftDoubleComplex', 'cufftCallbackLoadZ')
                store_types = ('x', 'cufftDoubleReal', 'cufftCallbackStoreD')
        cb_load = _set_load_cb(load_code, *load_types)
        cb_store = _set_store_cb(store_code, *store_types)
        a = testing.shaped_random(self.shape, xp, dtype)
        if xp is np:
            # Host-side emulation of both callbacks.
            a.real *= 2.5
            out = fft(a, s=self.s, axes=self.axes, norm=self.norm)
            if fft_func != 'irfftn':
                out.imag /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.complex64)
            else:
                out /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.float32)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(
                        cb_load=cb_load, cb_store=cb_store):
                    out = fft(a, s=self.s, axes=self.axes, norm=self.norm)
        return out

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_fftn_load_store(self, xp, dtype):
        """N-d forward FFT with load and store callbacks."""
        return self._test_load_store_helper(xp, dtype, 'fftn')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_ifftn_load_store(self, xp, dtype):
        """N-d inverse FFT with load and store callbacks."""
        return self._test_load_store_helper(xp, dtype, 'ifftn')

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_rfftn_load_store(self, xp, dtype):
        """N-d real-to-complex FFT with load and store callbacks."""
        return self._test_load_store_helper(xp, dtype, 'rfftn')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_irfftn_load_store(self, xp, dtype):
        """N-d complex-to-real inverse FFT with load and store callbacks."""
        return self._test_load_store_helper(xp, dtype, 'irfftn')

    def _test_load_store_aux_helper(self, xp, dtype, fft_func):
        """Run *fft_func* with load/store callbacks taking scalar aux data."""
        fft = getattr(xp.fft, fft_func)
        load_code = _load_callback_with_aux
        store_code = _store_callback_with_aux
        if xp is cupy:
            # Scalars handed to the device callbacks, in the transform's
            # real precision.
            load_aux = xp.asarray(2.5, dtype=xp.dtype(dtype).char.lower())
            store_aux = xp.asarray(3.8, dtype=xp.dtype(dtype).char.lower())
        # Accessor / typedef / callback type / aux C type per kind+precision.
        if fft_func in ('fftn', 'ifftn'):
            if dtype == np.complex64:
                load_types = (
                    'x.x', 'cufftComplex', 'cufftCallbackLoadC', 'float')
                store_types = (
                    'x.y', 'cufftComplex', 'cufftCallbackStoreC', 'float')
            else:
                load_types = ('x.x', 'cufftDoubleComplex',
                              'cufftCallbackLoadZ', 'double')
                store_types = ('x.y', 'cufftDoubleComplex',
                               'cufftCallbackStoreZ', 'double')
        elif fft_func == 'rfftn':
            if dtype == np.float32:
                load_types = (
                    'x', 'cufftReal', 'cufftCallbackLoadR', 'float')
                store_types = (
                    'x.y', 'cufftComplex', 'cufftCallbackStoreC', 'float')
            else:
                load_types = (
                    'x', 'cufftDoubleReal', 'cufftCallbackLoadD', 'double')
                store_types = ('x.y', 'cufftDoubleComplex',
                               'cufftCallbackStoreZ', 'double')
        else:  # irfftn
            if dtype == np.complex64:
                load_types = (
                    'x.x', 'cufftComplex', 'cufftCallbackLoadC', 'float')
                store_types = (
                    'x', 'cufftReal', 'cufftCallbackStoreR', 'float')
            else:
                load_types = ('x.x', 'cufftDoubleComplex',
                              'cufftCallbackLoadZ', 'double')
                store_types = ('x', 'cufftDoubleReal',
                               'cufftCallbackStoreD', 'double')
        cb_load = _set_load_cb(load_code, *load_types)
        cb_store = _set_store_cb(store_code, *store_types)
        a = testing.shaped_random(self.shape, xp, dtype)
        if xp is np:
            # Host-side emulation of both aux callbacks.
            a.real *= 2.5
            out = fft(a, s=self.s, axes=self.axes, norm=self.norm)
            if fft_func != 'irfftn':
                out.imag /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.complex64)
            else:
                out /= 3.8
                if dtype in (np.float32, np.complex64):
                    out = out.astype(np.float32)
        else:
            with use_temporary_cache_dir():
                with xp.fft.config.set_cufft_callbacks(
                        cb_load=cb_load, cb_store=cb_store,
                        cb_load_aux_arr=load_aux, cb_store_aux_arr=store_aux):
                    out = fft(a, s=self.s, axes=self.axes, norm=self.norm)
        return out

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_fftn_load_store_aux(self, xp, dtype):
        """N-d forward FFT with aux-carrying load/store callbacks."""
        return self._test_load_store_aux_helper(xp, dtype, 'fftn')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_ifftn_load_store_aux(self, xp, dtype):
        """N-d inverse FFT with aux-carrying load/store callbacks."""
        return self._test_load_store_aux_helper(xp, dtype, 'ifftn')

    @testing.for_float_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_rfftn_load_store_aux(self, xp, dtype):
        """N-d real-to-complex FFT with aux-carrying load/store callbacks."""
        return self._test_load_store_aux_helper(xp, dtype, 'rfftn')

    @testing.for_complex_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
                                 contiguous_check=False)
    def test_irfftn_load_store_aux(self, xp, dtype):
        """N-d complex-to-real inverse FFT with aux-carrying callbacks."""
        return self._test_load_store_aux_helper(xp, dtype, 'irfftn')
| {
"content_hash": "45cf7aedc250c3d09dce92851e630310",
"timestamp": "",
"source": "github",
"line_count": 710,
"max_line_length": 79,
"avg_line_length": 41.426760563380284,
"alnum_prop": 0.5499269030700711,
"repo_name": "cupy/cupy",
"id": "de9995fdb86116da5630504b0f6a9db950483a8d",
"size": "29413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cupy_tests/fft_tests/test_callback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "38"
},
{
"name": "C",
"bytes": "712019"
},
{
"name": "C++",
"bytes": "895316"
},
{
"name": "Cuda",
"bytes": "151799"
},
{
"name": "Cython",
"bytes": "1996454"
},
{
"name": "Dockerfile",
"bytes": "40251"
},
{
"name": "PowerShell",
"bytes": "7361"
},
{
"name": "Python",
"bytes": "4841354"
},
{
"name": "Shell",
"bytes": "24521"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import os.path
from datetime import datetime
from sklearn.metrics import mean_squared_error
from sklearn.metrics.regression import r2_score
from sklearn.model_selection import StratifiedKFold
from include.dataset_fnames import generate_station_data_fname, generate_data_fname, generate_response_data_fname
from include.feature_lists import numeric_features, numeric_missing_features_list, categoric_features
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from include.regressor_fnames import generate_regressor_fname, save_regressor, load_regressor
from include import SimpleMeanRegressor, SimpleMedianRegressor, SimpleModeRegressor
split_count = 3
def xgb_regressor_wrapper(X_train, y_train):
    """Fit and return an XGBoost regressor on the given training data."""
    model = XGBRegressor(
        objective='reg:linear',
        n_estimators=1000,
        learning_rate=0.01,
        base_score=0.005,
        n_jobs=6,
    )
    model.fit(X=X_train, y=y_train)
    return model
def cb_regressor_wrapper(X_train, y_train):
    """Fit and return a CatBoost regressor on the given training data."""
    model = CatBoostRegressor(iterations=1500, thread_count=8)
    model.fit(X=X_train, y=y_train)
    return model
def rf_regressor_wrapper(X_train, y_train):
    """Fit and return a random-forest regressor on the given training data."""
    model = RandomForestRegressor(
        n_estimators=1500,
        max_features='sqrt',
        max_depth=6,
        n_jobs=-1,
    )
    model.fit(X=X_train, y=y_train)
    return model
# Candidate regressors tried for each feature.  Only CatBoost is currently
# enabled; the XGBoost / RandomForest entries are kept for quick re-activation.
regressor_list = {
    # 'XGB_Regressor': xgb_regressor_wrapper,
    'CB_Regressor': cb_regressor_wrapper,
    # 'RF_Regressor': rf_regressor_wrapper,
}
# Maps each station id to the station immediately preceding it on the
# production line ('L0S00' has no predecessor).  Used by load_station_df to
# optionally join the previous station's numeric features.
prev_station = {
    'L0S00' : '', 'L0S01' : 'L0S00', 'L0S02' : 'L0S01', 'L0S03' : 'L0S02', 'L0S04' : 'L0S03', 'L0S05' : 'L0S04', 'L0S06' : 'L0S05',
    'L0S07' : 'L0S06', 'L0S08' : 'L0S07', 'L0S09' : 'L0S08', 'L0S10' : 'L0S09', 'L0S11' : 'L0S10', 'L0S12' : 'L0S11', 'L0S13' : 'L0S12',
    'L0S14' : 'L0S13', 'L0S15' : 'L0S14', 'L0S16' : 'L0S15', 'L0S17' : 'L0S16', 'L0S18' : 'L0S17', 'L0S19' : 'L0S18', 'L0S20' : 'L0S19',
    'L0S21' : 'L0S20', 'L0S22' : 'L0S21', 'L0S23' : 'L0S22', 'L1S24' : 'L0S23', 'L1S25' : 'L1S24', 'L2S26' : 'L1S25', 'L2S27' : 'L2S26',
    'L2S28' : 'L2S27', 'L3S29' : 'L2S28', 'L3S30' : 'L3S29', 'L3S31' : 'L3S30', 'L3S32' : 'L3S31', 'L3S33' : 'L3S32', 'L3S34' : 'L3S33',
    'L3S35' : 'L3S34', 'L3S36' : 'L3S35', 'L3S37' : 'L3S36', 'L3S38' : 'L3S37', 'L3S39' : 'L3S38', 'L3S40' : 'L3S39', 'L3S41' : 'L3S40',
    'L3S42' : 'L3S41', 'L3S43' : 'L3S42', 'L3S44' : 'L3S43', 'L3S45' : 'L3S44', 'L3S46' : 'L3S45', 'L3S47' : 'L3S46', 'L3S48' : 'L3S47',
    'L3S49' : 'L3S48', 'L3S50' : 'L3S49', 'L3S51' : 'L3S50'
}
def load_station_df(station_id, sample_type='train', use_time=False, use_null_tracker=False, use_category=False, use_prev_station=False, use_product_id=False, use_response=False):
    """Load one station's feature DataFrame, optionally enriched.

    Optional enrichments (all off by default): the 'time' column, per-feature
    '<name>_isnull' indicator columns, one-hot-encoded categorical features,
    the previous station's numeric features, the 'product' column, and
    (train only) the 'Response' column.

    Returns (station_df, response_df); response_df is None for 'test' data.
    """
    fname = generate_station_data_fname(station_id, sample_type=sample_type, data_type='numeric', use_product=False, allow_nan_values=False)
    print fname
    station_features = numeric_features[station_id]
    features = ['Id'] + station_features
    if use_time:
        features = features + ['time']
    station_df = pd.read_csv(fname, usecols=features, index_col=['Id'])
    if use_null_tracker:
        # Add a 0/1 indicator column per numeric feature marking missing rows.
        for feature in station_features:
            new_column = feature + '_isnull'
            null_indices = station_df[station_df[feature].isnull()].index
            station_df[new_column] = 0
            station_df.loc[null_indices, new_column] = 1
    if use_product_id:
        fname2 = generate_station_data_fname(station_id, sample_type=sample_type, data_type='numeric', use_product=True, allow_nan_values=False)
        print fname2
        features = ['Id'] + ['product']
        station_df2 = pd.read_csv(fname2, usecols=features, index_col=['Id'], dtype=object)
        station_df['product'] = station_df2['product']
    if use_category:
        # Join categorical features and one-hot encode the combined frame.
        fname2 = generate_station_data_fname(station_id, sample_type=sample_type, data_type='categorical', use_product=False, allow_nan_values=False)
        print fname2
        station_features = categoric_features[station_id]
        features = ['Id'] + station_features
        station_df2 = pd.read_csv(fname2, usecols=features, index_col=['Id'], dtype=object)
        # Re-select the original index after concat to drop unmatched rows.
        indices = station_df.index
        station_df = pd.concat([station_df, station_df2], axis=1)
        station_df = station_df.loc[indices]
        station_df = pd.get_dummies(station_df)
    if use_prev_station:
        # The first station has no predecessor to join.
        if station_id != 'L0S00':
            prev_id = prev_station[station_id]
            fname2 = generate_station_data_fname(prev_id, sample_type=sample_type, data_type='numeric', use_product=False, allow_nan_values=False)
            print fname2
            station_features = numeric_features[prev_id]
            features = ['Id'] + station_features
            station_df2 = pd.read_csv(fname2, usecols=features, index_col=['Id'], dtype=object)
            indices = station_df.index
            station_df = pd.concat([station_df, station_df2], axis=1)
            station_df = station_df.loc[indices]
    response_df = None
    if (sample_type=='train'):
        # Align the global response file to this station's rows.
        fname = generate_response_data_fname()
        field_list = ['Id'] + ['Response']
        response_df = pd.read_csv(fname, usecols=field_list, index_col=['Id'], dtype='int')
        indices = station_df.index
        response_df = response_df.loc[indices]
        if use_response:
            station_df = pd.concat([station_df, response_df], axis=1)
    return station_df, response_df
def regressor_for_station_feature(station_id, feature, X, y, response):
    """Select the best regressor for predicting one station feature.

    Each candidate in ``regressor_list`` is scored by mean r2 over a
    stratified K-fold split (stratified on the binary ``response`` so the
    rare failure class appears in every fold).

    Returns (best_mean_r2, best_regressor).  Note the returned model is the
    one fitted on the LAST fold of the winning candidate, not a refit on
    the full data -- presumably intentional, kept as-is.
    """
    assert len(X) == len(response)
    # shuffle=False: random_state must not be passed (sklearn >= 0.24 raises
    # ValueError for it; it was silently ignored before).  The split is
    # deterministic either way.
    skf = StratifiedKFold(n_splits=split_count, shuffle=False)
    best_regressor = None
    best_r2_score = float('-inf')
    for name in regressor_list:
        regressor_wrapper = regressor_list.get(name)
        r2score = 0
        for train_index, test_index in skf.split(X, response):
            X_train, y_train = X[train_index], y[train_index]
            X_test, y_test = X[test_index], y[test_index]
            regressor = regressor_wrapper(X_train, y_train)
            y_predict = regressor.predict(X_test)
            # Accumulate the mean r2 across folds.
            r2score += 1.0 * r2_score(y_test, y_predict) / split_count
        if r2score > best_r2_score:
            best_r2_score = r2score
            best_regressor = regressor
    return best_r2_score, best_regressor
def get_missing_features(station_id, sample_type='train'):
    """Return the station's numeric feature names that contain any NaN."""
    fname = generate_station_data_fname(station_id=station_id, sample_type=sample_type, data_type='numeric', allow_nan_values=False)
    features = ['Id'] + numeric_features[station_id]
    station_df = pd.read_csv(fname, usecols=features, index_col=['Id'], dtype=np.float32)
    missing = [
        column for column in station_df.columns
        if station_df[column].isnull().any()
    ]
    del station_df
    return missing
def save_df(station_id, station_df, sample_type, iter_no):
    """Persist an (imputed) station DataFrame for the given iteration."""
    fname = generate_station_data_fname(station_id, sample_type, 'numeric', iter_no=iter_no)
    station_df.to_csv(fname)
def regressor_for_missing_features(station_id, iter_count=10):
print "Station:", station_id
train_missing_features = get_missing_features(station_id, 'train')
test_missing_features = get_missing_features(station_id, 'test')
station_df_missing_features = sorted(list(set(train_missing_features).union(set(test_missing_features))))
if len(station_df_missing_features) == 0:
print "Nothing to do. Skip."
return
station_df, response_df = load_station_df(station_id, sample_type='train', use_time=True, use_null_tracker=False, use_category=True, use_response=False, use_product_id=False)
full_features = station_df.columns
# predict: Load test dataset
station_test_df, _ = load_station_df(station_id, sample_type='test', use_time=True, use_null_tracker=False, use_category=True, use_response=False, use_product_id=False)
# print "Station: %s, # of features: %d" % (station_id, len(station_df.columns))
station_null_indices = {}
test_null_indices = {}
for feature in station_df_missing_features:
feature_null_indices = station_df[station_df[feature].isnull()].index
station_null_indices[feature] = feature_null_indices
# predict: Initialize missing indices
feature_null_indices = station_test_df[station_test_df[feature].isnull()].index
test_null_indices[feature] = feature_null_indices
print "Train:", station_null_indices
print "Test:", test_null_indices
temp_df = station_df.copy()
# predict: don't know if we need this
temp_test_df = station_test_df.copy()
# Fill in initial values for missing values
stds = station_df.std()
means = station_df.mean()
for feature in station_df_missing_features:
std, mean = stds[feature], means[feature]
print feature, std, mean
station_df.loc[station_null_indices[feature], feature] = np.random.normal(mean, std, len(station_null_indices[feature]))
# predict: Initialize missing values
# stationtest_df.loc[test_null_indices[feature], feature] = np.random.normal(mean, std, len(test_null_indices[feature]))
feature_scores = {}
print "Initial Scores:"
for feature in station_df_missing_features:
print station_id, ";", feature, ";",
train_features = list((set(full_features) - set([feature])))
train_indices = station_df[station_df[feature].notnull()].index
test_indices = station_df[station_df[feature].isnull()].index
X_train = station_df.loc[train_indices, train_features]
y_train = station_df.loc[train_indices, feature]
X_test = station_df.loc[test_indices, train_features] # feature == null
response = response_df.loc[X_train.index]
response = response['Response']
best_score, best_regressor = regressor_for_station_feature(station_id=station_id, feature=feature,
X=X_train.values, y=y_train.values,
response=response.values)
print "%10f; %s" % (best_score, str(type(best_regressor)))
save_regressor(station_id, feature, best_regressor, 0)
# break
feature_scores[feature] = best_score
print "Iteration Scores:"
for iter in range(iter_count):
print "Iteration:", iter+1
t0 = datetime.now()
for feature in sorted(feature_scores, key=feature_scores.get, reverse=True): #station_df_missing_features:
print station_id, ";", feature, ";",
regressor_fname = generate_regressor_fname(station_id, feature, iter+1)
if os.path.exists(regressor_fname):
print "Regressor exists. Continue."
continue
train_features = list((set(full_features) - set([feature])))
train_indices = station_df[temp_df[feature].notnull()].index
test_indices = station_df[temp_df[feature].isnull()].index
X_train = station_df.loc[train_indices, train_features]
y_train = station_df.loc[train_indices, feature]
X_test = station_df.loc[test_indices, train_features] # feature == null
response = response_df.loc[X_train.index]
response = response['Response']
best_score, best_regressor = regressor_for_station_feature(station_id=station_id, feature=feature,
X=X_train.values, y=y_train.values,
response=response.values)
print "%10f; %s" % (best_score, str(type(best_regressor)))
# break
save_regressor(station_id, feature, best_regressor, iter+1)
feature_scores[feature] = best_score
if (len(X_test) > 0):
y_predict = best_regressor.predict(X_test.values)
station_df.loc[test_indices, feature] = y_predict
else:
print ("Station %s has no missing values in train dataset!", station_id)
# predict: use regressor to update station_test_df
test_indices = station_test_df[temp_test_df[feature].isnull()].index
if (len(test_indices) > 0):
X_test = station_test_df.loc[test_indices, train_features] # feature == null
y_predict = best_regressor.predict(X_test.values)
station_test_df.loc[test_indices, feature] = y_predict
else:
print ("Station %s has no missing values in test dataset!", station_id)
save_df(station_id, station_df, 'train', iter+1)
# predict: save test data
save_df(station_id, station_test_df, 'test', iter+1)
t1 = datetime.now()
print "Iteration:", iter+1, t1 - t0
# predict: Save prediction file
del station_df
if __name__ == '__main__':
    # Impute missing numeric features for every station (sorted order),
    # running three refinement iterations each.
    for station_id in sorted(numeric_features):
        # station_id = 'L1S24'
        # station_id = 'L3S49'
        # regressor_check_for_station(station_id)
        regressor_for_missing_features(station_id, iter_count=3)
| {
"content_hash": "5c2354f9c5b6328cf93c1eec9f3e7a2f",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 179,
"avg_line_length": 41.74794520547945,
"alnum_prop": 0.6130725817036357,
"repo_name": "zakkum42/Bosch",
"id": "00f52359cec2c934d30d7622df25fe6c76e91978",
"size": "15329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/03-feature_engineering/iterative_regressor_for_numeric_features_prefilled.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11904112"
},
{
"name": "Python",
"bytes": "541304"
},
{
"name": "Shell",
"bytes": "1566"
},
{
"name": "sed",
"bytes": "112773"
}
],
"symlink_target": ""
} |
import os, sys
import tensorflow as tf
class IW(tf.keras.Model):
    """Importance-weight head on top of a source/target domain discriminator."""

    def __init__(self, model_sd, iw_max=1e6):
        super().__init__()
        # model_sd: binary domain classifier returning {'probs': ...};
        # presumably probs[:, 1] is the source-domain probability -- TODO
        # confirm against the caller.
        self.model_sd = model_sd
        # Upper clip applied to the importance weight 1/g - 1.
        self.iw_max = iw_max

    def __call__(self, x, training=False, iw=True):
        # NOTE(review): `training` is accepted but the discriminator is
        # always invoked with training=False -- looks like a deliberate
        # inference-only evaluation; confirm.
        probs = self.model_sd(x, training=False)['probs']
        g = probs[:, 1]
        if iw:
            # Importance weight (1 - g) / g, clipped at iw_max.  g == 0
            # would produce inf before the clip.
            return tf.math.minimum(1/g - 1.0, self.iw_max)
        else:
            return g
@tf.custom_gradient
def grad_reverse(x):
    """Identity in the forward pass; negates the gradient in the backward pass."""
    def _negated_grad(dy):
        return -dy
    return tf.identity(x), _negated_grad
class GradReverseLayer(tf.keras.layers.Layer):
    """Keras layer that reverses gradients during training only."""

    def __init__(self):
        super().__init__()

    def call(self, x, training=False):
        # Gradient reversal is a training-time trick; inference is a no-op.
        return grad_reverse(x) if training else x
class DAN(tf.keras.Model):
    """Domain-adversarial network pairing a task model with an adversary head."""

    def __init__(self, model, model_adv):
        super().__init__()
        self.model = model
        self.model_adv = model_adv

    def call(self, x, training=False):
        outputs = self.model(x, training=training)
        # The adversary predicts the domain from the shared embedding.
        outputs['domain'] = self.model_adv(outputs['embeds'], training=training)
        return outputs
def set_trainable(model, mode):
    """Set the ``trainable`` flag of every layer of *model* to *mode*."""
    for layer in model.layers:
        layer.trainable = mode
| {
"content_hash": "3a9ad6717ee2e97ef978fb56de79a008",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 72,
"avg_line_length": 23.547169811320753,
"alnum_prop": 0.5560897435897436,
"repo_name": "googleinterns/intern2020_cocal",
"id": "32175f19d8da53e696122e4302ff7de3135468a7",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uncertainty/model/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "293984"
},
{
"name": "Shell",
"bytes": "3237"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.contrib.auth.models import User
from django.core.cache import cache
from boardinghouse.models import Schema
from boardinghouse.schema import get_active_schemata, get_all_schemata
class TestUserSchemataCache(TestCase):
def test_adding_schema_to_user_clears_cache(self):
User.objects.create_user(username='a', email='a@example.com', password='a')
Schema.objects.mass_create('a','b','c')
user = User.objects.get(username='a')
self.assertEquals(0, len(user.visible_schemata))
self.assertEquals([], list(cache.get('visible-schemata-%s' % user.pk)))
user.schemata.add(Schema.objects.get(schema='a'))
self.assertEquals(None, cache.get('visible-schemata-%s' % user.pk))
def test_removing_schema_from_user_clears_cache(self):
User.objects.create_user(username='a', email='a@example.com', password='a')
Schema.objects.mass_create('a','b','c')
user = User.objects.get(username='a')
user.schemata.add(*Schema.objects.all())
self.assertEquals(3, len(user.visible_schemata))
user.schemata.remove(Schema.objects.get(schema='a'))
self.assertEquals(None, cache.get('visible-schemata-%s' % user.pk))
def test_adding_users_to_schema_clears_cache(self):
User.objects.create_user(username='a', email='a@example.com', password='a')
Schema.objects.mass_create('a','b','c')
user = User.objects.get(username='a')
self.assertEquals(0, len(user.visible_schemata))
self.assertEquals([], list(cache.get('visible-schemata-%s' % user.pk)))
schema = Schema.objects.get(schema='a')
schema.users.add(user)
self.assertEquals(None, cache.get('visible-schemata-%s' % user.pk))
def test_removing_users_from_schema_clears_cache(self):
User.objects.create_user(username='a', email='a@example.com', password='a')
Schema.objects.mass_create('a','b','c')
user = User.objects.get(username='a')
user.schemata.add(*Schema.objects.all())
self.assertEquals(3, len(user.visible_schemata))
schema = Schema.objects.get(schema='a')
schema.users.remove(user)
self.assertEquals(None, cache.get('visible-schemata-%s' % user.pk))
def test_saving_schema_clears_cache_for_related_users(self):
User.objects.create_user(username='a', email='a@example.com', password='a')
Schema.objects.mass_create('a','b','c')
user = User.objects.get(username='a')
user.schemata.add(*Schema.objects.all())
self.assertEquals(3, len(user.visible_schemata))
Schema.objects.get(schema='a').save()
self.assertEquals(None, cache.get('visible-schemata-%s' % user.pk))
def test_saving_schema_clears_global_active_schemata_cache(self):
    """Saving a schema must drop the cached global active schemata list."""
    Schema.objects.mass_create('a', 'b', 'c')
    schema = Schema.objects.get(schema='a')
    # Prime both global caches by evaluating the helpers.
    # assertEquals is a deprecated alias; use assertEqual/assertIsNone.
    self.assertEqual(3, len(get_all_schemata()))
    self.assertEqual(3, len(get_active_schemata()))
    schema.is_active = False
    schema.save()
    self.assertIsNone(cache.get('active-schemata'))
self.assertEquals(None, cache.get('all-schemata')) | {
"content_hash": "1cf2abc6c48473a3a98d6ed39268b946",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 83,
"avg_line_length": 37.95294117647059,
"alnum_prop": 0.6553006819590824,
"repo_name": "luzfcb/django-boardinghouse",
"id": "3b1f56cdf532b46517eba6f332f87456d184f707",
"size": "3226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests/test_user_schemata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4516"
},
{
"name": "Makefile",
"bytes": "614"
},
{
"name": "PLpgSQL",
"bytes": "928"
},
{
"name": "Python",
"bytes": "138866"
},
{
"name": "Shell",
"bytes": "330"
}
],
"symlink_target": ""
} |
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ModelDatasetTest(test.TestCase):
  """Benchmarks for tf.data pipelines wrapped in `optimization.model()`.

  Each test builds a pipeline, wraps it with the autotuning `model()`
  transformation, and prints per-element latency statistics.  The
  previously duplicated iterator/warm-up/timing/reporting boilerplate is
  factored into `_benchmark`.
  """

  def _benchmark(self, dataset, num_iters, fetch_full_output=False):
    """Time `num_iters` steps of `dataset` and print latency statistics.

    Args:
      dataset: the `tf.data.Dataset` to benchmark; `optimization.model()`
        is applied here so every benchmark measures the autotuned pipeline.
      num_iters: number of timed iterations (after 5 warm-up steps).
      fetch_full_output: if True, fetch the element values themselves
        (needed when the element is a nested structure); otherwise fetch
        only the op to avoid transferring the large tensors.
    """
    iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
    get_next = iterator.get_next()
    fetch = get_next if fetch_full_output else get_next.op
    deltas = []
    with self.test_session() as sess:
      # Warm up the pipeline (and the autotuner) before timing.
      for _ in range(5):
        sess.run(fetch)
      for _ in range(num_iters):
        start = time.time()
        sess.run(fetch)
        end = time.time()
        deltas.append(end - start)
    print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
          (np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
           np.max(deltas)))

  def testModelMap(self):
    k = 1024 * 1024
    dataset = dataset_ops.Dataset.from_tensors(
        (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
    dataset = dataset.map(math_ops.matmul)
    self._benchmark(dataset, 100)

  def testModelParallelMap(self):
    k = 1024 * 1024
    dataset = dataset_ops.Dataset.from_tensors(
        (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
    dataset = dataset.map(math_ops.matmul, num_parallel_calls=56)
    self._benchmark(dataset, 1000)

  def testModelMapAndBatch(self):
    batch_size = 16
    k = 1024 * 1024
    dataset = dataset_ops.Dataset.from_tensors(
        (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
    dataset = dataset.apply(
        batching.map_and_batch(
            math_ops.matmul, num_parallel_calls=28, batch_size=batch_size))
    self._benchmark(dataset, 10)

  def testModelParallelInterleave(self):
    k = 1024 * 1024
    dataset = dataset_ops.Dataset.from_tensors(
        (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
    dataset = dataset.map(math_ops.matmul)
    dataset = dataset_ops.Dataset.range(1).repeat().interleave(
        lambda _: dataset, cycle_length=56, num_parallel_calls=56)
    self._benchmark(dataset, 1000)

  def testModelNested(self):
    k = 1024 * 1024
    a = (np.random.rand(1, 8 * k), np.random.rand(8 * k, 1))
    b = (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))
    c = (np.random.rand(1, 2 * k), np.random.rand(2 * k, 1))
    dataset = dataset_ops.Dataset.from_tensors((a, b, c)).repeat()

    # Each map stage multiplies one of the three matrix pairs in place.
    def f1(a, b, c):
      x, y = a
      return math_ops.matmul(x, y), b, c

    def f2(a, b, c):
      x, y = b
      return a, math_ops.matmul(x, y), c

    def f3(a, b, c):
      x, y = c
      return a, b, math_ops.matmul(x, y)

    dataset = dataset.map(f1, num_parallel_calls=32)
    dataset = dataset_ops.Dataset.range(1).repeat().interleave(
        lambda _: dataset, cycle_length=2)
    dataset = dataset.map(f2, num_parallel_calls=16)
    dataset = dataset_ops.Dataset.range(1).repeat().interleave(
        lambda _: dataset, cycle_length=2)
    dataset = dataset.map(f3, num_parallel_calls=10)
    # The element is a nested tuple, so fetch the values rather than the op.
    self._benchmark(dataset, 100, fetch_full_output=True)
# Run the benchmarks via the TensorFlow test runner when invoked directly.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "96d33a72b8f716181663f12adbb858f8",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 78,
"avg_line_length": 35.07361963190184,
"alnum_prop": 0.553262200454784,
"repo_name": "AnishShah/tensorflow",
"id": "0a87d3e90550da8485b4f9acd941c836d7b62951",
"size": "6406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/data/python/kernel_tests/optimization/model_dataset_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "337393"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48452986"
},
{
"name": "CMake",
"bytes": "195768"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1210238"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834103"
},
{
"name": "Jupyter Notebook",
"bytes": "2584246"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40782103"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "458367"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a nullable free-text
    # ``description`` column (max 1024 chars) to the ``Experiment`` model.

    dependencies = [
        ('basicviz', '0011_auto_20160717_2121'),
    ]

    operations = [
        migrations.AddField(
            model_name='experiment',
            name='description',
            field=models.CharField(max_length=1024, null=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "402334a29961c39726a43fc9148e9466",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 22.842105263157894,
"alnum_prop": 0.5990783410138248,
"repo_name": "sdrogers/ms2ldaviz",
"id": "49c8e625606dead1e3dc26d669aea3d9a18d1fcc",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ms2ldaviz/basicviz/migrations/0012_experiment_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155389"
},
{
"name": "Dockerfile",
"bytes": "324"
},
{
"name": "HTML",
"bytes": "281089"
},
{
"name": "JavaScript",
"bytes": "564464"
},
{
"name": "Jupyter Notebook",
"bytes": "22354299"
},
{
"name": "Python",
"bytes": "897444"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class Hlayer1rule15(Himesis):

    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule layer1rule15.
        """
        # Flag this instance as compiled now.
        self.is_compiled = True

        super(Hlayer1rule15, self).__init__(
            name='Hlayer1rule15', num_nodes=0, edges=[])

        # Graph-level attributes.
        self["mm__"] = ['HimesisMM']
        self["name"] = 'layer1rule15'
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'layer1rule15')

        # Vertex specifications in creation order: the position in this list
        # is the vertex index, and each entry is the ordered (key, value)
        # attribute pairs assigned to that vertex.
        vertex_specs = [
            [('mm__', 'MatchModel')],                             # 0: match model (only one supported)
            [('mm__', 'ApplyModel')],                             # 1: apply model
            [('mm__', 'paired_with')],                            # 2: pairs match and apply models
            [('mm__', 'Operation'), ('attr1', '+')],              # 3: match class Operation(layer1rule15class0)
            [('mm__', 'match_contains')],                         # 4: match_contains for vertex 3
            [('mm__', 'OperationParameter'), ('attr1', '+')],     # 5: match class OperationParameter(layer1rule15class1)
            [('mm__', 'match_contains')],                         # 6: match_contains for vertex 5
            [('mm__', 'FunctionPrototype'), ('attr1', '1')],      # 7: apply class FunctionPrototype(layer1rule15class2)
            [('mm__', 'apply_contains')],                         # 8: apply_contains for vertex 7
            [('mm__', 'Argument'), ('attr1', '1')],               # 9: apply class Argument(layer1rule15class3)
            [('mm__', 'apply_contains')],                         # 10: apply_contains for vertex 9
            [('attr1', 'parameters'), ('mm__', 'directLink_S')],  # 11: match assoc Operation--parameters-->OperationParameter
            [('attr1', 'arguments'), ('mm__', 'directLink_T')],   # 12: apply assoc FunctionPrototype--arguments-->Argument
            [('mm__', 'backward_link')],                          # 13: backward assoc FunctionPrototype -> Operation
        ]
        for index, attributes in enumerate(vertex_specs):
            self.add_node()
            for key, value in attributes:
                self.vs[index][key] = value

        # Add the edges.
        self.add_edges([
            (0, 4),    # matchmodel -> match_contains
            (4, 3),    # match_contains -> match_class Operation(layer1rule15class0)
            (0, 6),    # matchmodel -> match_contains
            (6, 5),    # match_contains -> match_class OperationParameter(layer1rule15class1)
            (1, 8),    # applymodel -> apply_contains
            (8, 7),    # apply_contains -> apply_class FunctionPrototype(layer1rule15class2)
            (1, 10),   # applymodel -> apply_contains
            (10, 9),   # apply_contains -> apply_class Argument(layer1rule15class3)
            (3, 11),   # match_class Operation(layer1rule15class0) -> association parameters
            (11, 5),   # association parameters -> match_class OperationParameter(layer1rule15class1)
            (7, 12),   # apply_class FunctionPrototype(layer1rule15class2) -> association arguments
            (12, 9),   # association arguments -> apply_class Argument(layer1rule15class3)
            (7, 13),   # apply_class FunctionPrototype(layer1rule15class2) -> backward_association
            (13, 3),   # backward_association -> apply_class Operation(layer1rule15class0)
            (0, 2),    # matchmodel -> pairedwith
            (2, 1),    # pairedwith -> applyModel
        ])

        # Attribute equations relating apply attributes to constants and
        # to matched attributes.
        self["equations"] = [
            ((7, '__ApplyAttribute'), ('constant', 'ProvidedPortFunctionPrototype')),
            ((9, '__ApplyAttribute'), ('constant', 'ProvidedPortFunctionPrototypeArgument')),
            ((9, 'name'), (5, 'name')),
        ]
| {
"content_hash": "399f540928faa5fa2ba0d605986cb5f3",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 205,
"avg_line_length": 39.76724137931034,
"alnum_prop": 0.5378278777368307,
"repo_name": "levilucio/SyVOLT",
"id": "51a9d1d3546ad99bd99753d0698709f524329505",
"size": "4613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mbeddr2C_MM/transformation_from_eclipse/Hlayer1rule15.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font of a scatter marker colorbar title (generated hierarchy node)."""

    # class properties
    # --------------------
    _parent_path_str = "scatter.marker.colorbar.title"
    _path_str = "scatter.marker.colorbar.title.font"
    _valid_props = {"color", "family", "size"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (e.g. 'aliceblue', 'black', 'rebeccapurple', ...)

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser.  The web browser will only be able to apply a font if it
        is available on the system which it operates.  Provide multiple
        font families, separated by commas, to indicate the preference in
        which to apply fonts if they aren't available on the system.  The
        Chart Studio Cloud (at https://chart-studio.plotly.com or
        on-premise) generates images on a server, where only a select
        number of fonts are installed and supported.  These include
        "Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
        "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans",
        "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Font object

        Sets this color bar's title font.  Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatter.marker
            .colorbar.title.Font`
        color

        family
            HTML font family - see the `family` property.
        size

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Internal construction path: adopt the parent and stop early.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise ``arg`` into a plain dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.marker.colorbar.title.Font`"""
            )

        # Handle skip_invalid / validation flags.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties: an explicit keyword argument wins over the
        # corresponding entry in ``arg`` (which is popped either way).
        for prop_name, explicit in (("color", color), ("family", family), ("size", size)):
            value = arg.pop(prop_name, None)
            if explicit is not None:
                value = explicit
            if value is not None:
                self[prop_name] = value

        # Forward any remaining entries as unknown kwargs.
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid.
        self._skip_invalid = False
| {
"content_hash": "55fbfaead8ac178b1efd668b06355535",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 82,
"avg_line_length": 37.69298245614035,
"alnum_prop": 0.5609727717011869,
"repo_name": "plotly/python-api",
"id": "3c7d1ca13e2784ad51c47412edb9767d00d7a950",
"size": "8594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/scatter/marker/colorbar/title/_font.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import os
from datetime import datetime
from math import ceil

from flask import Flask, request, session, redirect, url_for, render_template, flash
# The ``flask.ext.*`` import shim was deprecated in Flask 0.11 and removed
# in Flask 1.0; the extension package must be imported directly.
from flask_sqlalchemy import SQLAlchemy

# Directory containing this module; used to anchor the SQLite file path.
basedir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)

'''Configuration - Debug can be removed for production use'''
app.config.update(dict(
    SQLALCHEMY_DATABASE_URI='sqlite:///' + os.path.join(basedir, 'data.sqlite'),
    SECRET_KEY='not a password',
    DEBUG=True,
    SQLALCHEMY_COMMIT_ON_TEARDOWN=True,
    USERNAME='admin',
    PASSWORD='default',
    PER_PAGE=3
))
# Optionally override the defaults from the file named by $FLASKR_SETTINGS.
app.config.from_envvar('FLASKR_SETTINGS', silent=True)

db = SQLAlchemy(app)
# Data model - one (Post) to many (Comment)
class Post(db.Model):
    """A blog entry; parent side of the one-to-many Post -> Comment relation."""

    __tablename__ = 'posts'

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String, nullable=False)
    text = db.Column(db.Text, nullable=False)
    timestamp = db.Column(db.DateTime, index=True)
    # Lazy query of this post's comments; each Comment gains a ``post`` backref.
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    def __repr__(self):
        return '<Post %r>' % self.title
class Comment(db.Model):
    """A guest comment attached to a single Post."""

    __tablename__ = 'comments'

    id = db.Column(db.Integer, primary_key=True)
    reply = db.Column(db.Text, nullable=False)
    # Foreign key to the owning post (see Post.comments relationship).
    role_id = db.Column(db.Integer, db.ForeignKey('posts.id'))

    def __repr__(self):
        return '<Comment %r>' % self.reply
@app.route('/')
def show_entries():
    """Index page showing all posts, newest first, paginated."""
    page_number = request.args.get('page', 1, type=int)
    pagination = Post.query.order_by(Post.id.desc()).paginate(
        page_number, per_page=app.config['PER_PAGE'], error_out=False)
    return render_template(
        'show_entries.html', entries=pagination.items, pagination=pagination)
@app.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Show a single post with its guest comments; POST adds a comment."""
    entry = Post.query.get_or_404(id)
    if request.method == 'POST':
        # Commit happens via SQLALCHEMY_COMMIT_ON_TEARDOWN.
        db.session.add(Comment(reply=request.form['reply'], post=entry))
        return redirect(url_for('show_entries'))
    return render_template(
        'post.html', post=entry, comments=entry.comments.all())
@app.route('/add', methods=['GET', 'POST'])
def add_entry():
    """Create a new post; only available to the logged-in admin."""
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    if request.method != 'POST':
        return render_template('add.html')
    db.session.add(Post(title=request.form['title'],
                        text=request.form['text'],
                        timestamp=datetime.now()))
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/delete/<int:id>')
def delete_entry(id):
    """Delete a post; only available to the logged-in admin."""
    # NOTE(review): deletion via a GET link is CSRF-prone — consider
    # switching this route to POST with a token.  Left as-is here.
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    doomed = Post.query.get_or_404(id)
    db.session.delete(doomed)
    flash('The post has been deleted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Admin login form; re-renders with an error message on bad credentials."""
    error = None
    if request.method == 'POST':
        submitted_user = request.form['username']
        submitted_pass = request.form['password']
        if submitted_user != app.config['USERNAME']:
            error = 'Invalid username'
        elif submitted_pass != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Log the admin out; pop() with a default is harmless when not logged in."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run() | {
"content_hash": "2b8c1ab2d7506606cc6a6ea19aecd805",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 115,
"avg_line_length": 34.294642857142854,
"alnum_prop": 0.6456651913564176,
"repo_name": "sicongl/miniblog",
"id": "0c8582a9938423cacf4445e2af1827727b00852b",
"size": "3841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miniblog.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "14321"
},
{
"name": "JavaScript",
"bytes": "10318"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3841"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.